Diffstat (limited to 'include')
639 files changed, 16658 insertions, 6092 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 531c1e9a7d10..18197c16149f 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -3,7 +3,7 @@ * * Name: acbuffer.h - Support for buffers returned by ACPI predefined names * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index a225eff499c8..e92f84fa8c68 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -3,7 +3,7 @@ * * Name: acconfig.h - Global configuration constants * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 2fc624a61769..ea3b1c41bc79 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -3,7 +3,7 @@ * * Name: acexcep.h - Exception codes returned by the ACPI subsystem * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -59,11 +59,11 @@ struct acpi_exception_info { #define AE_OK (acpi_status) 0x0000 -#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL) -#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML) -#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER) -#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES) -#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL) +#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL) +#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML) +#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER) +#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES) +#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL) /* * Environmental exceptions diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 8922edb32730..a2bc381c7ce7 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -3,7 +3,7 @@ * * Name: acnames.h - Global names and strings * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index c5d900c0ecda..1538a6853822 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -3,7 +3,7 @@ * * Name: acoutput.h -- debug output * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h index e3e8051d4812..6f6282a862bc 100644 --- a/include/acpi/acpi.h +++ b/include/acpi/acpi.h @@ -3,7 +3,7 @@ * * Name: acpi.h - Master public include file used to interface to ACPICA * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. 
* *****************************************************************************/ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 6d1879bf9440..02a716a0af5d 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -684,8 +684,15 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); struct acpi_device * +acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv); +struct acpi_device * acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); +#define for_each_acpi_dev_match(adev, hid, uid, hrv) \ + for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ + adev; \ + adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv)) + static inline void acpi_dev_put(struct acpi_device *adev) { put_device(&adev->dev); diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index d4f39a20aa2a..94d356fcc483 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -15,19 +15,12 @@ * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst * if you add to this list. */ -#define ACPI_BUS_COMPONENT 0x00010000 -#define ACPI_AC_COMPONENT 0x00020000 -#define ACPI_BATTERY_COMPONENT 0x00040000 -#define ACPI_BUTTON_COMPONENT 0x00080000 #define ACPI_SBS_COMPONENT 0x00100000 #define ACPI_FAN_COMPONENT 0x00200000 #define ACPI_PCI_COMPONENT 0x00400000 -#define ACPI_POWER_COMPONENT 0x00800000 #define ACPI_CONTAINER_COMPONENT 0x01000000 #define ACPI_SYSTEM_COMPONENT 0x02000000 -#define ACPI_THERMAL_COMPONENT 0x04000000 #define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000 -#define ACPI_VIDEO_COMPONENT 0x10000000 #define ACPI_PROCESSOR_COMPONENT 0x20000000 /* diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index a4c6ef809e27..40a91ce87e04 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -30,6 +30,10 @@ static inline int pxm_to_node(int pxm) { return 0; } +static inline int node_to_pxm(int node) +{ + return 0; +} #endif /* CONFIG_ACPI_NUMA */ #ifdef CONFIG_ACPI_HMAT diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 33bb8c9a089d..690c369b717a 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -5,7 +5,7 @@ * interfaces must be implemented by OSL to interface the * ACPI components to the host operating system. * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 0bba8b8c350e..370293ee8399 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -3,7 +3,7 @@ * * Name: acpixf.h - External interfaces to the ACPI subsystem * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20201113 +#define ACPI_CA_VERSION 0x20210105 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h index d3521894ce6a..9bccac9becd7 100644 --- a/include/acpi/acrestyp.h +++ b/include/acpi/acrestyp.h @@ -3,7 +3,7 @@ * * Name: acrestyp.h - Defines, types, and structures for resource descriptors * - * Copyright (C) 2000 - 2020, Intel Corp. 
+ * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 5007c41f4d54..f9cda909f92c 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -3,7 +3,7 @@ * * Name: actbl.h - Basic ACPI Table Definitions * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 43549547ed3e..af0a8c3b87b7 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -3,7 +3,7 @@ * * Name: actbl1.h - Additional ACPI table definitions * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -514,7 +514,8 @@ enum acpi_dmar_type { ACPI_DMAR_TYPE_ROOT_ATS = 2, ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, ACPI_DMAR_TYPE_NAMESPACE = 4, - ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ + ACPI_DMAR_TYPE_SATC = 5, + ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */ }; /* DMAR Device Scope structure */ @@ -607,6 +608,14 @@ struct acpi_dmar_andd { char device_name[1]; }; +/* 5: SOC Integrated Address Translation Cache Reporting Structure */ + +struct acpi_dmar_satc { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; /******************************************************************************* * * DRTM - Dynamic Root of Trust for Measurement table diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index ec66779cb193..d6478c430c99 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -3,7 +3,7 @@ * * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -33,7 +33,6 @@ #define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ #define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ #define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ -#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ #define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ #define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ #define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ @@ -937,29 +936,6 @@ struct acpi_table_msdm { /******************************************************************************* * - * MTMR - MID Timer Table - * Version 1 - * - * Conforms to "Simple Firmware Interface Specification", - * Draft 0.8.2, Oct 19, 2010 - * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table. - * - ******************************************************************************/ - -struct acpi_table_mtmr { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* MTMR entry */ - -struct acpi_mtmr_entry { - struct acpi_generic_address physical_address; - u32 frequency; - u32 irq; -}; - -/******************************************************************************* - * * NFIT - NVDIMM Interface Table (ACPI 6.0+) * Version 1 * diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index bdcac69fa6bd..df5f4b27f3aa 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -3,7 +3,7 @@ * * Name: actbl3.h - ACPI Table Definitions * - * Copyright (C) 2000 - 2020, Intel Corp. 
+ * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -33,7 +33,6 @@ #define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ #define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ #define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ -#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */ #define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ #define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ #define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ @@ -486,28 +485,6 @@ struct acpi_table_uefi { /******************************************************************************* * - * VRTC - Virtual Real Time Clock Table - * Version 1 - * - * Conforms to "Simple Firmware Interface Specification", - * Draft 0.8.2, Oct 19, 2010 - * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table. - * - ******************************************************************************/ - -struct acpi_table_vrtc { - struct acpi_table_header header; /* Common ACPI table header */ -}; - -/* VRTC entry */ - -struct acpi_vrtc_entry { - struct acpi_generic_address physical_address; - u32 irq; -}; - -/******************************************************************************* - * * WAET - Windows ACPI Emulated devices Table * Version 1 * diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 647cb11d0a0a..92c71dfce0d5 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -3,7 +3,7 @@ * * Name: actypes.h - Common data types for the entire ACPI subsystem * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -1286,4 +1286,10 @@ typedef enum { #define ACPI_OPT_END -1 +/* Definitions for explicit fallthrough */ + +#ifndef ACPI_FALLTHROUGH +#define ACPI_FALLTHROUGH do {} while(0) +#endif + #endif /* __ACTYPES_H__ */ diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h index fb7d8d1fd93c..a5c2ca019a12 100644 --- a/include/acpi/acuuid.h +++ b/include/acpi/acuuid.h @@ -3,7 +3,7 @@ * * Name: acuuid.h - ACPI-related UUID/GUID definitions * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 232838d28f50..c7fc4524e151 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -39,7 +39,7 @@ struct cpc_reg { u8 bit_width; u8 bit_offset; u8 access_width; - u64 __iomem address; + u64 address; } __packed; /* diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index 8f6b2654c0b3..e8958e0d1646 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -3,7 +3,7 @@ * * Name: acenv.h - Host and compiler configuration * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h index c3facf5f8495..277fe2fa4d9b 100644 --- a/include/acpi/platform/acenvex.h +++ b/include/acpi/platform/acenvex.h @@ -3,7 +3,7 @@ * * Name: acenvex.h - Extra host and compiler configuration * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. 
* *****************************************************************************/ diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index 7d63d03cf507..0cd4f61d4248 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -3,7 +3,7 @@ * * Name: acgcc.h - GCC specific defines, etc. * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ @@ -54,4 +54,19 @@ typedef __builtin_va_list va_list; #define ACPI_USE_NATIVE_MATH64 +/* GCC did not support __has_attribute until 5.1. */ + +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +/* + * Explictly mark intentional explicit fallthrough to silence + * -Wimplicit-fallthrough in GCC 7.1+. + */ + +#if __has_attribute(__fallthrough__) +#define ACPI_FALLTHROUGH __attribute__((__fallthrough__)) +#endif + #endif /* __ACGCC_H__ */ diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h index 7c88fd1de955..738d52865e0a 100644 --- a/include/acpi/platform/acgccex.h +++ b/include/acpi/platform/acgccex.h @@ -3,7 +3,7 @@ * * Name: acgccex.h - Extra GCC specific defines, etc. * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h index e7fd5e71be62..550fe9a8cd6c 100644 --- a/include/acpi/platform/acintel.h +++ b/include/acpi/platform/acintel.h @@ -3,7 +3,7 @@ * * Name: acintel.h - VC specific defines, etc. * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 72f52a1342a0..b3ffb9bbf664 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -3,7 +3,7 @@ * * Name: aclinux.h - OS specific defines, etc. for Linux * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. * *****************************************************************************/ diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index 04f88f2de781..5f642b07ad64 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -3,7 +3,7 @@ * * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux * - * Copyright (C) 2000 - 2020, Intel Corp. + * Copyright (C) 2000 - 2021, Intel Corp. 
* *****************************************************************************/ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 267f6dfb8960..302506bbc2a4 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -35,7 +35,6 @@ mandatory-y += kprobes.h mandatory-y += linkage.h mandatory-y += local.h mandatory-y += local64.h -mandatory-y += mm-arch-hooks.h mandatory-y += mmiowb.h mandatory-y += mmu.h mandatory-y += mmu_context.h @@ -51,6 +50,7 @@ mandatory-y += sections.h mandatory-y += serial.h mandatory-y += shmparam.h mandatory-y += simd.h +mandatory-y += softirq_stack.h mandatory-y += switch_to.h mandatory-y += timex.h mandatory-y += tlbflush.h diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 365345f9a9e3..07a36a874dca 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -33,7 +33,7 @@ */ .macro ___EXPORT_SYMBOL name,val,sec -#ifdef CONFIG_MODULES +#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS) .section ___ksymtab\sec+\name,"a" .balign KSYM_ALIGN __ksymtab_\name: diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index e73a11850055..83448e837ded 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -88,7 +88,8 @@ #define HV_CONNECT_PORT BIT(7) #define HV_ACCESS_STATS BIT(8) #define HV_DEBUGGING BIT(11) -#define HV_CPU_POWER_MANAGEMENT BIT(12) +#define HV_CPU_MANAGEMENT BIT(12) +#define HV_ISOLATION BIT(22) /* @@ -141,6 +142,9 @@ struct ms_hyperv_tsc_page { #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 #define HVCALL_SEND_IPI_EX 0x0015 +#define HVCALL_GET_PARTITION_ID 0x0046 +#define HVCALL_DEPOSIT_MEMORY 0x0048 +#define HVCALL_CREATE_VP 0x004e #define HVCALL_GET_VP_REGISTERS 0x0050 #define HVCALL_SET_VP_REGISTERS 0x0051 #define HVCALL_POST_MESSAGE 0x005c @@ -148,6 +152,9 @@ struct ms_hyperv_tsc_page { #define HVCALL_POST_DEBUG_DATA 0x0069 #define HVCALL_RETRIEVE_DEBUG_DATA 0x006a #define HVCALL_RESET_DEBUG_SESSION 0x006b +#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076 +#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c +#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d #define HVCALL_RETARGET_INTERRUPT 0x007e #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 @@ -407,19 +414,144 @@ struct hv_tlb_flush_ex { u64 gva_list[]; } __packed; +/* HvGetPartitionId hypercall (output only) */ +struct hv_get_partition_id { + u64 partition_id; +} __packed; + +/* HvDepositMemory hypercall */ +struct hv_deposit_memory { + u64 partition_id; + u64 gpa_page_list[]; +} __packed; + +struct hv_proximity_domain_flags { + u32 proximity_preferred : 1; + u32 reserved : 30; + u32 proximity_info_valid : 1; +} __packed; + +/* Not a union in windows but useful for zeroing */ +union hv_proximity_domain_info { + struct { + u32 domain_id; + struct hv_proximity_domain_flags flags; + }; + u64 as_uint64; +} __packed; + +struct hv_lp_startup_status { + u64 hv_status; + u64 substatus1; + u64 substatus2; + u64 substatus3; + u64 substatus4; + u64 substatus5; + u64 substatus6; +} __packed; + +/* HvAddLogicalProcessor hypercall */ +struct hv_add_logical_processor_in { + u32 lp_index; + u32 apic_id; + union hv_proximity_domain_info proximity_domain_info; + u64 flags; +} __packed; + +struct hv_add_logical_processor_out { + struct hv_lp_startup_status startup_status; +} __packed; + +enum HV_SUBNODE_TYPE +{ + HvSubnodeAny = 0, + HvSubnodeSocket = 1, 
+ HvSubnodeAmdNode = 2, + HvSubnodeL3 = 3, + HvSubnodeCount = 4, + HvSubnodeInvalid = -1 +}; + +/* HvCreateVp hypercall */ +struct hv_create_vp { + u64 partition_id; + u32 vp_index; + u8 padding[3]; + u8 subnode_type; + u64 subnode_id; + union hv_proximity_domain_info proximity_domain_info; + u64 flags; +} __packed; + +enum hv_interrupt_source { + HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */ + HV_INTERRUPT_SOURCE_IOAPIC, +}; + +union hv_msi_address_register { + u32 as_uint32; + struct { + u32 reserved1:2; + u32 destination_mode:1; + u32 redirection_hint:1; + u32 reserved2:8; + u32 destination_id:8; + u32 msi_base:12; + }; +} __packed; + +union hv_msi_data_register { + u32 as_uint32; + struct { + u32 vector:8; + u32 delivery_mode:3; + u32 reserved1:3; + u32 level_assert:1; + u32 trigger_mode:1; + u32 reserved2:16; + }; +} __packed; + /* HvRetargetDeviceInterrupt hypercall */ union hv_msi_entry { u64 as_uint64; struct { - u32 address; - u32 data; + union hv_msi_address_register address; + union hv_msi_data_register data; } __packed; }; +union hv_ioapic_rte { + u64 as_uint64; + + struct { + u32 vector:8; + u32 delivery_mode:3; + u32 destination_mode:1; + u32 delivery_status:1; + u32 interrupt_polarity:1; + u32 remote_irr:1; + u32 trigger_mode:1; + u32 interrupt_mask:1; + u32 reserved1:15; + + u32 reserved2:24; + u32 destination_id:8; + }; + + struct { + u32 low_uint32; + u32 high_uint32; + }; +} __packed; + struct hv_interrupt_entry { - u32 source; /* 1 for MSI(-X) */ + u32 source; u32 reserved1; - union hv_msi_entry msi_entry; + union { + union hv_msi_entry msi_entry; + union hv_ioapic_rte ioapic_rte; + }; } __packed; /* @@ -494,4 +626,117 @@ struct hv_set_vp_registers_input { } element[]; } __packed; +enum hv_device_type { + HV_DEVICE_TYPE_LOGICAL = 0, + HV_DEVICE_TYPE_PCI = 1, + HV_DEVICE_TYPE_IOAPIC = 2, + HV_DEVICE_TYPE_ACPI = 3, +}; + +typedef u16 hv_pci_rid; +typedef u16 hv_pci_segment; +typedef u64 hv_logical_device_id; +union hv_pci_bdf { + u16 as_uint16; + + struct { + u8 function:3; + u8 device:5; + u8 bus; + }; +} __packed; + +union hv_pci_bus_range { + u16 as_uint16; + + struct { + u8 subordinate_bus; + u8 secondary_bus; + }; +} __packed; + +union hv_device_id { + u64 as_uint64; + + struct { + u64 reserved0:62; + u64 device_type:2; + }; + + /* HV_DEVICE_TYPE_LOGICAL */ + struct { + u64 id:62; + u64 device_type:2; + } logical; + + /* HV_DEVICE_TYPE_PCI */ + struct { + union { + hv_pci_rid rid; + union hv_pci_bdf bdf; + }; + + hv_pci_segment segment; + union hv_pci_bus_range shadow_bus_range; + + u16 phantom_function_bits:2; + u16 source_shadow:1; + + u16 rsvdz0:11; + u16 device_type:2; + } pci; + + /* HV_DEVICE_TYPE_IOAPIC */ + struct { + u8 ioapic_id; + u8 rsvdz0; + u16 rsvdz1; + u16 rsvdz2; + + u16 rsvdz3:14; + u16 device_type:2; + } ioapic; + + /* HV_DEVICE_TYPE_ACPI */ + struct { + u32 input_mapping_base; + u32 input_mapping_count:30; + u32 device_type:2; + } acpi; +} __packed; + +enum hv_interrupt_trigger_mode { + HV_INTERRUPT_TRIGGER_MODE_EDGE = 0, + HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1, +}; + +struct hv_device_interrupt_descriptor { + u32 interrupt_type; + u32 trigger_mode; + u32 vector_count; + u32 reserved; + struct hv_device_interrupt_target target; +} __packed; + +struct hv_input_map_device_interrupt { + u64 partition_id; + u64 device_id; + u64 flags; + struct hv_interrupt_entry logical_interrupt_entry; + struct hv_device_interrupt_descriptor interrupt_descriptor; +} __packed; + +struct hv_output_map_device_interrupt { + struct hv_interrupt_entry interrupt_entry; +} 
__packed; + +struct hv_input_unmap_device_interrupt { + u64 partition_id; + u64 device_id; + struct hv_interrupt_entry interrupt_entry; +} __packed; + +#define HV_SOURCE_SHADOW_NONE 0x0 +#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1 + #endif diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h deleted file mode 100644 index 5ff0e5193f85..000000000000 --- a/include/asm-generic/mm-arch-hooks.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Architecture specific mm hooks - */ - -#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H -#define _ASM_GENERIC_MM_ARCH_HOOKS_H - -/* - * This file should be included through arch/../include/asm/Kbuild for - * the architecture which doesn't need specific mm hooks. - * - * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h - * are used. - */ - -#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */ diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index c57799684170..dff58a3db5d5 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -27,11 +27,14 @@ struct ms_hyperv_info { u32 features; + u32 features_b; u32 misc_features; u32 hints; u32 nested_features; u32 max_vp_index; u32 max_lp_index; + u32 isolation_config_a; + u32 isolation_config_b; }; extern struct ms_hyperv_info ms_hyperv; @@ -169,6 +172,8 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); +enum hv_isolation_type hv_get_isolation_type(void); +bool hv_is_isolation_supported(void); void hyperv_cleanup(void); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h new file mode 100644 index 000000000000..1a3ad6d29833 --- /dev/null +++ b/include/asm-generic/numa.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_NUMA_H +#define __ASM_GENERIC_NUMA_H + +#ifdef CONFIG_NUMA + +#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2) + +int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) + +extern nodemask_t numa_nodes_parsed __initdata; + +extern bool numa_off; + +/* Mappings between node number and cpus on that node. */ +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +void numa_clear_node(unsigned int cpu); + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +const struct cpumask *cpumask_of_node(int node); +#else +/* Returns a pointer to the cpumask of CPUs on Node 'node'. 
*/ +static inline const struct cpumask *cpumask_of_node(int node) +{ + if (node == NUMA_NO_NODE) + return cpu_all_mask; + + return node_to_cpumask_map[node]; +} +#endif + +void __init arch_numa_init(void); +int __init numa_add_memblk(int nodeid, u64 start, u64 end); +void __init numa_set_distance(int from, int to, int distance); +void __init numa_free_distance(void); +void __init early_map_cpu_to_node(unsigned int cpu, int nid); +void numa_store_cpu_info(unsigned int cpu); +void numa_add_cpu(unsigned int cpu); +void numa_remove_cpu(unsigned int cpu); + +#else /* CONFIG_NUMA */ + +static inline void numa_store_cpu_info(unsigned int cpu) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } +static inline void arch_numa_init(void) { } +static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } + +#endif /* CONFIG_NUMA */ + +#endif /* __ASM_GENERIC_NUMA_H */ diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h index fe801f01625e..6fc47561814c 100644 --- a/include/asm-generic/page.h +++ b/include/asm-generic/page.h @@ -63,11 +63,7 @@ extern unsigned long memory_end; #endif /* !__ASSEMBLY__ */ -#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS -#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) -#else #define PAGE_OFFSET (0) -#endif #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 84ce841ce735..7ae0ece07b4e 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -15,6 +15,8 @@ #include <asm-generic/qrwlock_types.h> +/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */ + /* * Writer states & reader shift and bias. */ @@ -116,15 +118,26 @@ static inline void queued_write_unlock(struct qrwlock *lock) smp_store_release(&lock->wlocked, 0); } +/** + * queued_rwlock_is_contended - check if the lock is contended + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock contended, 0 otherwise + */ +static inline int queued_rwlock_is_contended(struct qrwlock *lock) +{ + return arch_spin_is_locked(&lock->wait_lock); +} + /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. 
*/ -#define arch_read_lock(l) queued_read_lock(l) -#define arch_write_lock(l) queued_write_lock(l) -#define arch_read_trylock(l) queued_read_trylock(l) -#define arch_write_trylock(l) queued_write_trylock(l) -#define arch_read_unlock(l) queued_read_unlock(l) -#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l) #endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h new file mode 100644 index 000000000000..eceeecf6a5bd --- /dev/null +++ b/include/asm-generic/softirq_stack.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H +#define __ASM_GENERIC_SOFTIRQ_STACK_H + +#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK +void do_softirq_own_stack(void); +#else +static inline void do_softirq_own_stack(void) +{ + __do_softirq(); +} +#endif + +#endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6661ee1cff47..2c68a545ffa7 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -46,7 +46,9 @@ * * The mmu_gather API consists of: * - * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather + * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu() + * + * start and finish a mmu_gather * * Finish in particular will issue a (final) TLB invalidate and free * all (remaining) queued pages. @@ -91,7 +93,7 @@ * * - mmu_gather::fullmm * - * A flag set by tlb_gather_mmu() to indicate we're going to free + * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free * the entire mm; this allows a number of optimizations. * * - We can ignore tlb_{start,end}_vma(); because we don't diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b2b3d81b1535..0331d5d49551 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -90,15 +90,18 @@ * .data. We don't want to pull in .data..other sections, which Linux * has defined. Same for text and bss. * + * With LTO_CLANG, the linker also splits sections by default, so we need + * these macros to combine the sections during the final link. + * * RODATA_MAIN is not used because existing code already defines .rodata.x * sections to be brought in with rodata. */ -#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* -#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* -#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text @@ -316,6 +319,16 @@ #define THERMAL_TABLE(name) #endif +#ifdef CONFIG_DTPM +#define DTPM_TABLE() \ + . 
= ALIGN(8); \ + __dtpm_table = .; \ + KEEP(*(__dtpm_table)) \ + __dtpm_table_end = .; +#else +#define DTPM_TABLE() +#endif + #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ @@ -393,7 +406,10 @@ . = ALIGN(8); \ __start_static_call_sites = .; \ KEEP(*(.static_call_sites)) \ - __stop_static_call_sites = .; + __stop_static_call_sites = .; \ + __start_static_call_tramp_key = .; \ + KEEP(*(.static_call_tramp_key)) \ + __stop_static_call_tramp_key = .; /* * Allow architectures to handle ro_after_init data on their @@ -459,7 +475,7 @@ } \ \ /* Built-in firmware blobs */ \ - .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ __start_builtin_fw = .; \ KEEP(*(.builtin_fw)) \ __end_builtin_fw = .; \ @@ -481,27 +497,6 @@ __stop___ksymtab_gpl = .; \ } \ \ - /* Kernel symbol table: Normal unused symbols */ \ - __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ - __start___ksymtab_unused = .; \ - KEEP(*(SORT(___ksymtab_unused+*))) \ - __stop___ksymtab_unused = .; \ - } \ - \ - /* Kernel symbol table: GPL-only unused symbols */ \ - __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ - __start___ksymtab_unused_gpl = .; \ - KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ - __stop___ksymtab_unused_gpl = .; \ - } \ - \ - /* Kernel symbol table: GPL-future-only symbols */ \ - __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ - __start___ksymtab_gpl_future = .; \ - KEEP(*(SORT(___ksymtab_gpl_future+*))) \ - __stop___ksymtab_gpl_future = .; \ - } \ - \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ __start___kcrctab = .; \ @@ -516,27 +511,6 @@ __stop___kcrctab_gpl = .; \ } \ \ - /* Kernel symbol table: Normal unused symbols */ \ - __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ - __start___kcrctab_unused = .; \ - KEEP(*(SORT(___kcrctab_unused+*))) \ - __stop___kcrctab_unused = .; \ - } \ - \ - /* Kernel symbol table: GPL-only unused symbols */ \ - __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ - __start___kcrctab_unused_gpl = .; \ - KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ - __stop___kcrctab_unused_gpl = .; \ - } \ - \ - /* Kernel symbol table: GPL-future-only symbols */ \ - __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ - __start___kcrctab_gpl_future = .; \ - KEEP(*(SORT(___kcrctab_gpl_future+*))) \ - __stop___kcrctab_gpl_future = .; \ - } \ - \ /* Kernel symbol table: strings */ \ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ *(__ksymtab_strings) \ @@ -733,6 +707,7 @@ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ THERMAL_TABLE(governor) \ + DTPM_TABLE() \ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() \ @@ -828,8 +803,13 @@ /* DWARF 4 */ \ .debug_types 0 : { *(.debug_types) } \ /* DWARF 5 */ \ + .debug_addr 0 : { *(.debug_addr) } \ + .debug_line_str 0 : { *(.debug_line_str) } \ + .debug_loclists 0 : { *(.debug_loclists) } \ .debug_macro 0 : { *(.debug_macro) } \ - .debug_addr 0 : { *(.debug_addr) } + .debug_names 0 : { *(.debug_names) } \ + .debug_rnglists 0 : { *(.debug_rnglists) } \ + .debug_str_offsets 0 : { *(.debug_str_offsets) } /* Stabs debugging sections. */ #define STABS_DEBUG \ @@ -988,12 +968,13 @@ #endif /* - * Clang's -fsanitize=kernel-address and -fsanitize=thread produce - * unwanted sections (.eh_frame and .init_array.*), but - * CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections. 
+ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and + * -fsanitize=thread produce unwanted sections (.eh_frame + * and .init_array.*), but CONFIG_CONSTRUCTORS wants to + * keep any .init_array.* sections. * https://bugs.llvm.org/show_bug.cgi?id=46478 */ -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) +#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) # ifdef CONFIG_CONSTRUCTORS # define SANITIZER_DISCARDS \ *(.eh_frame) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 18dd7a4aaf7d..86f0748009af 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -189,45 +189,6 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst) return inst->__ctx; } -struct crypto_cipher_spawn { - struct crypto_spawn base; -}; - -static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, - struct crypto_instance *inst, - const char *name, u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - return crypto_grab_spawn(&spawn->base, inst, name, type, mask); -} - -static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) -{ - crypto_drop_spawn(&spawn->base); -} - -static inline struct crypto_alg *crypto_spawn_cipher_alg( - struct crypto_cipher_spawn *spawn) -{ - return spawn->base.alg; -} - -static inline struct crypto_cipher *crypto_spawn_cipher( - struct crypto_cipher_spawn *spawn) -{ - u32 type = CRYPTO_ALG_TYPE_CIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK; - - return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); -} - -static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) -{ - return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; -} - static inline struct crypto_async_request *crypto_get_backlog( struct crypto_queue *queue) { diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h new file mode 100644 index 000000000000..18875f16f8ca --- /dev/null +++ b/include/crypto/blake2b.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _CRYPTO_BLAKE2B_H +#define _CRYPTO_BLAKE2B_H + +#include <linux/bug.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/string.h> + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[BLAKE2B_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, + BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, + BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL, + BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL, + BLAKE2B_IV4 = 0x510E527FADE682D1ULL, + BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL, + BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL, + BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL, +}; + +static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, + const void *key, size_t keylen) +{ + state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2B_IV1; + state->h[2] = BLAKE2B_IV2; + state->h[3] = BLAKE2B_IV3; + state->h[4] = BLAKE2B_IV4; + state->h[5] = BLAKE2B_IV5; + state->h[6] = BLAKE2B_IV6; + state->h[7] = BLAKE2B_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); + state->buflen = BLAKE2B_BLOCK_SIZE; + } +} + +#endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index b471deac28ff..bc3fb59442ce 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -3,15 +3,14 @@ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. */ -#ifndef BLAKE2S_H -#define BLAKE2S_H +#ifndef _CRYPTO_BLAKE2S_H +#define _CRYPTO_BLAKE2S_H +#include <linux/bug.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> -#include <asm/bug.h> - enum blake2s_lengths { BLAKE2S_BLOCK_SIZE = 64, BLAKE2S_HASH_SIZE = 32, @@ -24,6 +23,7 @@ enum blake2s_lengths { }; struct blake2s_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ u32 h[8]; u32 t[2]; u32 f[2]; @@ -43,29 +43,34 @@ enum blake2s_iv { BLAKE2S_IV7 = 0x5BE0CD19UL, }; -void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); -void blake2s_final(struct blake2s_state *state, u8 *out); - -static inline void blake2s_init_param(struct blake2s_state *state, - const u32 param) +static inline void __blake2s_init(struct blake2s_state *state, size_t outlen, + const void *key, size_t keylen) { - *state = (struct blake2s_state){{ - BLAKE2S_IV0 ^ param, - BLAKE2S_IV1, - BLAKE2S_IV2, - BLAKE2S_IV3, - BLAKE2S_IV4, - BLAKE2S_IV5, - BLAKE2S_IV6, - BLAKE2S_IV7, - }}; + state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2S_IV1; + state->h[2] = BLAKE2S_IV2; + state->h[3] = BLAKE2S_IV3; + state->h[4] = BLAKE2S_IV4; + state->h[5] = BLAKE2S_IV5; + state->h[6] = BLAKE2S_IV6; + state->h[7] = BLAKE2S_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); + state->buflen = BLAKE2S_BLOCK_SIZE; + } } static inline void blake2s_init(struct blake2s_state *state, const size_t outlen) { - blake2s_init_param(state, 0x01010000 | outlen); - state->outlen = outlen; + __blake2s_init(state, outlen, NULL, 0); } static inline void blake2s_init_key(struct blake2s_state *state, @@ -75,12 +80,12 @@ static inline void blake2s_init_key(struct blake2s_state *state, WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); - blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen); - memcpy(state->buf, key, keylen); - state->buflen = BLAKE2S_BLOCK_SIZE; - state->outlen = outlen; + __blake2s_init(state, outlen, key, keylen); } +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + static inline void blake2s(u8 *out, const u8 *in, const u8 *key, const size_t outlen, const size_t inlen, const size_t keylen) @@ -91,11 +96,7 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key, outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || (!key && keylen))); - if (keylen) - blake2s_init_key(&state, outlen, key, keylen); - else - blake2s_init(&state, outlen); - + __blake2s_init(&state, outlen, key, keylen); blake2s_update(&state, in, inlen); blake2s_final(&state, out); } @@ -103,4 +104,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key, void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen); -#endif /* BLAKE2S_H */ +#endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index af2ff31ff619..13f8a6a54ca8 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -149,7 +149,7 @@ struct ahash_alg { struct shash_desc { struct crypto_shash *tfm; - void *__ctx[] CRYPTO_MINALIGN_ATTR; + void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); }; #define HASH_MAX_DIGESTSIZE 64 @@ -162,9 +162,9 @@ struct shash_desc { #define HASH_MAX_STATESIZE 512 -#define SHASH_DESC_ON_STACK(shash, ctx) \ - char __##shash##_desc[sizeof(struct shash_desc) + \ - HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \ +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ + __aligned(__alignof__(struct shash_desc)); \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc /** diff --git 
a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h new file mode 100644 index 000000000000..982fe5e8471c --- /dev/null +++ b/include/crypto/internal/blake2b.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2b implementations. + * Keep this in sync with the corresponding BLAKE2s header. + */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2B_H +#define _CRYPTO_INTERNAL_BLAKE2B_H + +#include <crypto/blake2b.h> +#include <crypto/internal/hash.h> +#include <linux/string.h> + +void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void blake2b_set_lastblock(struct blake2b_state *state) +{ + state->f[0] = -1; +} + +typedef void (*blake2b_compress_t)(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void __blake2b_update(struct blake2b_state *state, + const u8 *in, size_t inlen, + blake2b_compress_t compress) +{ + const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2B_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); + in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +static inline void __blake2b_final(struct blake2b_state *state, u8 *out, + blake2b_compress_t compress) +{ + int i; + + blake2b_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */ + (*compress)(state, state->buf, 1, state->buflen); + for (i = 0; i < ARRAY_SIZE(state->h); i++) + __cpu_to_le64s(&state->h[i]); + memcpy(out, state->h, state->outlen); +} + +/* Helper functions for shash implementations of BLAKE2b */ + +struct blake2b_tfm_ctx { + u8 key[BLAKE2B_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE) + return -EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2b_init(struct shash_desc *desc) +{ + const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2b_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + __blake2b_init(state, outlen, tctx->key, tctx->keylen); + return 0; +} + +static inline int crypto_blake2b_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_update(state, in, inlen, compress); + return 0; +} + +static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_final(state, out, compress); + return 0; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */ diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h index 6e376ae6b6b5..8e50d487500f 100644 --- a/include/crypto/internal/blake2s.h +++ 
b/include/crypto/internal/blake2s.h @@ -1,14 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2s implementations. + * Keep this in sync with the corresponding BLAKE2b header. + */ -#ifndef BLAKE2S_INTERNAL_H -#define BLAKE2S_INTERNAL_H +#ifndef _CRYPTO_INTERNAL_BLAKE2S_H +#define _CRYPTO_INTERNAL_BLAKE2S_H #include <crypto/blake2s.h> - -struct blake2s_tfm_ctx { - u8 key[BLAKE2S_KEY_SIZE]; - unsigned int keylen; -}; +#include <crypto/internal/hash.h> +#include <linux/string.h> void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, size_t nblocks, const u32 inc); @@ -23,4 +24,96 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state) state->f[0] = -1; } -#endif /* BLAKE2S_INTERNAL_H */ +typedef void (*blake2s_compress_t)(struct blake2s_state *state, + const u8 *block, size_t nblocks, u32 inc); + +/* Helper functions for BLAKE2s shared by the library and shash APIs */ + +static inline void __blake2s_update(struct blake2s_state *state, + const u8 *in, size_t inlen, + blake2s_compress_t compress) +{ + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +static inline void __blake2s_final(struct blake2s_state *state, u8 *out, + blake2s_compress_t compress) +{ + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + (*compress)(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); +} + +/* Helper functions for shash implementations of BLAKE2s */ + +struct blake2s_tfm_ctx { + u8 key[BLAKE2S_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2s_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) + return -EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2s_init(struct shash_desc *desc) +{ + const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2s_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + __blake2s_init(state, outlen, tctx->key, tctx->keylen); + return 0; +} + +static inline int crypto_blake2s_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2s_compress_t compress) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_update(state, in, inlen, compress); + return 0; +} + +static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, + blake2s_compress_t compress) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_final(state, out, compress); + return 0; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h new file mode 100644 index 
000000000000..a9174ba90250 --- /dev/null +++ b/include/crypto/internal/cipher.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 David S. Miller (davem@redhat.com) + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> + * + * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> + * and Nettle, by Niels Möller. + */ + +#ifndef _CRYPTO_INTERNAL_CIPHER_H +#define _CRYPTO_INTERNAL_CIPHER_H + +#include <crypto/algapi.h> + +struct crypto_cipher { + struct crypto_tfm base; +}; + +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. + */ + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. 
The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. 
+ */ +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} + +static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_spawn_cipher_alg( + struct crypto_cipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_cipher_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +#endif diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 10226c12c5df..a2339f80a615 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -9,6 +9,7 @@ #define _CRYPTO_INTERNAL_SKCIPHER_H #include <crypto/algapi.h> +#include <crypto/internal/cipher.h> #include <crypto/skcipher.h> #include <linux/list.h> #include <linux/types.h> @@ -132,7 +133,6 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic); -void skcipher_walk_atomise(struct skcipher_walk *walk); int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 948c5203ca9c..47accec68cb0 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -12,7 +12,6 @@ #include <linux/keyctl.h> #include <linux/oid_registry.h> -#include <crypto/akcipher.h> /* * Cryptographic data for the public-key subtype of the asymmetric key type. diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index ce7023e9115d..ac5a28eff2c8 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -66,6 +66,8 @@ * * For an implementation of how to use this look at * drm_atomic_helper_setup_commit() from the atomic helper library. + * + * See also drm_crtc_commit_wait(). 
*/ struct drm_crtc_commit { /** @@ -436,6 +438,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) kref_put(&commit->ref, __drm_crtc_commit_free); } +int drm_crtc_commit_wait(struct drm_crtc_commit *commit); + struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index edffd1dcca3e..632ad7faa006 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1016,6 +1016,11 @@ struct drm_device; #define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ #define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ +#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */ +# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0) +# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0 +# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3) + /* Sideband MSG Buffers */ #define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ #define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 716990bace10..b81b3bfb08c8 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev, struct drm_pending_event *p); void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, + ktime_t timestamp); struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h new file mode 100644 index 000000000000..cfc5adee3d13 --- /dev/null +++ b/include/drm/drm_gem_atomic_helper.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __DRM_GEM_ATOMIC_HELPER_H__ +#define __DRM_GEM_ATOMIC_HELPER_H__ + +#include <linux/dma-buf-map.h> + +#include <drm/drm_plane.h> + +struct drm_simple_display_pipe; + +/* + * Plane Helpers + */ + +int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state); +int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/* + * Helpers for planes with shadow buffers + */ + +/** + * struct drm_shadow_plane_state - plane state for planes with shadow buffers + * + * For planes that use a shadow buffer, struct drm_shadow_plane_state + * provides the regular plane state plus mappings of the shadow buffer + * into kernel address space. + */ +struct drm_shadow_plane_state { + /** @base: plane state */ + struct drm_plane_state base; + + /* Transitional state - do not export or duplicate */ + + /** + * @map: Mappings of the plane's framebuffer BOs in to kernel address space + * + * The memory mappings stored in map should be established in the plane's + * prepare_fb callback and removed in the cleanup_fb callback. 
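[Editor's note] The drm_atomic.h hunk above exports drm_crtc_commit_wait(). A hedged sketch of how a driver might wait for the commit recorded in the old CRTC state before reprogramming hardware; example_wait_for_previous_commit() is a placeholder name and the call site is an assumption:

/* Hypothetical sketch: wait for the previous commit on a CRTC using the
 * drm_crtc_commit_wait() helper added above.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_print.h>

static void example_wait_for_previous_commit(struct drm_crtc_state *old_crtc_state)
{
	struct drm_crtc_commit *commit = old_crtc_state->commit;
	int ret;

	if (!commit)
		return;

	ret = drm_crtc_commit_wait(commit);
	if (ret)
		drm_err(old_crtc_state->crtc->dev,
			"wait for commit failed: %d\n", ret);
}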
+ */ + struct dma_buf_map map[4]; +}; + +/** + * to_drm_shadow_plane_state - upcasts from struct drm_plane_state + * @state: the plane state + */ +static inline struct drm_shadow_plane_state * +to_drm_shadow_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct drm_shadow_plane_state, base); +} + +void drm_gem_reset_shadow_plane(struct drm_plane *plane); +struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane); +void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_FUNCS - + * Initializes struct drm_plane_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_funcs to use the rsp helper functions. + */ +#define DRM_GEM_SHADOW_PLANE_FUNCS \ + .reset = drm_gem_reset_shadow_plane, \ + .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \ + .atomic_destroy_state = drm_gem_destroy_shadow_plane_state + +int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); +void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS - + * Initializes struct drm_plane_helper_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_helper_funcs to use the rsp helper + * functions. + */ +#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \ + .prepare_fb = drm_gem_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_cleanup_shadow_fb + +int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe); +struct drm_plane_state * +drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe); +void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS - + * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_simple_display_pipe_funcs to use the rsp helper + * functions. 
+ */ +#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \ + .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \ + .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \ + .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \ + .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state + +#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 6b013154911d..6bdffc7aa124 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -9,9 +9,6 @@ struct drm_framebuffer; struct drm_framebuffer_funcs; struct drm_gem_object; struct drm_mode_fb_cmd2; -struct drm_plane; -struct drm_plane_state; -struct drm_simple_display_pipe; #define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52) @@ -44,8 +41,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_afbc_framebuffer *afbc_fb); -int drm_gem_fb_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state); -int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state); #endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index a4bac02249c2..288055d397d9 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -172,19 +172,19 @@ struct drm_vram_mm { uint64_t vram_base; size_t vram_size; - struct ttm_bo_device bdev; + struct ttm_device bdev; }; /** * drm_vram_mm_of_bdev() - \ - Returns the container of type &struct ttm_bo_device for field bdev. + Returns the container of type &struct ttm_device for field bdev. * @bdev: the TTM BO device * * Returns: * The containing instance of &struct drm_vram_mm */ static inline struct drm_vram_mm *drm_vram_mm_of_bdev( - struct ttm_bo_device *bdev) + struct ttm_device *bdev) { return container_of(bdev, struct drm_vram_mm, bdev); } diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index eb706342861d..f3a4b47b3986 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1179,7 +1179,7 @@ struct drm_plane_helper_funcs { * members in the plane structure. * * Drivers which always have their buffers pinned should use - * drm_gem_fb_prepare_fb() for this hook. + * drm_gem_plane_helper_prepare_fb() for this hook. * * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. @@ -1233,9 +1233,8 @@ struct drm_plane_helper_funcs { * NOTE: * * This function is called in the check phase of an atomic update. The - * driver is not allowed to change anything outside of the free-standing - * state objects passed-in or assembled in the overall &drm_atomic_state - * update tracking structure. + * driver is not allowed to change anything outside of the + * &drm_atomic_state update tracking structure. * * RETURNS: * @@ -1245,7 +1244,7 @@ struct drm_plane_helper_funcs { * deadlock. */ int (*atomic_check)(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); /** * @atomic_update: @@ -1263,7 +1262,7 @@ struct drm_plane_helper_funcs { * transitional plane helpers, but it is optional. 
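[Editor's note] A sketch (not from the patch) of how a driver could wire the new shadow-buffered plane helpers from drm_gem_atomic_helper.h into its plane tables; the example_* names are placeholders, and the driver would still supply its own atomic_check/atomic_update:

/* Hypothetical sketch: plane funcs/helper funcs built on the new
 * shadow-plane macros.  The shadow plane state keeps the framebuffer
 * BOs mapped between prepare_fb and cleanup_fb, as described above.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane.h>

static const struct drm_plane_funcs example_shadow_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,	/* reset/duplicate/destroy state */
};

static const struct drm_plane_helper_funcs example_shadow_plane_helper_funcs = {
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,	/* prepare_fb/cleanup_fb */
	/* .atomic_check and .atomic_update are supplied by the driver */
};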
*/ void (*atomic_update)(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); /** * @atomic_disable: * @@ -1287,14 +1286,14 @@ struct drm_plane_helper_funcs { * transitional plane helpers, but it is optional. */ void (*atomic_disable)(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); /** * @atomic_async_check: * - * Drivers should set this function pointer to check if the plane state - * can be updated in a async fashion. Here async means "not vblank - * synchronized". + * Drivers should set this function pointer to check if the plane's + * atomic state can be updated in a async fashion. Here async means + * "not vblank synchronized". * * This hook is called by drm_atomic_async_check() to establish if a * given update can be committed asynchronously, that is, if it can @@ -1306,7 +1305,7 @@ struct drm_plane_helper_funcs { * can not be applied in asynchronous manner. */ int (*atomic_async_check)(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); /** * @atomic_async_update: @@ -1322,11 +1321,9 @@ struct drm_plane_helper_funcs { * update won't happen if there is an outstanding commit modifying * the same plane. * - * Note that unlike &drm_plane_helper_funcs.atomic_update this hook - * takes the new &drm_plane_state as parameter. When doing async_update - * drivers shouldn't replace the &drm_plane_state but update the - * current one with the new plane configurations in the new - * plane_state. + * When doing async_update drivers shouldn't replace the + * &drm_plane_state but update the current one with the new plane + * configurations in the new plane_state. * * Drivers should also swap the framebuffers between current plane * state (&drm_plane.state) and new_state. @@ -1345,7 +1342,7 @@ struct drm_plane_helper_funcs { * for deferring if needed, until a common solution is created. */ void (*atomic_async_update)(struct drm_plane *plane, - struct drm_plane_state *new_state); + struct drm_atomic_state *state); }; /** diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 8ef06ee1c8eb..1294610e84f4 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -79,8 +79,8 @@ struct drm_plane_state { * preserved. * * Drivers should store any implicit fence in this from their - * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb() - * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() + * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers. */ struct dma_fence *fence; @@ -538,10 +538,14 @@ struct drm_plane_funcs { * * For compatibility with legacy userspace, only overlay planes are made * available to userspace by default. Userspace clients may set the - * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they * wish to receive a universal plane list containing all plane types. See also * drm_for_each_legacy_plane(). * + * In addition to setting each plane's type, drivers need to setup the + * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy + * IOCTLs. See drm_crtc_init_with_planes(). + * * WARNING: The values of this enum is UABI since they're exposed in the "type" * property. 
*/ @@ -557,19 +561,20 @@ enum drm_plane_type { /** * @DRM_PLANE_TYPE_PRIMARY: * - * Primary planes represent a "main" plane for a CRTC. Primary planes - * are the planes operated upon by CRTC modesetting and flipping - * operations described in the &drm_crtc_funcs.page_flip and - * &drm_crtc_funcs.set_config hooks. + * A primary plane attached to a CRTC is the most likely to be able to + * light up the CRTC when no scaling/cropping is used and the plane + * covers the whole CRTC. */ DRM_PLANE_TYPE_PRIMARY, /** * @DRM_PLANE_TYPE_CURSOR: * - * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes - * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and - * DRM_IOCTL_MODE_CURSOR2 IOCTLs. + * A cursor plane attached to a CRTC is more likely to be able to be + * enabled when no scaling/cropping is used and the framebuffer has the + * size indicated by &drm_mode_config.cursor_width and + * &drm_mode_config.cursor_height. Additionally, if the driver doesn't + * support modifiers, the framebuffer should have a linear layout. */ DRM_PLANE_TYPE_CURSOR, }; diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index e6dbf3161c2f..ef9944e9c5fc 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -117,7 +117,7 @@ struct drm_simple_display_pipe_funcs { * more details. * * Drivers which always have their buffers pinned should use - * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook. + * drm_gem_simple_display_pipe_prepare_fb() for this hook. */ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -149,6 +149,33 @@ struct drm_simple_display_pipe_funcs { * more details. */ void (*disable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @reset_plane: + * + * Optional, called by &drm_plane_funcs.reset. Please read the + * documentation for the &drm_plane_funcs.reset hook for more details. + */ + void (*reset_plane)(struct drm_simple_display_pipe *pipe); + + /** + * @duplicate_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please + * read the documentation for the &drm_plane_funcs.atomic_duplicate_state + * hook for more details. + */ + struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe); + + /** + * @destroy_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please + * read the documentation for the &drm_plane_funcs.atomic_destroy_state + * hook for more details. 
+ */ + void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); }; /** diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index dd125f8c766c..733a3e2d1d10 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -247,7 +247,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); void drm_crtc_vblank_on(struct drm_crtc *crtc); u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); -void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); void drm_crtc_vblank_restore(struct drm_crtc *crtc); void drm_calc_timestamping_constants(struct drm_crtc *crtc, diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 975e8a67947f..1c815e0a14ed 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -206,6 +206,12 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, return s_job && atomic_inc_return(&s_job->karma) > threshold; } +enum drm_gpu_sched_stat { + DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ + DRM_GPU_SCHED_STAT_NOMINAL, + DRM_GPU_SCHED_STAT_ENODEV, +}; + /** * struct drm_sched_backend_ops * @@ -230,10 +236,16 @@ struct drm_sched_backend_ops { struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); /** - * @timedout_job: Called when a job has taken too long to execute, - * to trigger GPU recovery. + * @timedout_job: Called when a job has taken too long to execute, + * to trigger GPU recovery. + * + * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, + * and the underlying driver has started or completed recovery. + * + * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer + * available, i.e. has been unplugged. */ - void (*timedout_job)(struct drm_sched_job *sched_job); + enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); /** * @free_job: Called once the job's finished fence has been signaled @@ -285,7 +297,8 @@ struct drm_gpu_scheduler { struct list_head pending_list; spinlock_t job_list_lock; int hang_limit; - atomic_t score; + atomic_t *score; + atomic_t _score; bool ready; bool free_guilty; }; @@ -293,7 +306,7 @@ struct drm_gpu_scheduler { int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, uint32_t hw_submission, unsigned hang_limit, long timeout, - const char *name); + atomic_t *score, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 931e46191047..ebd0dd1c35b3 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -634,4 +634,15 @@ INTEL_VGA_DEVICE(0x4907, info), \ INTEL_VGA_DEVICE(0x4908, info) +/* ADL-S */ +#define INTEL_ADLS_IDS(info) \ + INTEL_VGA_DEVICE(0x4680, info), \ + INTEL_VGA_DEVICE(0x4681, info), \ + INTEL_VGA_DEVICE(0x4682, info), \ + INTEL_VGA_DEVICE(0x4683, info), \ + INTEL_VGA_DEVICE(0x4690, info), \ + INTEL_VGA_DEVICE(0x4691, info), \ + INTEL_VGA_DEVICE(0x4692, info), \ + INTEL_VGA_DEVICE(0x4693, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index e17be324d95f..4fb523dfab32 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -44,9 +44,9 @@ #include "ttm_resource.h" -struct ttm_bo_global; +struct ttm_global; -struct ttm_bo_device; +struct ttm_device; struct dma_buf_map; @@ -88,7 +88,6 @@ struct ttm_tt; * @type: The bo type. 
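[Editor's note] The gpu_scheduler.h changes above make timedout_job return an enum drm_gpu_sched_stat and add a score pointer to drm_sched_init(). A hedged sketch of the driver side; the example_* callbacks are placeholder stubs and the numeric limits are arbitrary:

/* Hypothetical sketch: timeout handler returning the new status enum,
 * plus a drm_sched_init() call with the added score argument (NULL lets
 * the scheduler fall back to its embedded counter).
 */
#include <drm/gpu_scheduler.h>
#include <linux/jiffies.h>

static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	return NULL;	/* a real driver returns the job's hardware fence */
}

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *sched_job)
{
	/* Driver-specific GPU recovery would be started here.  If the
	 * device had been unplugged, DRM_GPU_SCHED_STAT_ENODEV would be
	 * returned instead.
	 */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void example_free_job(struct drm_sched_job *sched_job)
{
}

static const struct drm_sched_backend_ops example_sched_ops = {
	.run_job	= example_run_job,
	.timedout_job	= example_timedout_job,
	.free_job	= example_free_job,
};

static int example_sched_setup(struct drm_gpu_scheduler *sched)
{
	return drm_sched_init(sched, &example_sched_ops, 64, 2,
			      msecs_to_jiffies(500), NULL /* score */,
			      "example-ring");
}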
* @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. - * @acc_size: Accounted size for this object. * @kref: Reference count of this buffer object. When this refcount reaches * zero, the object is destroyed or put on the delayed delete list. * @mem: structure describing current placement. @@ -122,10 +121,9 @@ struct ttm_buffer_object { * Members constant at init. */ - struct ttm_bo_device *bdev; + struct ttm_device *bdev; enum ttm_bo_type type; void (*destroy) (struct ttm_buffer_object *); - size_t acc_size; /** * Members not needing protection. @@ -313,7 +311,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an - * object. This function must be called with struct ttm_bo_global::lru_lock + * object. This function must be called with struct ttm_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, @@ -326,7 +324,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, * @bulk: bulk move structure * * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that - * BO order never changes. Should be called with ttm_bo_global::lru_lock held. + * BO order never changes. Should be called with ttm_global::lru_lock held. */ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); @@ -337,14 +335,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); * Returns * True if the workqueue was queued at the time */ -int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); +int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); +void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); /** * ttm_bo_eviction_valuable @@ -357,21 +355,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place); -size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size); - /** * ttm_bo_init_reserved * - * @bdev: Pointer to a ttm_bo_device struct. + * @bdev: Pointer to a ttm_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. * @size: Requested size of buffer object. * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. * @ctx: TTM operation context for memory allocation. - * @acc_size: Accounted size for this object. * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * @@ -396,20 +389,19 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -int ttm_bo_init_reserved(struct ttm_bo_device *bdev, +int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, struct ttm_operation_ctx *ctx, - size_t acc_size, struct sg_table *sg, - struct dma_resv *resv, + struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_init * - * @bdev: Pointer to a ttm_bo_device struct. 
+ * @bdev: Pointer to a ttm_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. * @size: Requested size of buffer object. * @type: Requested type of buffer object. @@ -421,7 +413,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * holds a pointer to a persistent shmem object. Typically, this would * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. - * @acc_size: Accounted size for this object. * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * @@ -443,10 +434,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, +int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, - uint32_t page_alignment, bool interrubtible, size_t acc_size, + uint32_t page_alignment, bool interrubtible, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); @@ -537,18 +528,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); * * @filp: filp as input from the mmap method. * @vma: vma as input from the mmap method. - * @bdev: Pointer to the ttm_bo_device with the address space manager. + * @bdev: Pointer to the ttm_device with the address space manager. * * This function is intended to be called by the device mmap method. * if the device address space is to be backed by the bo manager. */ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev); + struct ttm_device *bdev); /** * ttm_bo_io * - * @bdev: Pointer to the struct ttm_bo_device. + * @bdev: Pointer to the struct ttm_device. * @filp: Pointer to the struct file attempting to read / write. * @wbuf: User-space pointer to address of buffer to write. NULL on read. * @rbuf: User-space pointer to address of buffer to read into. @@ -565,11 +556,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * the function may return -ERESTARTSYS if * interrupted by a signal. 
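[Editor's note] The ttm_bo_api.h changes above drop the accounted size and take struct ttm_device. A hedged sketch of BO creation with the updated ttm_bo_init_reserved() signature; the example_* names and placement are assumptions and error handling is simplified:

/* Hypothetical sketch: create a kernel BO without an acc_size argument. */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/slab.h>

static void example_bo_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

static int example_bo_create(struct ttm_device *bdev,
			     struct ttm_placement *placement,
			     size_t size, struct ttm_buffer_object **out)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_buffer_object *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;

	ret = ttm_bo_init_reserved(bdev, bo, size, ttm_bo_type_kernel,
				   placement, 0 /* page_alignment */, &ctx,
				   NULL /* sg */, NULL /* resv */,
				   example_bo_destroy);
	if (ret)
		return ret;	/* TTM is expected to put the BO on failure */

	ttm_bo_unreserve(bo);
	*out = bo;
	return 0;
}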
*/ -ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, +ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write); -int ttm_bo_swapout(struct ttm_operation_ctx *ctx); +int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags); /** * ttm_bo_uses_embedded_gem_object - check if the given bo uses the @@ -617,7 +608,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) --bo->pin_count; } -int ttm_mem_evict_first(struct ttm_bo_device *bdev, +int ttm_mem_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, @@ -642,5 +633,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma); int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); +bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 423348414c59..8959c0075cfd 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -37,302 +37,14 @@ #include <linux/spinlock.h> #include <linux/dma-resv.h> +#include <drm/ttm/ttm_device.h> + #include "ttm_bo_api.h" -#include "ttm_memory.h" #include "ttm_placement.h" #include "ttm_tt.h" #include "ttm_pool.h" /** - * struct ttm_bo_driver - * - * @create_ttm_backend_entry: Callback to create a struct ttm_backend. - * @evict_flags: Callback to obtain placement flags when a buffer is evicted. - * @move: Callback for a driver to hook in accelerated functions to - * move a buffer. - * If set to NULL, a potentially slow memcpy() move is used. - */ - -struct ttm_bo_driver { - /** - * ttm_tt_create - * - * @bo: The buffer object to create the ttm for. - * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. - * - * Create a struct ttm_tt to back data with system memory pages. - * No pages are actually allocated. - * Returns: - * NULL: Out of memory. - */ - struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, - uint32_t page_flags); - - /** - * ttm_tt_populate - * - * @ttm: The struct ttm_tt to contain the backing pages. - * - * Allocate all backing pages - * Returns: - * -ENOMEM: Out of memory. - */ - int (*ttm_tt_populate)(struct ttm_bo_device *bdev, - struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx); - - /** - * ttm_tt_unpopulate - * - * @ttm: The struct ttm_tt to contain the backing pages. - * - * Free all backing page - */ - void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - - /** - * ttm_tt_destroy - * - * @bdev: Pointer to a ttm device - * @ttm: Pointer to a struct ttm_tt. - * - * Destroy the backend. This will be call back from ttm_tt_destroy so - * don't call ttm_tt_destroy from the callback or infinite loop. - */ - void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - - /** - * struct ttm_bo_driver member eviction_valuable - * - * @bo: the buffer object to be evicted - * @place: placement we need room for - * - * Check with the driver if it is valuable to evict a BO to make room - * for a certain placement. - */ - bool (*eviction_valuable)(struct ttm_buffer_object *bo, - const struct ttm_place *place); - /** - * struct ttm_bo_driver member evict_flags: - * - * @bo: the buffer object to be evicted - * - * Return the bo flags for a buffer which is not mapped to the hardware. 
- * These will be placed in proposed_flags so that when the move is - * finished, they'll end up in bo->mem.flags - * This should not cause multihop evictions, and the core will warn - * if one is proposed. - */ - - void (*evict_flags)(struct ttm_buffer_object *bo, - struct ttm_placement *placement); - - /** - * struct ttm_bo_driver member move: - * - * @bo: the buffer to move - * @evict: whether this motion is evicting the buffer from - * the graphics address space - * @ctx: context for this move with parameters - * @new_mem: the new memory region receiving the buffer - @ @hop: placement for driver directed intermediate hop - * - * Move a buffer between two memory regions. - * Returns errno -EMULTIHOP if driver requests a hop - */ - int (*move)(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem, - struct ttm_place *hop); - - /** - * struct ttm_bo_driver_member verify_access - * - * @bo: Pointer to a buffer object. - * @filp: Pointer to a struct file trying to access the object. - * - * Called from the map / write / read methods to verify that the - * caller is permitted to access the buffer object. - * This member may be set to NULL, which will refuse this kind of - * access for all buffer objects. - * This function should return 0 if access is granted, -EPERM otherwise. - */ - int (*verify_access)(struct ttm_buffer_object *bo, - struct file *filp); - - /** - * Hook to notify driver about a resource delete. - */ - void (*delete_mem_notify)(struct ttm_buffer_object *bo); - - /** - * notify the driver that we're about to swap out this bo - */ - void (*swap_notify)(struct ttm_buffer_object *bo); - - /** - * Driver callback on when mapping io memory (for bo_move_memcpy - * for instance). TTM will take care to call io_mem_free whenever - * the mapping is not use anymore. io_mem_reserve & io_mem_free - * are balanced. - */ - int (*io_mem_reserve)(struct ttm_bo_device *bdev, - struct ttm_resource *mem); - void (*io_mem_free)(struct ttm_bo_device *bdev, - struct ttm_resource *mem); - - /** - * Return the pfn for a given page_offset inside the BO. - * - * @bo: the BO to look up the pfn for - * @page_offset: the offset to look up - */ - unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, - unsigned long page_offset); - - /** - * Read/write memory buffers for ptrace access - * - * @bo: the BO to access - * @offset: the offset from the start of the BO - * @buf: pointer to source/destination buffer - * @len: number of bytes to copy - * @write: whether to read (0) from or write (non-0) to BO - * - * If successful, this function should return the number of - * bytes copied, -EIO otherwise. If the number of bytes - * returned is < len, the function may be called again with - * the remainder of the buffer to copy. - */ - int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, - void *buf, int len, int write); - - /** - * struct ttm_bo_driver member del_from_lru_notify - * - * @bo: the buffer object deleted from lru - * - * notify driver that a BO was deleted from LRU. - */ - void (*del_from_lru_notify)(struct ttm_buffer_object *bo); - - /** - * Notify the driver that we're about to release a BO - * - * @bo: BO that is about to be released - * - * Gives the driver a chance to do any cleanup, including - * adding fences that may force a delayed delete - */ - void (*release_notify)(struct ttm_buffer_object *bo); -}; - -/** - * struct ttm_bo_global - Buffer object driver global data. 
- * - * @dummy_read_page: Pointer to a dummy page used for mapping requests - * of unpopulated pages. - * @shrink: A shrink callback object used for buffer object swap. - * @device_list_mutex: Mutex protecting the device list. - * This mutex is held while traversing the device list for pm options. - * @lru_lock: Spinlock protecting the bo subsystem lru lists. - * @device_list: List of buffer object devices. - * @swap_lru: Lru list of buffer objects used for swapping. - */ - -extern struct ttm_bo_global { - - /** - * Constant after init. - */ - - struct kobject kobj; - struct page *dummy_read_page; - spinlock_t lru_lock; - - /** - * Protected by ttm_global_mutex. - */ - struct list_head device_list; - - /** - * Protected by the lru_lock. - */ - struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; - - /** - * Internal protection. - */ - atomic_t bo_count; -} ttm_bo_glob; - - -#define TTM_NUM_MEM_TYPES 8 - -/** - * struct ttm_bo_device - Buffer object driver device-specific data. - * - * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. - * @man: An array of resource_managers. - * @vma_manager: Address space manager (pointer) - * lru_lock: Spinlock that protects the buffer+device lru lists and - * ddestroy lists. - * @dev_mapping: A pointer to the struct address_space representing the - * device address space. - * @wq: Work queue structure for the delayed delete workqueue. - * - */ - -struct ttm_bo_device { - - /* - * Constant after bo device init / atomic. - */ - struct list_head device_list; - struct ttm_bo_driver *driver; - /* - * access via ttm_manager_type. - */ - struct ttm_resource_manager sysman; - struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; - /* - * Protected by internal locks. - */ - struct drm_vma_offset_manager *vma_manager; - struct ttm_pool pool; - - /* - * Protected by the global:lru lock. - */ - struct list_head ddestroy; - - /* - * Protected by load / firstopen / lastclose /unload sync. - */ - - struct address_space *dev_mapping; - - /* - * Internal protection. - */ - - struct delayed_work wq; -}; - -static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev, - int mem_type) -{ - return bdev->man_drv[mem_type]; -} - -static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, - int type, - struct ttm_resource_manager *manager) -{ - bdev->man_drv[type] = manager; -} - -/** * struct ttm_lru_bulk_move_pos * * @first: first BO in the bulk move range @@ -388,31 +100,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -int ttm_bo_device_release(struct ttm_bo_device *bdev); - -/** - * ttm_bo_device_init - * - * @bdev: A pointer to a struct ttm_bo_device to initialize. - * @glob: A pointer to an initialized struct ttm_bo_global. - * @driver: A pointer to a struct ttm_bo_driver set up by the caller. - * @dev: The core kernel device pointer for DMA mappings and allocations. - * @mapping: The address space to use for this bo. - * @vma_manager: A pointer to a vma manager. - * @use_dma_alloc: If coherent DMA allocation API should be used. - * @use_dma32: If we should use GFP_DMA32 for device memory allocations. - * - * Initializes a struct ttm_bo_device: - * Returns: - * !0: Failure. 
- */ -int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_driver *driver, - struct device *dev, - struct address_space *mapping, - struct drm_vma_offset_manager *vma_manager, - bool use_dma_alloc, bool use_dma32); - /** * ttm_bo_unmap_virtual * @@ -494,9 +181,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); } static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, @@ -538,9 +225,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) /* * ttm_bo_util.c */ -int ttm_mem_io_reserve(struct ttm_bo_device *bdev, +int ttm_mem_io_reserve(struct ttm_device *bdev, struct ttm_resource *mem); -void ttm_mem_io_free(struct ttm_bo_device *bdev, +void ttm_mem_io_free(struct ttm_device *bdev, struct ttm_resource *mem); /** @@ -631,7 +318,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. */ -int ttm_range_man_init(struct ttm_bo_device *bdev, +int ttm_range_man_init(struct ttm_device *bdev, unsigned type, bool use_tt, unsigned long p_size); @@ -643,7 +330,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, * * Remove the generic range manager from a slot and tear it down. */ -int ttm_range_man_fini(struct ttm_bo_device *bdev, +int ttm_range_man_fini(struct ttm_device *bdev, unsigned type); #endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h new file mode 100644 index 000000000000..035bbc044a3b --- /dev/null +++ b/include/drm/ttm/ttm_device.h @@ -0,0 +1,318 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_DEVICE_H_ +#define _TTM_DEVICE_H_ + +#include <linux/types.h> +#include <linux/workqueue.h> +#include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_pool.h> + +#define TTM_NUM_MEM_TYPES 8 + +struct ttm_device; +struct ttm_placement; +struct ttm_buffer_object; +struct ttm_operation_ctx; + +/** + * struct ttm_global - Buffer object driver global data. + * + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. 
+ * @shrink: A shrink callback object used for buffer object swap. + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ +extern struct ttm_global { + + /** + * Constant after init. + */ + + struct page *dummy_read_page; + spinlock_t lru_lock; + + /** + * Protected by ttm_global_mutex. + */ + struct list_head device_list; + + /** + * Protected by the lru_lock. + */ + struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; + + /** + * Internal protection. + */ + atomic_t bo_count; +} ttm_glob; + +struct ttm_device_funcs { + /** + * ttm_tt_create + * + * @bo: The buffer object to create the ttm for. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ + struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, + uint32_t page_flags); + + /** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Free all backing page + */ + void (*ttm_tt_unpopulate)(struct ttm_device *bdev, + struct ttm_tt *ttm); + + /** + * ttm_tt_destroy + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. + */ + void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm); + + /** + * struct ttm_bo_driver member eviction_valuable + * + * @bo: the buffer object to be evicted + * @place: placement we need room for + * + * Check with the driver if it is valuable to evict a BO to make room + * for a certain placement. + */ + bool (*eviction_valuable)(struct ttm_buffer_object *bo, + const struct ttm_place *place); + /** + * struct ttm_bo_driver member evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. + * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + * This should not cause multihop evictions, and the core will warn + * if one is proposed. + */ + + void (*evict_flags)(struct ttm_buffer_object *bo, + struct ttm_placement *placement); + + /** + * struct ttm_bo_driver member move: + * + * @bo: the buffer to move + * @evict: whether this motion is evicting the buffer from + * the graphics address space + * @ctx: context for this move with parameters + * @new_mem: the new memory region receiving the buffer + @ @hop: placement for driver directed intermediate hop + * + * Move a buffer between two memory regions. + * Returns errno -EMULTIHOP if driver requests a hop + */ + int (*move)(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem, + struct ttm_place *hop); + + /** + * struct ttm_bo_driver_member verify_access + * + * @bo: Pointer to a buffer object. + * @filp: Pointer to a struct file trying to access the object. 
+ * + * Called from the map / write / read methods to verify that the + * caller is permitted to access the buffer object. + * This member may be set to NULL, which will refuse this kind of + * access for all buffer objects. + * This function should return 0 if access is granted, -EPERM otherwise. + */ + int (*verify_access)(struct ttm_buffer_object *bo, + struct file *filp); + + /** + * Hook to notify driver about a resource delete. + */ + void (*delete_mem_notify)(struct ttm_buffer_object *bo); + + /** + * notify the driver that we're about to swap out this bo + */ + void (*swap_notify)(struct ttm_buffer_object *bo); + + /** + * Driver callback on when mapping io memory (for bo_move_memcpy + * for instance). TTM will take care to call io_mem_free whenever + * the mapping is not use anymore. io_mem_reserve & io_mem_free + * are balanced. + */ + int (*io_mem_reserve)(struct ttm_device *bdev, + struct ttm_resource *mem); + void (*io_mem_free)(struct ttm_device *bdev, + struct ttm_resource *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy. + */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); + + /** + * struct ttm_bo_driver member del_from_lru_notify + * + * @bo: the buffer object deleted from lru + * + * notify driver that a BO was deleted from LRU. + */ + void (*del_from_lru_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); +}; + +/** + * struct ttm_device - Buffer object driver device-specific data. + * + * @device_list: Our entry in the global device list. + * @funcs: Function table for the device. + * @sysman: Resource manager for the system domain. + * @man_drv: An array of resource_managers. + * @vma_manager: Address space manager. + * @pool: page pool for the device. + * @dev_mapping: A pointer to the struct address_space representing the + * device address space. + * @wq: Work queue structure for the delayed delete workqueue. + */ +struct ttm_device { + /* + * Constant after bo device init + */ + struct list_head device_list; + struct ttm_device_funcs *funcs; + + /* + * Access via ttm_manager_type. + */ + struct ttm_resource_manager sysman; + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; + + /* + * Protected by internal locks. + */ + struct drm_vma_offset_manager *vma_manager; + struct ttm_pool pool; + + /* + * Protected by the global:lru lock. + */ + struct list_head ddestroy; + + /* + * Protected by load / firstopen / lastclose /unload sync. + */ + struct address_space *dev_mapping; + + /* + * Internal protection. 
+ */ + struct delayed_work wq; +}; + +static inline struct ttm_resource_manager * +ttm_manager_type(struct ttm_device *bdev, int mem_type) +{ + return bdev->man_drv[mem_type]; +} + +static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type, + struct ttm_resource_manager *manager) +{ + bdev->man_drv[type] = manager; +} + +int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, + struct device *dev, struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, + bool use_dma_alloc, bool use_dma32); +void ttm_device_fini(struct ttm_device *bdev); + +#endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h deleted file mode 100644 index c1f167881e33..000000000000 --- a/include/drm/ttm/ttm_memory.h +++ /dev/null @@ -1,95 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifndef TTM_MEMORY_H -#define TTM_MEMORY_H - -#include <linux/workqueue.h> -#include <linux/spinlock.h> -#include <linux/bug.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kobject.h> -#include <linux/mm.h> -#include "ttm_bo_api.h" - -/** - * struct ttm_mem_global - Global memory accounting structure. - * - * @shrink: A single callback to shrink TTM memory usage. Extend this - * to a linked list to be able to handle multiple callbacks when needed. - * @swap_queue: A workqueue to handle shrinking in low memory situations. We - * need a separate workqueue since it will spend a lot of time waiting - * for the GPU, and this will otherwise block other workqueue tasks(?) - * At this point we use only a single-threaded workqueue. - * @work: The workqueue callback for the shrink queue. - * @lock: Lock to protect the @shrink - and the memory accounting members, - * that is, essentially the whole structure with some exceptions. - * @lower_mem_limit: include lower limit of swap space and lower limit of - * system memory. - * @zones: Array of pointers to accounting zones. - * @num_zones: Number of populated entries in the @zones array. - * @zone_kernel: Pointer to the kernel zone. - * @zone_highmem: Pointer to the highmem zone if there is one. - * @zone_dma32: Pointer to the dma32 zone if there is one. 
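[Editor's note] With ttm_device.h in place, the driver-facing rename is: struct ttm_bo_driver becomes struct ttm_device_funcs, struct ttm_bo_device becomes struct ttm_device, and ttm_bo_device_init() becomes ttm_device_init(). A hedged sketch of a minimal driver setup; the example_* names are placeholders and only ttm_tt_create is shown, other callbacks keep their previous roles with the new bdev type:

/* Hypothetical sketch: driver function table and device init after the
 * rename.
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/slab.h>

static struct ttm_tt *example_ttm_tt_create(struct ttm_buffer_object *bo,
					    uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static struct ttm_device_funcs example_ttm_funcs = {
	.ttm_tt_create = example_ttm_tt_create,
	/* .evict_flags, .move, .verify_access, ... as before, except the
	 * bdev arguments are now struct ttm_device *
	 */
};

static int example_ttm_setup(struct ttm_device *bdev, struct device *dev,
			     struct address_space *mapping,
			     struct drm_vma_offset_manager *vma_manager)
{
	return ttm_device_init(bdev, &example_ttm_funcs, dev, mapping,
			       vma_manager, true /* use_dma_alloc */,
			       false /* use_dma32 */);
}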
- * - * Note that this structure is not per device. It should be global for all - * graphics devices. - */ - -#define TTM_MEM_MAX_ZONES 2 -struct ttm_mem_zone; -extern struct ttm_mem_global { - struct kobject kobj; - struct workqueue_struct *swap_queue; - struct work_struct work; - spinlock_t lock; - uint64_t lower_mem_limit; - struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; - unsigned int num_zones; - struct ttm_mem_zone *zone_kernel; -#ifdef CONFIG_HIGHMEM - struct ttm_mem_zone *zone_highmem; -#else - struct ttm_mem_zone *zone_dma32; -#endif -} ttm_mem_glob; - -int ttm_mem_global_init(struct ttm_mem_global *glob); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); -bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, - struct ttm_operation_ctx *ctx); -#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index da0ed7e8c915..6164ccf4f308 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -33,7 +33,7 @@ #define TTM_MAX_BO_PRIORITY 4U -struct ttm_bo_device; +struct ttm_device; struct ttm_resource_manager; struct ttm_resource; struct ttm_place; @@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); -int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, +int ttm_resource_manager_evict_all(struct ttm_device *bdev, struct ttm_resource_manager *man); void ttm_resource_manager_debug(struct ttm_resource_manager *man, diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 6c8eb9a4de81..069f8130241a 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -30,6 +30,7 @@ #include <linux/types.h> #include <drm/ttm/ttm_caching.h> +struct ttm_bo_device; struct ttm_tt; struct ttm_resource; struct ttm_buffer_object; @@ -118,14 +119,14 @@ void ttm_tt_fini(struct ttm_tt *ttm); * * Unbind, unpopulate and destroy common struct ttm_tt. */ -void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm); /** * ttm_tt_destroy_common: * * Called from driver to destroy common path. */ -void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -135,7 +136,8 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); * Swap in a previously swap out ttm_tt. 
*/ int ttm_tt_swapin(struct ttm_tt *ttm); -int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm, + gfp_t gfp_flags); /** * ttm_tt_populate - allocate pages for a ttm @@ -144,7 +146,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); * * Calls the driver method to allocate pages for a ttm */ -int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); +int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate - free pages from a ttm @@ -153,7 +155,10 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o * * Calls the driver method to free all pages from a ttm */ -void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm); + +int ttm_tt_mgr_init(void); +void ttm_tt_mgr_fini(void); #if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h index e2749dbc74b8..93752ea107e3 100644 --- a/include/dt-bindings/clock/axg-clkc.h +++ b/include/dt-bindings/clock/axg-clkc.h @@ -70,7 +70,6 @@ #define CLKID_HIFI_PLL 69 #define CLKID_PCIE_CML_EN0 79 #define CLKID_PCIE_CML_EN1 80 -#define CLKID_MIPI_ENABLE 81 #define CLKID_GEN_CLK 84 #define CLKID_VPU_0_SEL 92 #define CLKID_VPU_0 93 diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h index 673a8c662340..82b1fc8d1ee0 100644 --- a/include/dt-bindings/clock/imx8-clock.h +++ b/include/dt-bindings/clock/imx8-clock.h @@ -64,6 +64,8 @@ #define IMX_DC0_PLL1_CLK 81 #define IMX_DC0_DISP0_CLK 82 #define IMX_DC0_DISP1_CLK 83 +#define IMX_DC0_BYPASS0_CLK 84 +#define IMX_DC0_BYPASS1_CLK 85 /* MIPI-LVDS SS */ #define IMX_MIPI_IPG_CLK 90 diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h index e63a5530aed7..47c6f7f9582c 100644 --- a/include/dt-bindings/clock/imx8mm-clock.h +++ b/include/dt-bindings/clock/imx8mm-clock.h @@ -274,6 +274,14 @@ #define IMX8MM_CLK_A53_CORE 251 -#define IMX8MM_CLK_END 252 +#define IMX8MM_CLK_CLKOUT1_SEL 252 +#define IMX8MM_CLK_CLKOUT1_DIV 253 +#define IMX8MM_CLK_CLKOUT1 254 +#define IMX8MM_CLK_CLKOUT2_SEL 255 +#define IMX8MM_CLK_CLKOUT2_DIV 256 +#define IMX8MM_CLK_CLKOUT2 257 + + +#define IMX8MM_CLK_END 258 #endif diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index 621ea0e87c67..d24b627cb2e7 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -234,6 +234,13 @@ #define IMX8MN_CLK_A53_CORE 214 -#define IMX8MN_CLK_END 215 +#define IMX8MN_CLK_CLKOUT1_SEL 215 +#define IMX8MN_CLK_CLKOUT1_DIV 216 +#define IMX8MN_CLK_CLKOUT1 217 +#define IMX8MN_CLK_CLKOUT2_SEL 218 +#define IMX8MN_CLK_CLKOUT2_DIV 219 +#define IMX8MN_CLK_CLKOUT2 220 + +#define IMX8MN_CLK_END 221 #endif diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index 9b8045d75b8b..82e907ce7bdd 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -431,6 +431,20 @@ #define IMX8MQ_CLK_A53_CORE 289 -#define IMX8MQ_CLK_END 290 +#define IMX8MQ_CLK_MON_AUDIO_PLL1_DIV 290 +#define IMX8MQ_CLK_MON_AUDIO_PLL2_DIV 291 +#define IMX8MQ_CLK_MON_VIDEO_PLL1_DIV 292 +#define IMX8MQ_CLK_MON_GPU_PLL_DIV 293 +#define 
IMX8MQ_CLK_MON_VPU_PLL_DIV 294 +#define IMX8MQ_CLK_MON_ARM_PLL_DIV 295 +#define IMX8MQ_CLK_MON_SYS_PLL1_DIV 296 +#define IMX8MQ_CLK_MON_SYS_PLL2_DIV 297 +#define IMX8MQ_CLK_MON_SYS_PLL3_DIV 298 +#define IMX8MQ_CLK_MON_DRAM_PLL_DIV 299 +#define IMX8MQ_CLK_MON_VIDEO_PLL2_DIV 300 +#define IMX8MQ_CLK_MON_SEL 301 +#define IMX8MQ_CLK_MON_CLK2_OUT 302 + +#define IMX8MQ_CLK_END 303 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/k210-clk.h b/include/dt-bindings/clock/k210-clk.h index a48176ad3c23..b2de702cbf75 100644 --- a/include/dt-bindings/clock/k210-clk.h +++ b/include/dt-bindings/clock/k210-clk.h @@ -9,7 +9,6 @@ /* * Kendryte K210 SoC clock identifiers (arbitrary values). */ -#define K210_CLK_ACLK 0 #define K210_CLK_CPU 0 #define K210_CLK_SRAM0 1 #define K210_CLK_SRAM1 2 diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index 4c5965ae1df4..f33781338eda 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -6,8 +6,6 @@ #ifndef __MESON8B_CLKC_H #define __MESON8B_CLKC_H -#define CLKID_UNUSED 0 -#define CLKID_XTAL 1 #define CLKID_PLL_FIXED 2 #define CLKID_PLL_VID 3 #define CLKID_PLL_SYS 4 diff --git a/include/dt-bindings/clock/mstar-msc313-mpll.h b/include/dt-bindings/clock/mstar-msc313-mpll.h new file mode 100644 index 000000000000..1b30b02317b6 --- /dev/null +++ b/include/dt-bindings/clock/mstar-msc313-mpll.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Output definitions for the MStar/SigmaStar MPLL + * + * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp> + */ + +#ifndef _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H +#define _DT_BINDINGS_CLOCK_MSTAR_MSC313_MPLL_H + +#define MSTAR_MSC313_MPLL_DIV2 1 +#define MSTAR_MSC313_MPLL_DIV3 2 +#define MSTAR_MSC313_MPLL_DIV4 3 +#define MSTAR_MSC313_MPLL_DIV5 4 +#define MSTAR_MSC313_MPLL_DIV6 5 +#define MSTAR_MSC313_MPLL_DIV7 6 +#define MSTAR_MSC313_MPLL_DIV10 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h index 6a73a174f049..72c99e486d86 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8998.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h @@ -184,6 +184,8 @@ #define GCC_MSS_MNOC_BIMC_AXI_CLK 175 #define GCC_BIMC_GFX_CLK 176 #define UFS_UNIPRO_CORE_CLK_SRC 177 +#define GCC_MMSS_GPLL0_CLK 178 +#define HMSS_GPLL0_CLK_SRC 179 #define PCIE_0_GDSC 0 #define UFS_GDSC 1 diff --git a/include/dt-bindings/clock/qcom,gcc-sc7280.h b/include/dt-bindings/clock/qcom,gcc-sc7280.h new file mode 100644 index 000000000000..4394f15111c6 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sc7280.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SC7280_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL0_OUT_ODD 2 +#define GCC_GPLL1 3 +#define GCC_GPLL10 4 +#define GCC_GPLL4 5 +#define GCC_GPLL9 6 +#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 7 +#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 8 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 9 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10 +#define GCC_CAMERA_AHB_CLK 11 +#define GCC_CAMERA_HF_AXI_CLK 12 +#define GCC_CAMERA_SF_AXI_CLK 13 +#define GCC_CAMERA_XO_CLK 14 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 15 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 16 +#define GCC_CPUSS_AHB_CLK 17 +#define GCC_CPUSS_AHB_CLK_SRC 18 +#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 19 +#define GCC_DDRSS_GPU_AXI_CLK 20 +#define GCC_DDRSS_PCIE_SF_CLK 21 +#define GCC_DISP_AHB_CLK 22 +#define GCC_DISP_GPLL0_CLK_SRC 23 +#define GCC_DISP_HF_AXI_CLK 24 +#define GCC_DISP_SF_AXI_CLK 25 +#define GCC_DISP_XO_CLK 26 +#define GCC_GP1_CLK 27 +#define GCC_GP1_CLK_SRC 28 +#define GCC_GP2_CLK 29 +#define GCC_GP2_CLK_SRC 30 +#define GCC_GP3_CLK 31 +#define GCC_GP3_CLK_SRC 32 +#define GCC_GPU_CFG_AHB_CLK 33 +#define GCC_GPU_GPLL0_CLK_SRC 34 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 35 +#define GCC_GPU_IREF_EN 36 +#define GCC_GPU_MEMNOC_GFX_CLK 37 +#define GCC_GPU_SNOC_DVM_GFX_CLK 38 +#define GCC_PCIE0_PHY_RCHNG_CLK 39 +#define GCC_PCIE1_PHY_RCHNG_CLK 40 +#define GCC_PCIE_0_AUX_CLK 41 +#define GCC_PCIE_0_AUX_CLK_SRC 42 +#define GCC_PCIE_0_CFG_AHB_CLK 43 +#define GCC_PCIE_0_MSTR_AXI_CLK 44 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 45 +#define GCC_PCIE_0_PIPE_CLK 46 +#define GCC_PCIE_0_PIPE_CLK_SRC 47 +#define GCC_PCIE_0_SLV_AXI_CLK 48 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 49 +#define GCC_PCIE_1_AUX_CLK 50 +#define GCC_PCIE_1_AUX_CLK_SRC 51 +#define GCC_PCIE_1_CFG_AHB_CLK 52 +#define GCC_PCIE_1_MSTR_AXI_CLK 53 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54 +#define GCC_PCIE_1_PIPE_CLK 55 +#define GCC_PCIE_1_PIPE_CLK_SRC 56 +#define GCC_PCIE_1_SLV_AXI_CLK 57 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58 +#define GCC_PCIE_THROTTLE_CORE_CLK 59 +#define GCC_PDM2_CLK 60 +#define GCC_PDM2_CLK_SRC 61 +#define GCC_PDM_AHB_CLK 62 +#define GCC_PDM_XO4_CLK 63 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 64 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 65 +#define GCC_QMIP_DISP_AHB_CLK 66 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68 +#define GCC_QUPV3_WRAP0_CORE_CLK 69 +#define GCC_QUPV3_WRAP0_S0_CLK 70 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S1_CLK 72 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S2_CLK 74 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S3_CLK 76 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77 +#define GCC_QUPV3_WRAP0_S4_CLK 78 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79 +#define GCC_QUPV3_WRAP0_S5_CLK 80 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81 +#define GCC_QUPV3_WRAP0_S6_CLK 82 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83 +#define GCC_QUPV3_WRAP0_S7_CLK 84 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 86 +#define GCC_QUPV3_WRAP1_CORE_CLK 87 +#define GCC_QUPV3_WRAP1_S0_CLK 88 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S1_CLK 90 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91 +#define GCC_QUPV3_WRAP1_S2_CLK 92 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93 +#define GCC_QUPV3_WRAP1_S3_CLK 94 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95 +#define GCC_QUPV3_WRAP1_S4_CLK 96 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97 +#define GCC_QUPV3_WRAP1_S5_CLK 98 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99 +#define 
GCC_QUPV3_WRAP1_S6_CLK 100 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101 +#define GCC_QUPV3_WRAP1_S7_CLK 102 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 106 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 107 +#define GCC_SDCC1_AHB_CLK 108 +#define GCC_SDCC1_APPS_CLK 109 +#define GCC_SDCC1_APPS_CLK_SRC 110 +#define GCC_SDCC1_ICE_CORE_CLK 111 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 112 +#define GCC_SDCC2_AHB_CLK 113 +#define GCC_SDCC2_APPS_CLK 114 +#define GCC_SDCC2_APPS_CLK_SRC 115 +#define GCC_SDCC4_AHB_CLK 116 +#define GCC_SDCC4_APPS_CLK 117 +#define GCC_SDCC4_APPS_CLK_SRC 118 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 119 +#define GCC_THROTTLE_PCIE_AHB_CLK 120 +#define GCC_TITAN_NRT_THROTTLE_CORE_CLK 121 +#define GCC_TITAN_RT_THROTTLE_CORE_CLK 122 +#define GCC_UFS_1_CLKREF_EN 123 +#define GCC_UFS_PHY_AHB_CLK 124 +#define GCC_UFS_PHY_AXI_CLK 125 +#define GCC_UFS_PHY_AXI_CLK_SRC 126 +#define GCC_UFS_PHY_ICE_CORE_CLK 127 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 128 +#define GCC_UFS_PHY_PHY_AUX_CLK 129 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 130 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 131 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 132 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 133 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 134 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 135 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 136 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 137 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 138 +#define GCC_USB30_PRIM_MASTER_CLK 139 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 140 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 141 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 142 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 143 +#define GCC_USB30_PRIM_SLEEP_CLK 144 +#define GCC_USB30_SEC_MASTER_CLK 145 +#define GCC_USB30_SEC_MASTER_CLK_SRC 146 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 147 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 148 +#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 149 +#define GCC_USB30_SEC_SLEEP_CLK 150 +#define GCC_USB3_PRIM_PHY_AUX_CLK 151 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 152 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 153 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 154 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 155 +#define GCC_USB3_SEC_PHY_AUX_CLK 156 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 157 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 158 +#define GCC_USB3_SEC_PHY_PIPE_CLK 159 +#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 160 +#define GCC_VIDEO_AHB_CLK 161 +#define GCC_VIDEO_AXI0_CLK 162 +#define GCC_VIDEO_MVP_THROTTLE_CORE_CLK 163 +#define GCC_VIDEO_XO_CLK 164 +#define GCC_GPLL0_MAIN_DIV_CDIV 165 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 166 +#define GCC_QSPI_CORE_CLK 167 +#define GCC_QSPI_CORE_CLK_SRC 168 +#define GCC_CFG_NOC_LPASS_CLK 169 +#define GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC 170 +#define GCC_MSS_CFG_AHB_CLK 171 +#define GCC_MSS_OFFLINE_AXI_CLK 172 +#define GCC_MSS_SNOC_AXI_CLK 173 +#define GCC_MSS_Q6_MEMNOC_AXI_CLK 174 +#define GCC_MSS_Q6SS_BOOT_CLK_SRC 175 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 176 +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 177 +#define GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK 178 +#define GCC_PCIE_CLKREF_EN 179 +#define GCC_WPSS_AHB_CLK 180 +#define GCC_WPSS_AHB_BDG_MST_CLK 181 +#define GCC_WPSS_RSCP_CLK 182 +#define GCC_EDP_CLKREF_EN 183 +#define GCC_SEC_CTRL_CLK_SRC 184 + +/* GCC power domains */ +#define GCC_PCIE_0_GDSC 0 +#define GCC_PCIE_1_GDSC 1 +#define GCC_UFS_PHY_GDSC 2 +#define GCC_USB30_PRIM_GDSC 3 +#define GCC_USB30_SEC_GDSC 4 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 5 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 6 +#define 
HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 7 +#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8 +#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9 + +/* GCC resets */ +#define GCC_PCIE_0_BCR 0 +#define GCC_PCIE_0_PHY_BCR 1 +#define GCC_PCIE_1_BCR 2 +#define GCC_PCIE_1_PHY_BCR 3 +#define GCC_QUSB2PHY_PRIM_BCR 4 +#define GCC_QUSB2PHY_SEC_BCR 5 +#define GCC_SDCC1_BCR 6 +#define GCC_SDCC2_BCR 7 +#define GCC_SDCC4_BCR 8 +#define GCC_UFS_PHY_BCR 9 +#define GCC_USB30_PRIM_BCR 10 +#define GCC_USB30_SEC_BCR 11 +#define GCC_USB3_DP_PHY_PRIM_BCR 12 +#define GCC_USB3_PHY_PRIM_BCR 13 +#define GCC_USB3PHY_PHY_PRIM_BCR 14 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h new file mode 100644 index 000000000000..e893415ae13d --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SC8180X_H + +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 1 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 3 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4 +#define GCC_AGGRE_USB3_MP_AXI_CLK 5 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 7 +#define GCC_BOOT_ROM_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CFG_NOC_USB3_MP_AXI_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13 +#define GCC_CPUSS_AHB_CLK 14 +#define GCC_CPUSS_AHB_CLK_SRC 15 +#define GCC_CPUSS_RBCPR_CLK 16 +#define GCC_DDRSS_GPU_AXI_CLK 17 +#define GCC_DISP_HF_AXI_CLK 18 +#define GCC_DISP_SF_AXI_CLK 19 +#define GCC_EMAC_AXI_CLK 20 +#define GCC_EMAC_PTP_CLK 21 +#define GCC_EMAC_PTP_CLK_SRC 22 +#define GCC_EMAC_RGMII_CLK 23 +#define GCC_EMAC_RGMII_CLK_SRC 24 +#define GCC_EMAC_SLV_AHB_CLK 25 +#define GCC_GP1_CLK 26 +#define GCC_GP1_CLK_SRC 27 +#define GCC_GP2_CLK 28 +#define GCC_GP2_CLK_SRC 29 +#define GCC_GP3_CLK 30 +#define GCC_GP3_CLK_SRC 31 +#define GCC_GP4_CLK 32 +#define GCC_GP4_CLK_SRC 33 +#define GCC_GP5_CLK 34 +#define GCC_GP5_CLK_SRC 35 +#define GCC_GPU_GPLL0_CLK_SRC 36 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 37 +#define GCC_GPU_MEMNOC_GFX_CLK 38 +#define GCC_GPU_SNOC_DVM_GFX_CLK 39 +#define GCC_NPU_AT_CLK 40 +#define GCC_NPU_AXI_CLK 41 +#define GCC_NPU_AXI_CLK_SRC 42 +#define GCC_NPU_GPLL0_CLK_SRC 43 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 44 +#define GCC_NPU_TRIG_CLK 45 +#define GCC_PCIE0_PHY_REFGEN_CLK 46 +#define GCC_PCIE1_PHY_REFGEN_CLK 47 +#define GCC_PCIE2_PHY_REFGEN_CLK 48 +#define GCC_PCIE3_PHY_REFGEN_CLK 49 +#define GCC_PCIE_0_AUX_CLK 50 +#define GCC_PCIE_0_AUX_CLK_SRC 51 +#define GCC_PCIE_0_CFG_AHB_CLK 52 +#define GCC_PCIE_0_MSTR_AXI_CLK 53 +#define GCC_PCIE_0_PIPE_CLK 54 +#define GCC_PCIE_0_SLV_AXI_CLK 55 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 56 +#define GCC_PCIE_1_AUX_CLK 57 +#define GCC_PCIE_1_AUX_CLK_SRC 58 +#define GCC_PCIE_1_CFG_AHB_CLK 59 +#define GCC_PCIE_1_MSTR_AXI_CLK 60 +#define GCC_PCIE_1_PIPE_CLK 61 +#define GCC_PCIE_1_SLV_AXI_CLK 62 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 63 +#define GCC_PCIE_2_AUX_CLK 64 +#define GCC_PCIE_2_AUX_CLK_SRC 65 +#define GCC_PCIE_2_CFG_AHB_CLK 66 +#define GCC_PCIE_2_MSTR_AXI_CLK 67 +#define GCC_PCIE_2_PIPE_CLK 68 +#define GCC_PCIE_2_SLV_AXI_CLK 69 +#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 70 
+#define GCC_PCIE_3_AUX_CLK 71 +#define GCC_PCIE_3_AUX_CLK_SRC 72 +#define GCC_PCIE_3_CFG_AHB_CLK 73 +#define GCC_PCIE_3_MSTR_AXI_CLK 74 +#define GCC_PCIE_3_PIPE_CLK 75 +#define GCC_PCIE_3_SLV_AXI_CLK 76 +#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 77 +#define GCC_PCIE_PHY_AUX_CLK 78 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 79 +#define GCC_PDM2_CLK 80 +#define GCC_PDM2_CLK_SRC 81 +#define GCC_PDM_AHB_CLK 82 +#define GCC_PDM_XO4_CLK 83 +#define GCC_PRNG_AHB_CLK 84 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 86 +#define GCC_QMIP_DISP_AHB_CLK 87 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 88 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89 +#define GCC_QSPI_1_CNOC_PERIPH_AHB_CLK 90 +#define GCC_QSPI_1_CORE_CLK 91 +#define GCC_QSPI_1_CORE_CLK_SRC 92 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 93 +#define GCC_QSPI_CORE_CLK 94 +#define GCC_QSPI_CORE_CLK_SRC 95 +#define GCC_QUPV3_WRAP0_S0_CLK 96 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 97 +#define GCC_QUPV3_WRAP0_S1_CLK 98 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 99 +#define GCC_QUPV3_WRAP0_S2_CLK 100 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 101 +#define GCC_QUPV3_WRAP0_S3_CLK 102 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 103 +#define GCC_QUPV3_WRAP0_S4_CLK 104 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 105 +#define GCC_QUPV3_WRAP0_S5_CLK 106 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 107 +#define GCC_QUPV3_WRAP0_S6_CLK 108 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 109 +#define GCC_QUPV3_WRAP0_S7_CLK 110 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 111 +#define GCC_QUPV3_WRAP1_S0_CLK 112 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 113 +#define GCC_QUPV3_WRAP1_S1_CLK 114 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 115 +#define GCC_QUPV3_WRAP1_S2_CLK 116 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 117 +#define GCC_QUPV3_WRAP1_S3_CLK 118 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 119 +#define GCC_QUPV3_WRAP1_S4_CLK 120 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 121 +#define GCC_QUPV3_WRAP1_S5_CLK 122 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 123 +#define GCC_QUPV3_WRAP2_S0_CLK 124 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 125 +#define GCC_QUPV3_WRAP2_S1_CLK 126 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 127 +#define GCC_QUPV3_WRAP2_S2_CLK 128 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 129 +#define GCC_QUPV3_WRAP2_S3_CLK 130 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 131 +#define GCC_QUPV3_WRAP2_S4_CLK 132 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 133 +#define GCC_QUPV3_WRAP2_S5_CLK 134 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 135 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 136 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 137 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 138 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 139 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 140 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 141 +#define GCC_SDCC2_AHB_CLK 142 +#define GCC_SDCC2_APPS_CLK 143 +#define GCC_SDCC2_APPS_CLK_SRC 144 +#define GCC_SDCC4_AHB_CLK 145 +#define GCC_SDCC4_APPS_CLK 146 +#define GCC_SDCC4_APPS_CLK_SRC 147 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 148 +#define GCC_TSIF_AHB_CLK 149 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 150 +#define GCC_TSIF_REF_CLK 151 +#define GCC_TSIF_REF_CLK_SRC 152 +#define GCC_UFS_CARD_2_AHB_CLK 153 +#define GCC_UFS_CARD_2_AXI_CLK 154 +#define GCC_UFS_CARD_2_AXI_CLK_SRC 155 +#define GCC_UFS_CARD_2_ICE_CORE_CLK 156 +#define GCC_UFS_CARD_2_ICE_CORE_CLK_SRC 157 +#define GCC_UFS_CARD_2_PHY_AUX_CLK 158 +#define GCC_UFS_CARD_2_PHY_AUX_CLK_SRC 159 +#define GCC_UFS_CARD_2_RX_SYMBOL_0_CLK 160 +#define GCC_UFS_CARD_2_RX_SYMBOL_1_CLK 161 +#define GCC_UFS_CARD_2_TX_SYMBOL_0_CLK 162 +#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK 163 +#define GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC 164 +#define GCC_UFS_CARD_AHB_CLK 165 +#define 
GCC_UFS_CARD_AXI_CLK 166 +#define GCC_UFS_CARD_AXI_CLK_SRC 167 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 168 +#define GCC_UFS_CARD_ICE_CORE_CLK 169 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 170 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 171 +#define GCC_UFS_CARD_PHY_AUX_CLK 172 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 173 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 174 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 175 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 176 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 177 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 178 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 179 +#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 180 +#define GCC_UFS_PHY_AHB_CLK 181 +#define GCC_UFS_PHY_AXI_CLK 182 +#define GCC_UFS_PHY_AXI_CLK_SRC 183 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 184 +#define GCC_UFS_PHY_ICE_CORE_CLK 185 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 186 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 187 +#define GCC_UFS_PHY_PHY_AUX_CLK 188 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 189 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 190 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 191 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 192 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 193 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 194 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 195 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 196 +#define GCC_USB30_MP_MASTER_CLK 197 +#define GCC_USB30_MP_MASTER_CLK_SRC 198 +#define GCC_USB30_MP_MOCK_UTMI_CLK 199 +#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 200 +#define GCC_USB30_MP_SLEEP_CLK 201 +#define GCC_USB30_PRIM_MASTER_CLK 202 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 203 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 204 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 205 +#define GCC_USB30_PRIM_SLEEP_CLK 206 +#define GCC_USB30_SEC_MASTER_CLK 207 +#define GCC_USB30_SEC_MASTER_CLK_SRC 208 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 209 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 210 +#define GCC_USB30_SEC_SLEEP_CLK 211 +#define GCC_USB3_MP_PHY_AUX_CLK 212 +#define GCC_USB3_MP_PHY_AUX_CLK_SRC 213 +#define GCC_USB3_MP_PHY_COM_AUX_CLK 214 +#define GCC_USB3_MP_PHY_PIPE_0_CLK 215 +#define GCC_USB3_MP_PHY_PIPE_1_CLK 216 +#define GCC_USB3_PRIM_PHY_AUX_CLK 217 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 218 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 219 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 220 +#define GCC_USB3_SEC_PHY_AUX_CLK 221 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 222 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 223 +#define GCC_USB3_SEC_PHY_PIPE_CLK 224 +#define GCC_VIDEO_AXI0_CLK 225 +#define GCC_VIDEO_AXI1_CLK 226 +#define GCC_VIDEO_AXIC_CLK 227 +#define GPLL0 228 +#define GPLL0_OUT_EVEN 229 +#define GPLL1 230 +#define GPLL4 231 +#define GPLL7 232 +#define GCC_PCIE_0_CLKREF_CLK 233 +#define GCC_PCIE_1_CLKREF_CLK 234 +#define GCC_PCIE_2_CLKREF_CLK 235 +#define GCC_PCIE_3_CLKREF_CLK 236 +#define GCC_USB3_PRIM_CLKREF_CLK 237 +#define GCC_USB3_SEC_CLKREF_CLK 238 + +#define GCC_EMAC_BCR 0 +#define GCC_GPU_BCR 1 +#define GCC_MMSS_BCR 2 +#define GCC_NPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_PHY_BCR 5 +#define GCC_PCIE_1_BCR 6 +#define GCC_PCIE_1_PHY_BCR 7 +#define GCC_PCIE_2_BCR 8 +#define GCC_PCIE_2_PHY_BCR 9 +#define GCC_PCIE_3_BCR 10 +#define GCC_PCIE_3_PHY_BCR 11 +#define GCC_PCIE_PHY_BCR 12 +#define GCC_PDM_BCR 13 +#define GCC_PRNG_BCR 14 +#define GCC_QSPI_1_BCR 15 +#define GCC_QSPI_BCR 16 +#define GCC_QUPV3_WRAPPER_0_BCR 17 +#define GCC_QUPV3_WRAPPER_1_BCR 18 +#define GCC_QUPV3_WRAPPER_2_BCR 19 +#define GCC_QUSB2PHY_5_BCR 20 +#define GCC_QUSB2PHY_MP0_BCR 21 +#define GCC_QUSB2PHY_MP1_BCR 22 +#define GCC_QUSB2PHY_PRIM_BCR 23 +#define GCC_QUSB2PHY_SEC_BCR 24 +#define 
GCC_USB3_PHY_PRIM_SP0_BCR 25 +#define GCC_USB3_PHY_PRIM_SP1_BCR 26 +#define GCC_USB3_DP_PHY_PRIM_SP0_BCR 27 +#define GCC_USB3_DP_PHY_PRIM_SP1_BCR 28 +#define GCC_USB3_PHY_SEC_BCR 29 +#define GCC_USB3PHY_PHY_SEC_BCR 30 +#define GCC_SDCC2_BCR 31 +#define GCC_SDCC4_BCR 32 +#define GCC_TSIF_BCR 33 +#define GCC_UFS_CARD_2_BCR 34 +#define GCC_UFS_CARD_BCR 35 +#define GCC_UFS_PHY_BCR 36 +#define GCC_USB30_MP_BCR 37 +#define GCC_USB30_PRIM_BCR 38 +#define GCC_USB30_SEC_BCR 39 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 40 +#define GCC_VIDEO_AXIC_CLK_BCR 41 +#define GCC_VIDEO_AXI0_CLK_BCR 42 +#define GCC_VIDEO_AXI1_CLK_BCR 43 +#define GCC_USB3_DP_PHY_SEC_BCR 44 + +/* GCC GDSCRs */ +#define EMAC_GDSC 0 +#define PCIE_0_GDSC 1 +#define PCIE_1_GDSC 2 +#define PCIE_2_GDSC 3 +#define PCIE_3_GDSC 4 +#define UFS_CARD_2_GDSC 5 +#define UFS_CARD_GDSC 6 +#define UFS_PHY_GDSC 7 +#define USB30_MP_GDSC 8 +#define USB30_PRIM_GDSC 9 +#define USB30_SEC_GDSC 10 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8350.h b/include/dt-bindings/clock/qcom,gcc-sm8350.h new file mode 100644 index 000000000000..f6be3da5f781 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm8350.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H + +/* GCC HW clocks */ +#define CORE_BI_PLL_TEST_SE 0 +#define PCIE_0_PIPE_CLK 1 +#define PCIE_1_PIPE_CLK 2 +#define UFS_CARD_RX_SYMBOL_0_CLK 3 +#define UFS_CARD_RX_SYMBOL_1_CLK 4 +#define UFS_CARD_TX_SYMBOL_0_CLK 5 +#define UFS_PHY_RX_SYMBOL_0_CLK 6 +#define UFS_PHY_RX_SYMBOL_1_CLK 7 +#define UFS_PHY_TX_SYMBOL_0_CLK 8 +#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 9 +#define USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK 10 + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 11 +#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 12 +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 13 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 14 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 15 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 16 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 17 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 18 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 19 +#define GCC_BOOT_ROM_AHB_CLK 20 +#define GCC_CAMERA_HF_AXI_CLK 21 +#define GCC_CAMERA_SF_AXI_CLK 22 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24 +#define GCC_DDRSS_GPU_AXI_CLK 25 +#define GCC_DDRSS_PCIE_SF_TBU_CLK 26 +#define GCC_DISP_HF_AXI_CLK 27 +#define GCC_DISP_SF_AXI_CLK 28 +#define GCC_GP1_CLK 29 +#define GCC_GP1_CLK_SRC 30 +#define GCC_GP2_CLK 31 +#define GCC_GP2_CLK_SRC 32 +#define GCC_GP3_CLK 33 +#define GCC_GP3_CLK_SRC 34 +#define GCC_GPLL0 35 +#define GCC_GPLL0_OUT_EVEN 36 +#define GCC_GPLL4 37 +#define GCC_GPLL9 38 +#define GCC_GPU_GPLL0_CLK_SRC 39 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 40 +#define GCC_GPU_IREF_EN 41 +#define GCC_GPU_MEMNOC_GFX_CLK 42 +#define GCC_GPU_SNOC_DVM_GFX_CLK 43 +#define GCC_PCIE0_PHY_RCHNG_CLK 44 +#define GCC_PCIE1_PHY_RCHNG_CLK 45 +#define GCC_PCIE_0_AUX_CLK 46 +#define GCC_PCIE_0_AUX_CLK_SRC 47 +#define GCC_PCIE_0_CFG_AHB_CLK 48 +#define GCC_PCIE_0_CLKREF_EN 49 +#define GCC_PCIE_0_MSTR_AXI_CLK 50 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51 +#define GCC_PCIE_0_PIPE_CLK 52 +#define GCC_PCIE_0_PIPE_CLK_SRC 53 +#define GCC_PCIE_0_SLV_AXI_CLK 54 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55 +#define GCC_PCIE_1_AUX_CLK 56 +#define GCC_PCIE_1_AUX_CLK_SRC 57 +#define GCC_PCIE_1_CFG_AHB_CLK 58 +#define 
GCC_PCIE_1_CLKREF_EN 59 +#define GCC_PCIE_1_MSTR_AXI_CLK 60 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 61 +#define GCC_PCIE_1_PIPE_CLK 62 +#define GCC_PCIE_1_PIPE_CLK_SRC 63 +#define GCC_PCIE_1_SLV_AXI_CLK 64 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65 +#define GCC_PDM2_CLK 66 +#define GCC_PDM2_CLK_SRC 67 +#define GCC_PDM_AHB_CLK 68 +#define GCC_PDM_XO4_CLK 69 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 70 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 71 +#define GCC_QMIP_DISP_AHB_CLK 72 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 74 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 75 +#define GCC_QUPV3_WRAP0_CORE_CLK 76 +#define GCC_QUPV3_WRAP0_S0_CLK 77 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 78 +#define GCC_QUPV3_WRAP0_S1_CLK 79 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S2_CLK 81 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S3_CLK 83 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 84 +#define GCC_QUPV3_WRAP0_S4_CLK 85 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 86 +#define GCC_QUPV3_WRAP0_S5_CLK 87 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 88 +#define GCC_QUPV3_WRAP0_S6_CLK 89 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 90 +#define GCC_QUPV3_WRAP0_S7_CLK 91 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 92 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93 +#define GCC_QUPV3_WRAP1_CORE_CLK 94 +#define GCC_QUPV3_WRAP1_S0_CLK 95 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S1_CLK 97 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S2_CLK 99 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 100 +#define GCC_QUPV3_WRAP1_S3_CLK 101 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 102 +#define GCC_QUPV3_WRAP1_S4_CLK 103 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 104 +#define GCC_QUPV3_WRAP1_S5_CLK 105 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 106 +#define GCC_QUPV3_WRAP2_CORE_2X_CLK 107 +#define GCC_QUPV3_WRAP2_CORE_CLK 108 +#define GCC_QUPV3_WRAP2_S0_CLK 109 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110 +#define GCC_QUPV3_WRAP2_S1_CLK 111 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112 +#define GCC_QUPV3_WRAP2_S2_CLK 113 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114 +#define GCC_QUPV3_WRAP2_S3_CLK 115 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_S4_CLK 117 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118 +#define GCC_QUPV3_WRAP2_S5_CLK 119 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126 +#define GCC_SDCC2_AHB_CLK 127 +#define GCC_SDCC2_APPS_CLK 128 +#define GCC_SDCC2_APPS_CLK_SRC 129 +#define GCC_SDCC4_AHB_CLK 130 +#define GCC_SDCC4_APPS_CLK 131 +#define GCC_SDCC4_APPS_CLK_SRC 132 +#define GCC_THROTTLE_PCIE_AHB_CLK 133 +#define GCC_UFS_1_CLKREF_EN 134 +#define GCC_UFS_CARD_AHB_CLK 135 +#define GCC_UFS_CARD_AXI_CLK 136 +#define GCC_UFS_CARD_AXI_CLK_SRC 137 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 138 +#define GCC_UFS_CARD_ICE_CORE_CLK 139 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 140 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 141 +#define GCC_UFS_CARD_PHY_AUX_CLK 142 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 143 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 144 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 145 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 146 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 147 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 148 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 149 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 150 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 151 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 152 +#define 
GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 153 +#define GCC_UFS_PHY_AHB_CLK 154 +#define GCC_UFS_PHY_AXI_CLK 155 +#define GCC_UFS_PHY_AXI_CLK_SRC 156 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157 +#define GCC_UFS_PHY_ICE_CORE_CLK 158 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160 +#define GCC_UFS_PHY_PHY_AUX_CLK 161 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172 +#define GCC_USB30_PRIM_MASTER_CLK 173 +#define GCC_USB30_PRIM_MASTER_CLK__FORCE_MEM_CORE_ON 174 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 175 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 176 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 177 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 178 +#define GCC_USB30_PRIM_SLEEP_CLK 179 +#define GCC_USB30_SEC_MASTER_CLK 180 +#define GCC_USB30_SEC_MASTER_CLK__FORCE_MEM_CORE_ON 181 +#define GCC_USB30_SEC_MASTER_CLK_SRC 182 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 183 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 184 +#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 185 +#define GCC_USB30_SEC_SLEEP_CLK 186 +#define GCC_USB3_PRIM_PHY_AUX_CLK 187 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 188 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 189 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 190 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 191 +#define GCC_USB3_SEC_CLKREF_EN 192 +#define GCC_USB3_SEC_PHY_AUX_CLK 193 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 194 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 195 +#define GCC_USB3_SEC_PHY_PIPE_CLK 196 +#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 197 +#define GCC_VIDEO_AXI0_CLK 198 +#define GCC_VIDEO_AXI1_CLK 199 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_MMSS_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_LINK_DOWN_BCR 5 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6 +#define GCC_PCIE_0_PHY_BCR 7 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8 +#define GCC_PCIE_1_BCR 9 +#define GCC_PCIE_1_LINK_DOWN_BCR 10 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11 +#define GCC_PCIE_1_PHY_BCR 12 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13 +#define GCC_PCIE_PHY_CFG_AHB_BCR 14 +#define GCC_PCIE_PHY_COM_BCR 15 +#define GCC_PDM_BCR 16 +#define GCC_QUPV3_WRAPPER_0_BCR 17 +#define GCC_QUPV3_WRAPPER_1_BCR 18 +#define GCC_QUPV3_WRAPPER_2_BCR 19 +#define GCC_QUSB2PHY_PRIM_BCR 20 +#define GCC_QUSB2PHY_SEC_BCR 21 +#define GCC_SDCC2_BCR 22 +#define GCC_SDCC4_BCR 23 +#define GCC_UFS_CARD_BCR 24 +#define GCC_UFS_PHY_BCR 25 +#define GCC_USB30_PRIM_BCR 26 +#define GCC_USB30_SEC_BCR 27 +#define GCC_USB3_DP_PHY_PRIM_BCR 28 +#define GCC_USB3_DP_PHY_SEC_BCR 29 +#define GCC_USB3_PHY_PRIM_BCR 30 +#define GCC_USB3_PHY_SEC_BCR 31 +#define GCC_USB3PHY_PHY_PRIM_BCR 32 +#define GCC_USB3PHY_PHY_SEC_BCR 33 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 34 +#define GCC_VIDEO_AXI0_CLK_ARES 35 +#define GCC_VIDEO_AXI1_CLK_ARES 36 +#define GCC_VIDEO_BCR 37 + +/* GCC power domains */ +#define PCIE_0_GDSC 0 +#define PCIE_1_GDSC 1 +#define UFS_CARD_GDSC 2 +#define UFS_PHY_GDSC 3 +#define USB30_PRIM_GDSC 4 +#define USB30_SEC_GDSC 5 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 6 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 7 +#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 8 +#define 
HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 9 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm660.h b/include/dt-bindings/clock/qcom,gpucc-sdm660.h new file mode 100644 index 000000000000..7ea3e53df58c --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sdm660.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2020, AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org> + */ + +#ifndef _DT_BINDINGS_CLK_SDM_GPUCC_660_H +#define _DT_BINDINGS_CLK_SDM_GPUCC_660_H + +#define GPUCC_CXO_CLK 0 +#define GPU_PLL0_PLL 1 +#define GPU_PLL1_PLL 2 +#define GFX3D_CLK_SRC 3 +#define RBCPR_CLK_SRC 4 +#define RBBMTIMER_CLK_SRC 5 +#define GPUCC_RBCPR_CLK 6 +#define GPUCC_GFX3D_CLK 7 +#define GPUCC_RBBMTIMER_CLK 8 + +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#define GPU_CX_BCR 0 +#define GPU_GX_BCR 1 +#define RBCPR_BCR 2 +#define SPDM_BCR 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h new file mode 100644 index 000000000000..f9dbc21cb5c7 --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_660_H +#define _DT_BINDINGS_CLK_MSM_MMCC_660_H + +#define AHB_CLK_SRC 0 +#define BYTE0_CLK_SRC 1 +#define BYTE1_CLK_SRC 2 +#define CAMSS_GP0_CLK_SRC 3 +#define CAMSS_GP1_CLK_SRC 4 +#define CCI_CLK_SRC 5 +#define CPP_CLK_SRC 6 +#define CSI0_CLK_SRC 7 +#define CSI0PHYTIMER_CLK_SRC 8 +#define CSI1_CLK_SRC 9 +#define CSI1PHYTIMER_CLK_SRC 10 +#define CSI2_CLK_SRC 11 +#define CSI2PHYTIMER_CLK_SRC 12 +#define CSI3_CLK_SRC 13 +#define CSIPHY_CLK_SRC 14 +#define DP_AUX_CLK_SRC 15 +#define DP_CRYPTO_CLK_SRC 16 +#define DP_GTC_CLK_SRC 17 +#define DP_LINK_CLK_SRC 18 +#define DP_PIXEL_CLK_SRC 19 +#define ESC0_CLK_SRC 20 +#define ESC1_CLK_SRC 21 +#define JPEG0_CLK_SRC 22 +#define MCLK0_CLK_SRC 23 +#define MCLK1_CLK_SRC 24 +#define MCLK2_CLK_SRC 25 +#define MCLK3_CLK_SRC 26 +#define MDP_CLK_SRC 27 +#define MMPLL0_PLL 28 +#define MMPLL10_PLL 29 +#define MMPLL1_PLL 30 +#define MMPLL3_PLL 31 +#define MMPLL4_PLL 32 +#define MMPLL5_PLL 33 +#define MMPLL6_PLL 34 +#define MMPLL7_PLL 35 +#define MMPLL8_PLL 36 +#define BIMC_SMMU_AHB_CLK 37 +#define BIMC_SMMU_AXI_CLK 38 +#define CAMSS_AHB_CLK 39 +#define CAMSS_CCI_AHB_CLK 40 +#define CAMSS_CCI_CLK 41 +#define CAMSS_CPHY_CSID0_CLK 42 +#define CAMSS_CPHY_CSID1_CLK 43 +#define CAMSS_CPHY_CSID2_CLK 44 +#define CAMSS_CPHY_CSID3_CLK 45 +#define CAMSS_CPP_AHB_CLK 46 +#define CAMSS_CPP_AXI_CLK 47 +#define CAMSS_CPP_CLK 48 +#define CAMSS_CPP_VBIF_AHB_CLK 49 +#define CAMSS_CSI0_AHB_CLK 50 +#define CAMSS_CSI0_CLK 51 +#define CAMSS_CSI0PHYTIMER_CLK 52 +#define CAMSS_CSI0PIX_CLK 53 +#define CAMSS_CSI0RDI_CLK 54 +#define CAMSS_CSI1_AHB_CLK 55 +#define CAMSS_CSI1_CLK 56 +#define CAMSS_CSI1PHYTIMER_CLK 57 +#define CAMSS_CSI1PIX_CLK 58 +#define CAMSS_CSI1RDI_CLK 59 +#define CAMSS_CSI2_AHB_CLK 60 +#define CAMSS_CSI2_CLK 61 +#define CAMSS_CSI2PHYTIMER_CLK 62 +#define CAMSS_CSI2PIX_CLK 63 +#define CAMSS_CSI2RDI_CLK 64 +#define CAMSS_CSI3_AHB_CLK 65 +#define CAMSS_CSI3_CLK 66 +#define CAMSS_CSI3PIX_CLK 67 +#define CAMSS_CSI3RDI_CLK 68 +#define CAMSS_CSI_VFE0_CLK 69 +#define CAMSS_CSI_VFE1_CLK 70 +#define CAMSS_CSIPHY0_CLK 71 +#define CAMSS_CSIPHY1_CLK 72 +#define CAMSS_CSIPHY2_CLK 73 +#define CAMSS_GP0_CLK 74 +#define CAMSS_GP1_CLK 75 +#define 
CAMSS_ISPIF_AHB_CLK 76 +#define CAMSS_JPEG0_CLK 77 +#define CAMSS_JPEG_AHB_CLK 78 +#define CAMSS_JPEG_AXI_CLK 79 +#define CAMSS_MCLK0_CLK 80 +#define CAMSS_MCLK1_CLK 81 +#define CAMSS_MCLK2_CLK 82 +#define CAMSS_MCLK3_CLK 83 +#define CAMSS_MICRO_AHB_CLK 84 +#define CAMSS_TOP_AHB_CLK 85 +#define CAMSS_VFE0_AHB_CLK 86 +#define CAMSS_VFE0_CLK 87 +#define CAMSS_VFE0_STREAM_CLK 88 +#define CAMSS_VFE1_AHB_CLK 89 +#define CAMSS_VFE1_CLK 90 +#define CAMSS_VFE1_STREAM_CLK 91 +#define CAMSS_VFE_VBIF_AHB_CLK 92 +#define CAMSS_VFE_VBIF_AXI_CLK 93 +#define CSIPHY_AHB2CRIF_CLK 94 +#define CXO_CLK 95 +#define MDSS_AHB_CLK 96 +#define MDSS_AXI_CLK 97 +#define MDSS_BYTE0_CLK 98 +#define MDSS_BYTE0_INTF_CLK 99 +#define MDSS_BYTE0_INTF_DIV_CLK 100 +#define MDSS_BYTE1_CLK 101 +#define MDSS_BYTE1_INTF_CLK 102 +#define MDSS_DP_AUX_CLK 103 +#define MDSS_DP_CRYPTO_CLK 104 +#define MDSS_DP_GTC_CLK 105 +#define MDSS_DP_LINK_CLK 106 +#define MDSS_DP_LINK_INTF_CLK 107 +#define MDSS_DP_PIXEL_CLK 108 +#define MDSS_ESC0_CLK 109 +#define MDSS_ESC1_CLK 110 +#define MDSS_HDMI_DP_AHB_CLK 111 +#define MDSS_MDP_CLK 112 +#define MDSS_PCLK0_CLK 113 +#define MDSS_PCLK1_CLK 114 +#define MDSS_ROT_CLK 115 +#define MDSS_VSYNC_CLK 116 +#define MISC_AHB_CLK 117 +#define MISC_CXO_CLK 118 +#define MNOC_AHB_CLK 119 +#define SNOC_DVM_AXI_CLK 120 +#define THROTTLE_CAMSS_AHB_CLK 121 +#define THROTTLE_CAMSS_AXI_CLK 122 +#define THROTTLE_MDSS_AHB_CLK 123 +#define THROTTLE_MDSS_AXI_CLK 124 +#define THROTTLE_VIDEO_AHB_CLK 125 +#define THROTTLE_VIDEO_AXI_CLK 126 +#define VIDEO_AHB_CLK 127 +#define VIDEO_AXI_CLK 128 +#define VIDEO_CORE_CLK 129 +#define VIDEO_SUBCORE0_CLK 130 +#define PCLK0_CLK_SRC 131 +#define PCLK1_CLK_SRC 132 +#define ROT_CLK_SRC 133 +#define VFE0_CLK_SRC 134 +#define VFE1_CLK_SRC 135 +#define VIDEO_CORE_CLK_SRC 136 +#define VSYNC_CLK_SRC 137 +#define MDSS_BYTE1_INTF_DIV_CLK 138 +#define AXI_CLK_SRC 139 + +#define VENUS_GDSC 0 +#define VENUS_CORE0_GDSC 1 +#define MDSS_GDSC 2 +#define CAMSS_TOP_GDSC 3 +#define CAMSS_VFE0_GDSC 4 +#define CAMSS_VFE1_GDSC 5 +#define CAMSS_CPP_GDSC 6 +#define BIMC_SMMU_GDSC 7 + +#define CAMSS_MICRO_BCR 0 + +#endif + diff --git a/include/dt-bindings/clock/qcom,videocc-sm8250.h b/include/dt-bindings/clock/qcom,videocc-sm8250.h index 2b2b3867af25..8d321ac3b1fa 100644 --- a/include/dt-bindings/clock/qcom,videocc-sm8250.h +++ b/include/dt-bindings/clock/qcom,videocc-sm8250.h @@ -16,6 +16,8 @@ #define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 6 #define VIDEO_CC_PLL0 7 #define VIDEO_CC_PLL1 8 +#define VIDEO_CC_MVS0_DIV_CLK_SRC 9 +#define VIDEO_CC_MVS0_CLK 10 /* VIDEO_CC resets */ #define VIDEO_CC_CVP_INTERFACE_BCR 0 diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h index 0a06c5f514d7..83c72a163fd3 100644 --- a/include/dt-bindings/clock/rk3368-cru.h +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -78,6 +78,7 @@ #define SCLK_TIMER13 136 #define SCLK_TIMER14 137 #define SCLK_TIMER15 138 +#define SCLK_VIP_OUT 139 #define DCLK_VOP 190 #define MCLK_CRYPTO 191 @@ -148,6 +149,8 @@ #define PCLK_VIP 367 #define PCLK_WDT 368 #define PCLK_EFUSE256 369 +#define PCLK_DPHYRX 370 +#define PCLK_DPHYTX0 371 /* hclk gates */ #define HCLK_SFC 448 diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h index 76136132a13e..890368d252c4 100644 --- a/include/dt-bindings/clock/sun50i-h6-r-ccu.h +++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h @@ -21,4 +21,6 @@ #define CLK_IR 11 #define CLK_W1 12 +#define CLK_R_APB2_RSB 13 + #endif /* 
_DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h new file mode 100644 index 000000000000..4fc08b0df2f3 --- /dev/null +++ b/include/dt-bindings/clock/sun50i-h616-ccu.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (C) 2020 Arm Ltd. + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_H616_H_ +#define _DT_BINDINGS_CLK_SUN50I_H616_H_ + +#define CLK_PLL_PERIPH0 4 + +#define CLK_CPUX 21 + +#define CLK_APB1 26 + +#define CLK_DE 29 +#define CLK_BUS_DE 30 +#define CLK_DEINTERLACE 31 +#define CLK_BUS_DEINTERLACE 32 +#define CLK_G2D 33 +#define CLK_BUS_G2D 34 +#define CLK_GPU0 35 +#define CLK_BUS_GPU 36 +#define CLK_GPU1 37 +#define CLK_CE 38 +#define CLK_BUS_CE 39 +#define CLK_VE 40 +#define CLK_BUS_VE 41 +#define CLK_BUS_DMA 42 +#define CLK_BUS_HSTIMER 43 +#define CLK_AVS 44 +#define CLK_BUS_DBG 45 +#define CLK_BUS_PSI 46 +#define CLK_BUS_PWM 47 +#define CLK_BUS_IOMMU 48 + +#define CLK_MBUS_DMA 50 +#define CLK_MBUS_VE 51 +#define CLK_MBUS_CE 52 +#define CLK_MBUS_TS 53 +#define CLK_MBUS_NAND 54 +#define CLK_MBUS_G2D 55 + +#define CLK_NAND0 57 +#define CLK_NAND1 58 +#define CLK_BUS_NAND 59 +#define CLK_MMC0 60 +#define CLK_MMC1 61 +#define CLK_MMC2 62 +#define CLK_BUS_MMC0 63 +#define CLK_BUS_MMC1 64 +#define CLK_BUS_MMC2 65 +#define CLK_BUS_UART0 66 +#define CLK_BUS_UART1 67 +#define CLK_BUS_UART2 68 +#define CLK_BUS_UART3 69 +#define CLK_BUS_UART4 70 +#define CLK_BUS_UART5 71 +#define CLK_BUS_I2C0 72 +#define CLK_BUS_I2C1 73 +#define CLK_BUS_I2C2 74 +#define CLK_BUS_I2C3 75 +#define CLK_BUS_I2C4 76 +#define CLK_SPI0 77 +#define CLK_SPI1 78 +#define CLK_BUS_SPI0 79 +#define CLK_BUS_SPI1 80 +#define CLK_EMAC_25M 81 +#define CLK_BUS_EMAC0 82 +#define CLK_BUS_EMAC1 83 +#define CLK_TS 84 +#define CLK_BUS_TS 85 +#define CLK_BUS_THS 86 +#define CLK_SPDIF 87 +#define CLK_BUS_SPDIF 88 +#define CLK_DMIC 89 +#define CLK_BUS_DMIC 90 +#define CLK_AUDIO_CODEC_1X 91 +#define CLK_AUDIO_CODEC_4X 92 +#define CLK_BUS_AUDIO_CODEC 93 +#define CLK_AUDIO_HUB 94 +#define CLK_BUS_AUDIO_HUB 95 +#define CLK_USB_OHCI0 96 +#define CLK_USB_PHY0 97 +#define CLK_USB_OHCI1 98 +#define CLK_USB_PHY1 99 +#define CLK_USB_OHCI2 100 +#define CLK_USB_PHY2 101 +#define CLK_USB_OHCI3 102 +#define CLK_USB_PHY3 103 +#define CLK_BUS_OHCI0 104 +#define CLK_BUS_OHCI1 105 +#define CLK_BUS_OHCI2 106 +#define CLK_BUS_OHCI3 107 +#define CLK_BUS_EHCI0 108 +#define CLK_BUS_EHCI1 109 +#define CLK_BUS_EHCI2 110 +#define CLK_BUS_EHCI3 111 +#define CLK_BUS_OTG 112 +#define CLK_BUS_KEYADC 113 +#define CLK_HDMI 114 +#define CLK_HDMI_SLOW 115 +#define CLK_HDMI_CEC 116 +#define CLK_BUS_HDMI 117 +#define CLK_BUS_TCON_TOP 118 +#define CLK_TCON_TV0 119 +#define CLK_TCON_TV1 120 +#define CLK_BUS_TCON_TV0 121 +#define CLK_BUS_TCON_TV1 122 +#define CLK_TVE0 123 +#define CLK_BUS_TVE_TOP 124 +#define CLK_BUS_TVE0 125 +#define CLK_HDCP 126 +#define CLK_BUS_HDCP 127 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */ diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index ab8b8a737a0a..9cfcc3baa52c 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -307,7 +307,7 @@ #define TEGRA210_CLK_AUDIO4 275 #define TEGRA210_CLK_SPDIF 276 /* 277 */ -/* 278 */ +#define TEGRA210_CLK_QSPI_PM 278 /* 279 */ /* 280 */ #define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */ diff --git a/include/dt-bindings/clock/xlnx-vcu.h b/include/dt-bindings/clock/xlnx-vcu.h new file mode 
100644 index 000000000000..1ed76b9563b6 --- /dev/null +++ b/include/dt-bindings/clock/xlnx-vcu.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de> + */ + +#ifndef _DT_BINDINGS_CLOCK_XLNX_VCU_H +#define _DT_BINDINGS_CLOCK_XLNX_VCU_H + +#define CLK_XVCU_ENC_CORE 0 +#define CLK_XVCU_ENC_MCU 1 +#define CLK_XVCU_DEC_CORE 2 +#define CLK_XVCU_DEC_MCU 3 +#define CLK_XVCU_NUM_CLOCKS 4 + +#endif /* _DT_BINDINGS_CLOCK_XLNX_VCU_H */ diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h deleted file mode 100644 index e04126111aae..000000000000 --- a/include/dt-bindings/clock/zx296702-clock.h +++ /dev/null @@ -1,180 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2014 Linaro Ltd. - * Copyright (C) 2014 ZTE Corporation. - */ - -#ifndef __DT_BINDINGS_CLOCK_ZX296702_H -#define __DT_BINDINGS_CLOCK_ZX296702_H - -#define ZX296702_OSC 0 -#define ZX296702_PLL_A9 1 -#define ZX296702_PLL_A9_350M 2 -#define ZX296702_PLL_MAC_1000M 3 -#define ZX296702_PLL_MAC_333M 4 -#define ZX296702_PLL_MM0_1188M 5 -#define ZX296702_PLL_MM0_396M 6 -#define ZX296702_PLL_MM0_198M 7 -#define ZX296702_PLL_MM1_108M 8 -#define ZX296702_PLL_MM1_72M 9 -#define ZX296702_PLL_MM1_54M 10 -#define ZX296702_PLL_LSP_104M 11 -#define ZX296702_PLL_LSP_26M 12 -#define ZX296702_PLL_AUDIO_294M912 13 -#define ZX296702_PLL_DDR_266M 14 -#define ZX296702_CLK_148M5 15 -#define ZX296702_MATRIX_ACLK 16 -#define ZX296702_MAIN_HCLK 17 -#define ZX296702_MAIN_PCLK 18 -#define ZX296702_CLK_500 19 -#define ZX296702_CLK_250 20 -#define ZX296702_CLK_125 21 -#define ZX296702_CLK_74M25 22 -#define ZX296702_A9_WCLK 23 -#define ZX296702_A9_AS1_ACLK_MUX 24 -#define ZX296702_A9_TRACE_CLKIN_MUX 25 -#define ZX296702_A9_AS1_ACLK_DIV 26 -#define ZX296702_CLK_2 27 -#define ZX296702_CLK_27 28 -#define ZX296702_DECPPU_ACLK_MUX 29 -#define ZX296702_PPU_ACLK_MUX 30 -#define ZX296702_MALI400_ACLK_MUX 31 -#define ZX296702_VOU_ACLK_MUX 32 -#define ZX296702_VOU_MAIN_WCLK_MUX 33 -#define ZX296702_VOU_AUX_WCLK_MUX 34 -#define ZX296702_VOU_SCALER_WCLK_MUX 35 -#define ZX296702_R2D_ACLK_MUX 36 -#define ZX296702_R2D_WCLK_MUX 37 -#define ZX296702_CLK_50 38 -#define ZX296702_CLK_25 39 -#define ZX296702_CLK_12 40 -#define ZX296702_CLK_16M384 41 -#define ZX296702_CLK_32K768 42 -#define ZX296702_SEC_WCLK_DIV 43 -#define ZX296702_DDR_WCLK_MUX 44 -#define ZX296702_NAND_WCLK_MUX 45 -#define ZX296702_LSP_26_WCLK_MUX 46 -#define ZX296702_A9_AS0_ACLK 47 -#define ZX296702_A9_AS1_ACLK 48 -#define ZX296702_A9_TRACE_CLKIN 49 -#define ZX296702_DECPPU_AXI_M_ACLK 50 -#define ZX296702_DECPPU_AHB_S_HCLK 51 -#define ZX296702_PPU_AXI_M_ACLK 52 -#define ZX296702_PPU_AHB_S_HCLK 53 -#define ZX296702_VOU_AXI_M_ACLK 54 -#define ZX296702_VOU_APB_PCLK 55 -#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56 -#define ZX296702_VOU_AUX_CHANNEL_WCLK 57 -#define ZX296702_VOU_HDMI_OSCLK_CEC 58 -#define ZX296702_VOU_SCALER_WCLK 59 -#define ZX296702_MALI400_AXI_M_ACLK 60 -#define ZX296702_MALI400_APB_PCLK 61 -#define ZX296702_R2D_WCLK 62 -#define ZX296702_R2D_AXI_M_ACLK 63 -#define ZX296702_R2D_AHB_HCLK 64 -#define ZX296702_DDR3_AXI_S0_ACLK 65 -#define ZX296702_DDR3_APB_PCLK 66 -#define ZX296702_DDR3_WCLK 67 -#define ZX296702_USB20_0_AHB_HCLK 68 -#define ZX296702_USB20_0_EXTREFCLK 69 -#define ZX296702_USB20_1_AHB_HCLK 70 -#define ZX296702_USB20_1_EXTREFCLK 71 -#define ZX296702_USB20_2_AHB_HCLK 72 -#define ZX296702_USB20_2_EXTREFCLK 73 -#define ZX296702_GMAC_AXI_M_ACLK 74 -#define 
ZX296702_GMAC_APB_PCLK 75 -#define ZX296702_GMAC_125_CLKIN 76 -#define ZX296702_GMAC_RMII_CLKIN 77 -#define ZX296702_GMAC_25M_CLK 78 -#define ZX296702_NANDFLASH_AHB_HCLK 79 -#define ZX296702_NANDFLASH_WCLK 80 -#define ZX296702_LSP0_APB_PCLK 81 -#define ZX296702_LSP0_AHB_HCLK 82 -#define ZX296702_LSP0_26M_WCLK 83 -#define ZX296702_LSP0_104M_WCLK 84 -#define ZX296702_LSP0_16M384_WCLK 85 -#define ZX296702_LSP1_APB_PCLK 86 -#define ZX296702_LSP1_26M_WCLK 87 -#define ZX296702_LSP1_104M_WCLK 88 -#define ZX296702_LSP1_32K_CLK 89 -#define ZX296702_AON_HCLK 90 -#define ZX296702_SYS_CTRL_PCLK 91 -#define ZX296702_DMA_PCLK 92 -#define ZX296702_DMA_ACLK 93 -#define ZX296702_SEC_HCLK 94 -#define ZX296702_AES_WCLK 95 -#define ZX296702_DES_WCLK 96 -#define ZX296702_IRAM_ACLK 97 -#define ZX296702_IROM_ACLK 98 -#define ZX296702_BOOT_CTRL_HCLK 99 -#define ZX296702_EFUSE_CLK_30 100 -#define ZX296702_VOU_MAIN_CHANNEL_DIV 101 -#define ZX296702_VOU_AUX_CHANNEL_DIV 102 -#define ZX296702_VOU_TV_ENC_HD_DIV 103 -#define ZX296702_VOU_TV_ENC_SD_DIV 104 -#define ZX296702_VL0_MUX 105 -#define ZX296702_VL1_MUX 106 -#define ZX296702_VL2_MUX 107 -#define ZX296702_GL0_MUX 108 -#define ZX296702_GL1_MUX 109 -#define ZX296702_GL2_MUX 110 -#define ZX296702_WB_MUX 111 -#define ZX296702_HDMI_MUX 112 -#define ZX296702_VOU_TV_ENC_HD_MUX 113 -#define ZX296702_VOU_TV_ENC_SD_MUX 114 -#define ZX296702_VL0_CLK 115 -#define ZX296702_VL1_CLK 116 -#define ZX296702_VL2_CLK 117 -#define ZX296702_GL0_CLK 118 -#define ZX296702_GL1_CLK 119 -#define ZX296702_GL2_CLK 120 -#define ZX296702_WB_CLK 121 -#define ZX296702_CL_CLK 122 -#define ZX296702_MAIN_MIX_CLK 123 -#define ZX296702_AUX_MIX_CLK 124 -#define ZX296702_HDMI_CLK 125 -#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126 -#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127 -#define ZX296702_A9_PERIPHCLK 128 -#define ZX296702_TOPCLK_END 129 - -#define ZX296702_SDMMC1_WCLK_MUX 0 -#define ZX296702_SDMMC1_WCLK_DIV 1 -#define ZX296702_SDMMC1_WCLK 2 -#define ZX296702_SDMMC1_PCLK 3 -#define ZX296702_SPDIF0_WCLK_MUX 4 -#define ZX296702_SPDIF0_WCLK 5 -#define ZX296702_SPDIF0_PCLK 6 -#define ZX296702_SPDIF0_DIV 7 -#define ZX296702_I2S0_WCLK_MUX 8 -#define ZX296702_I2S0_WCLK 9 -#define ZX296702_I2S0_PCLK 10 -#define ZX296702_I2S0_DIV 11 -#define ZX296702_I2S1_WCLK_MUX 12 -#define ZX296702_I2S1_WCLK 13 -#define ZX296702_I2S1_PCLK 14 -#define ZX296702_I2S1_DIV 15 -#define ZX296702_I2S2_WCLK_MUX 16 -#define ZX296702_I2S2_WCLK 17 -#define ZX296702_I2S2_PCLK 18 -#define ZX296702_I2S2_DIV 19 -#define ZX296702_GPIO_CLK 20 -#define ZX296702_LSP0CLK_END 21 - -#define ZX296702_UART0_WCLK_MUX 0 -#define ZX296702_UART0_WCLK 1 -#define ZX296702_UART0_PCLK 2 -#define ZX296702_UART1_WCLK_MUX 3 -#define ZX296702_UART1_WCLK 4 -#define ZX296702_UART1_PCLK 5 -#define ZX296702_SDMMC0_WCLK_MUX 6 -#define ZX296702_SDMMC0_WCLK_DIV 7 -#define ZX296702_SDMMC0_WCLK 8 -#define ZX296702_SDMMC0_PCLK 9 -#define ZX296702_SPDIF1_WCLK_MUX 10 -#define ZX296702_SPDIF1_WCLK 11 -#define ZX296702_SPDIF1_PCLK 12 -#define ZX296702_SPDIF1_DIV 13 -#define ZX296702_LSP1CLK_END 14 - -#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */ diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h new file mode 100644 index 000000000000..f0ae03634a96 --- /dev/null +++ b/include/dt-bindings/input/cros-ec-keyboard.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides the constants of the standard Chrome OS key matrix + * for cros-ec keyboard-controller bindings. 
+ * + * Copyright (c) 2021 Google, Inc + */ + +#ifndef _CROS_EC_KEYBOARD_H +#define _CROS_EC_KEYBOARD_H + +#define CROS_STD_TOP_ROW_KEYMAP \ + MATRIX_KEY(0x00, 0x02, KEY_F1) \ + MATRIX_KEY(0x03, 0x02, KEY_F2) \ + MATRIX_KEY(0x02, 0x02, KEY_F3) \ + MATRIX_KEY(0x01, 0x02, KEY_F4) \ + MATRIX_KEY(0x03, 0x04, KEY_F5) \ + MATRIX_KEY(0x02, 0x04, KEY_F6) \ + MATRIX_KEY(0x01, 0x04, KEY_F7) \ + MATRIX_KEY(0x02, 0x09, KEY_F8) \ + MATRIX_KEY(0x01, 0x09, KEY_F9) \ + MATRIX_KEY(0x00, 0x04, KEY_F10) + +#define CROS_STD_MAIN_KEYMAP \ + MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA) \ + MATRIX_KEY(0x00, 0x03, KEY_B) \ + MATRIX_KEY(0x00, 0x05, KEY_RO) \ + MATRIX_KEY(0x00, 0x06, KEY_N) \ + MATRIX_KEY(0x00, 0x08, KEY_EQUAL) \ + MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) \ + MATRIX_KEY(0x01, 0x01, KEY_ESC) \ + MATRIX_KEY(0x01, 0x03, KEY_G) \ + MATRIX_KEY(0x01, 0x06, KEY_H) \ + MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) \ + MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) \ + MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) \ + \ + MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) \ + MATRIX_KEY(0x02, 0x01, KEY_TAB) \ + MATRIX_KEY(0x02, 0x03, KEY_T) \ + MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) \ + MATRIX_KEY(0x02, 0x06, KEY_Y) \ + MATRIX_KEY(0x02, 0x07, KEY_102ND) \ + MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) \ + MATRIX_KEY(0x02, 0x0a, KEY_YEN) \ + \ + MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) \ + MATRIX_KEY(0x03, 0x01, KEY_GRAVE) \ + MATRIX_KEY(0x03, 0x03, KEY_5) \ + MATRIX_KEY(0x03, 0x06, KEY_6) \ + MATRIX_KEY(0x03, 0x08, KEY_MINUS) \ + MATRIX_KEY(0x03, 0x09, KEY_SLEEP) \ + MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) \ + MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) \ + \ + MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) \ + MATRIX_KEY(0x04, 0x01, KEY_A) \ + MATRIX_KEY(0x04, 0x02, KEY_D) \ + MATRIX_KEY(0x04, 0x03, KEY_F) \ + MATRIX_KEY(0x04, 0x04, KEY_S) \ + MATRIX_KEY(0x04, 0x05, KEY_K) \ + MATRIX_KEY(0x04, 0x06, KEY_J) \ + MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) \ + MATRIX_KEY(0x04, 0x09, KEY_L) \ + MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH) \ + MATRIX_KEY(0x04, 0x0b, KEY_ENTER) \ + \ + MATRIX_KEY(0x05, 0x01, KEY_Z) \ + MATRIX_KEY(0x05, 0x02, KEY_C) \ + MATRIX_KEY(0x05, 0x03, KEY_V) \ + MATRIX_KEY(0x05, 0x04, KEY_X) \ + MATRIX_KEY(0x05, 0x05, KEY_COMMA) \ + MATRIX_KEY(0x05, 0x06, KEY_M) \ + MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) \ + MATRIX_KEY(0x05, 0x08, KEY_SLASH) \ + MATRIX_KEY(0x05, 0x09, KEY_DOT) \ + MATRIX_KEY(0x05, 0x0b, KEY_SPACE) \ + \ + MATRIX_KEY(0x06, 0x01, KEY_1) \ + MATRIX_KEY(0x06, 0x02, KEY_3) \ + MATRIX_KEY(0x06, 0x03, KEY_4) \ + MATRIX_KEY(0x06, 0x04, KEY_2) \ + MATRIX_KEY(0x06, 0x05, KEY_8) \ + MATRIX_KEY(0x06, 0x06, KEY_7) \ + MATRIX_KEY(0x06, 0x08, KEY_0) \ + MATRIX_KEY(0x06, 0x09, KEY_9) \ + MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT) \ + MATRIX_KEY(0x06, 0x0b, KEY_DOWN) \ + MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) \ + \ + MATRIX_KEY(0x07, 0x01, KEY_Q) \ + MATRIX_KEY(0x07, 0x02, KEY_E) \ + MATRIX_KEY(0x07, 0x03, KEY_R) \ + MATRIX_KEY(0x07, 0x04, KEY_W) \ + MATRIX_KEY(0x07, 0x05, KEY_I) \ + MATRIX_KEY(0x07, 0x06, KEY_U) \ + MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) \ + MATRIX_KEY(0x07, 0x08, KEY_P) \ + MATRIX_KEY(0x07, 0x09, KEY_O) \ + MATRIX_KEY(0x07, 0x0b, KEY_UP) \ + MATRIX_KEY(0x07, 0x0c, KEY_LEFT) + +#endif /* _CROS_EC_KEYBOARD_H */ diff --git a/include/dt-bindings/interconnect/qcom,msm8939.h b/include/dt-bindings/interconnect/qcom,msm8939.h new file mode 100644 index 000000000000..c22369a4b9f5 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,msm8939.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm interconnect IDs + * + * Copyright (c) 2020, Linaro Ltd. 
+ * Author: Jun Nie <jun.nie@linaro.org> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8939_H + +#define BIMC_SNOC_SLV 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QDSS_ETR 2 +#define MASTER_SNOC_CFG 3 +#define PCNOC_SNOC_SLV 4 +#define SLAVE_APSS 5 +#define SLAVE_CATS_128 6 +#define SLAVE_OCMEM_64 7 +#define SLAVE_IMEM 8 +#define SLAVE_QDSS_STM 9 +#define SLAVE_SRVC_SNOC 10 +#define SNOC_BIMC_0_MAS 11 +#define SNOC_BIMC_1_MAS 12 +#define SNOC_BIMC_2_MAS 13 +#define SNOC_INT_0 14 +#define SNOC_INT_1 15 +#define SNOC_INT_BIMC 16 +#define SNOC_PCNOC_MAS 17 +#define SNOC_QDSS_INT 18 + +#define MASTER_VIDEO_P0 0 +#define MASTER_JPEG 1 +#define MASTER_VFE 2 +#define MASTER_MDP_PORT0 3 +#define MASTER_MDP_PORT1 4 +#define MASTER_CPP 5 +#define SNOC_MM_INT_0 6 +#define SNOC_MM_INT_1 7 +#define SNOC_MM_INT_2 8 + +#define BIMC_SNOC_MAS 0 +#define MASTER_AMPSS_M0 1 +#define MASTER_GRAPHICS_3D 2 +#define MASTER_TCU0 3 +#define SLAVE_AMPSS_L2 4 +#define SLAVE_EBI_CH0 5 +#define SNOC_BIMC_0_SLV 6 +#define SNOC_BIMC_1_SLV 7 +#define SNOC_BIMC_2_SLV 8 + +#define MASTER_BLSP_1 0 +#define MASTER_DEHR 1 +#define MASTER_LPASS 2 +#define MASTER_CRYPTO_CORE0 3 +#define MASTER_SDCC_1 4 +#define MASTER_SDCC_2 5 +#define MASTER_SPDM 6 +#define MASTER_USB_HS1 7 +#define MASTER_USB_HS2 8 +#define PCNOC_INT_0 9 +#define PCNOC_INT_1 10 +#define PCNOC_MAS_0 11 +#define PCNOC_MAS_1 12 +#define PCNOC_SLV_0 13 +#define PCNOC_SLV_1 14 +#define PCNOC_SLV_2 15 +#define PCNOC_SLV_3 16 +#define PCNOC_SLV_4 17 +#define PCNOC_SLV_8 18 +#define PCNOC_SLV_9 19 +#define PCNOC_SNOC_MAS 20 +#define SLAVE_BIMC_CFG 21 +#define SLAVE_BLSP_1 22 +#define SLAVE_BOOT_ROM 23 +#define SLAVE_CAMERA_CFG 24 +#define SLAVE_CLK_CTL 25 +#define SLAVE_CRYPTO_0_CFG 26 +#define SLAVE_DEHR_CFG 27 +#define SLAVE_DISPLAY_CFG 28 +#define SLAVE_GRAPHICS_3D_CFG 29 +#define SLAVE_IMEM_CFG 30 +#define SLAVE_LPASS 31 +#define SLAVE_MPM 32 +#define SLAVE_MSG_RAM 33 +#define SLAVE_MSS 34 +#define SLAVE_PDM 35 +#define SLAVE_PMIC_ARB 36 +#define SLAVE_PCNOC_CFG 37 +#define SLAVE_PRNG 38 +#define SLAVE_QDSS_CFG 39 +#define SLAVE_RBCPR_CFG 40 +#define SLAVE_SDCC_1 41 +#define SLAVE_SDCC_2 42 +#define SLAVE_SECURITY 43 +#define SLAVE_SNOC_CFG 44 +#define SLAVE_SPDM 45 +#define SLAVE_TCSR 46 +#define SLAVE_TLMM 47 +#define SLAVE_USB_HS1 48 +#define SLAVE_USB_HS2 49 +#define SLAVE_VENUS_CFG 50 +#define SNOC_PCNOC_SLV 51 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sdx55.h b/include/dt-bindings/interconnect/qcom,sdx55.h new file mode 100644 index 000000000000..bfb6524a2d90 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdx55.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm SDX55 interconnect IDs + * + * Copyright (c) 2021, Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX55_H + +#define MASTER_LLCC 0 +#define SLAVE_EBI_CH0 1 + +#define MASTER_TCU_0 0 +#define MASTER_SNOC_GC_MEM_NOC 1 +#define MASTER_AMPSS_M0 2 +#define SLAVE_LLCC 3 +#define SLAVE_MEM_NOC_SNOC 4 +#define SLAVE_MEM_NOC_PCIE_SNOC 5 + +#define MASTER_AUDIO 0 +#define MASTER_BLSP_1 1 +#define MASTER_QDSS_BAM 2 +#define MASTER_QPIC 3 +#define MASTER_SNOC_CFG 4 +#define MASTER_SPMI_FETCHER 5 +#define MASTER_ANOC_SNOC 6 +#define MASTER_IPA 7 +#define MASTER_MEM_NOC_SNOC 8 +#define MASTER_MEM_NOC_PCIE_SNOC 9 +#define MASTER_CRYPTO_CORE_0 10 +#define MASTER_EMAC 11 +#define MASTER_IPA_PCIE 12 +#define MASTER_PCIE 13 +#define MASTER_QDSS_ETR 14 +#define MASTER_SDCC_1 15 +#define MASTER_USB3 16 +#define SLAVE_AOP 17 +#define SLAVE_AOSS 18 +#define SLAVE_APPSS 19 +#define SLAVE_AUDIO 20 +#define SLAVE_BLSP_1 21 +#define SLAVE_CLK_CTL 22 +#define SLAVE_CRYPTO_0_CFG 23 +#define SLAVE_CNOC_DDRSS 24 +#define SLAVE_ECC_CFG 25 +#define SLAVE_EMAC_CFG 26 +#define SLAVE_IMEM_CFG 27 +#define SLAVE_IPA_CFG 28 +#define SLAVE_CNOC_MSS 29 +#define SLAVE_PCIE_PARF 30 +#define SLAVE_PDM 31 +#define SLAVE_PRNG 32 +#define SLAVE_QDSS_CFG 33 +#define SLAVE_QPIC 34 +#define SLAVE_SDCC_1 35 +#define SLAVE_SNOC_CFG 36 +#define SLAVE_SPMI_FETCHER 37 +#define SLAVE_SPMI_VGI_COEX 38 +#define SLAVE_TCSR 39 +#define SLAVE_TLMM 40 +#define SLAVE_USB3 41 +#define SLAVE_USB3_PHY_CFG 42 +#define SLAVE_ANOC_SNOC 43 +#define SLAVE_SNOC_MEM_NOC_GC 44 +#define SLAVE_OCIMEM 45 +#define SLAVE_SERVICE_SNOC 46 +#define SLAVE_PCIE_0 47 +#define SLAVE_QDSS_STM 48 +#define SLAVE_TCU 49 + +#define MASTER_IPA_CORE 0 +#define SLAVE_IPA_CORE 1 + +#endif diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h index 2d85c2ec6cfd..25d03526f142 100644 --- a/include/dt-bindings/memory/mt2701-larb-port.h +++ b/include/dt-bindings/memory/mt2701-larb-port.h @@ -4,8 +4,8 @@ * Author: Honghui Zhang <honghui.zhang@mediatek.com> */ -#ifndef _MT2701_LARB_PORT_H_ -#define _MT2701_LARB_PORT_H_ +#ifndef _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT2701_LARB_PORT_H_ /* * Mediatek m4u generation 1 such as mt2701 has flat m4u port numbers, diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h index 6f9aa7349cef..e41a2841bcff 100644 --- a/include/dt-bindings/memory/mt2712-larb-port.h +++ b/include/dt-bindings/memory/mt2712-larb-port.h @@ -3,10 +3,10 @@ * Copyright (c) 2017 MediaTek Inc. 
* Author: Yong Wu <yong.wu@mediatek.com> */ -#ifndef __DTS_IOMMU_PORT_MT2712_H -#define __DTS_IOMMU_PORT_MT2712_H +#ifndef _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT2712_LARB_PORT_H_ -#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#include <dt-bindings/memory/mtk-memory-port.h> #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h index 2ad0899fbf2f..3fb438a96e35 100644 --- a/include/dt-bindings/memory/mt6779-larb-port.h +++ b/include/dt-bindings/memory/mt6779-larb-port.h @@ -4,10 +4,10 @@ * Author: Chao Hao <chao.hao@mediatek.com> */ -#ifndef _DTS_IOMMU_PORT_MT6779_H_ -#define _DTS_IOMMU_PORT_MT6779_H_ +#ifndef _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT6779_LARB_PORT_H_ -#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#include <dt-bindings/memory/mtk-memory-port.h> #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/mt8167-larb-port.h b/include/dt-bindings/memory/mt8167-larb-port.h index 000fb299a408..aae57d4824ca 100644 --- a/include/dt-bindings/memory/mt8167-larb-port.h +++ b/include/dt-bindings/memory/mt8167-larb-port.h @@ -5,10 +5,10 @@ * Author: Honghui Zhang <honghui.zhang@mediatek.com> * Author: Fabien Parent <fparent@baylibre.com> */ -#ifndef __DTS_IOMMU_PORT_MT8167_H -#define __DTS_IOMMU_PORT_MT8167_H +#ifndef _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT8167_LARB_PORT_H_ -#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#include <dt-bindings/memory/mtk-memory-port.h> #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h index 9f31ccfeca21..167a7fc51868 100644 --- a/include/dt-bindings/memory/mt8173-larb-port.h +++ b/include/dt-bindings/memory/mt8173-larb-port.h @@ -3,10 +3,10 @@ * Copyright (c) 2015-2016 MediaTek Inc. * Author: Yong Wu <yong.wu@mediatek.com> */ -#ifndef __DTS_IOMMU_PORT_MT8173_H -#define __DTS_IOMMU_PORT_MT8173_H +#ifndef _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT8173_LARB_PORT_H_ -#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#include <dt-bindings/memory/mtk-memory-port.h> #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h index 2c579f305162..36abdf0ce5a2 100644 --- a/include/dt-bindings/memory/mt8183-larb-port.h +++ b/include/dt-bindings/memory/mt8183-larb-port.h @@ -3,10 +3,10 @@ * Copyright (c) 2018 MediaTek Inc. * Author: Yong Wu <yong.wu@mediatek.com> */ -#ifndef __DTS_IOMMU_PORT_MT8183_H -#define __DTS_IOMMU_PORT_MT8183_H +#ifndef _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT8183_LARB_PORT_H_ -#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#include <dt-bindings/memory/mtk-memory-port.h> #define M4U_LARB0_ID 0 #define M4U_LARB1_ID 1 diff --git a/include/dt-bindings/memory/mt8192-larb-port.h b/include/dt-bindings/memory/mt8192-larb-port.h new file mode 100644 index 000000000000..23035a52c675 --- /dev/null +++ b/include/dt-bindings/memory/mt8192-larb-port.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 MediaTek Inc. 
+ * + * Author: Chao Hao <chao.hao@mediatek.com> + * Author: Yong Wu <yong.wu@mediatek.com> + */ +#ifndef _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT8192_LARB_PORT_H_ + +#include <dt-bindings/memory/mtk-memory-port.h> + +/* + * MM IOMMU supports 16GB dma address. + * + * The address will preassign like this: + * + * modules dma-address-region larbs-ports + * disp 0 ~ 4G larb0/1 + * vcodec 4G ~ 8G larb4/5/7 + * cam/mdp 8G ~ 12G larb2/9/11/13/14/16/17/18/19/20 + * CCU0 0x4000_0000 ~ 0x43ff_ffff larb13: port 9/10 + * CCU1 0x4400_0000 ~ 0x47ff_ffff larb14: port 4/5 + * + * larb3/6/8/10/12/15 is null. + */ + +/* larb0 */ +#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0) +#define M4U_PORT_L0_OVL_RDMA0_HDR MTK_M4U_ID(0, 1) +#define M4U_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2) +#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 3) +#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 4) +#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5) + +/* larb1 */ +#define M4U_PORT_L1_OVL_2L_RDMA0_HDR MTK_M4U_ID(1, 0) +#define M4U_PORT_L1_OVL_2L_RDMA2_HDR MTK_M4U_ID(1, 1) +#define M4U_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 2) +#define M4U_PORT_L1_OVL_2L_RDMA2 MTK_M4U_ID(1, 3) +#define M4U_PORT_L1_DISP_MDP_RDMA4 MTK_M4U_ID(1, 4) +#define M4U_PORT_L1_DISP_RDMA4 MTK_M4U_ID(1, 5) +#define M4U_PORT_L1_DISP_UFBC_WDMA0 MTK_M4U_ID(1, 6) +#define M4U_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 7) + +/* larb2 */ +#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0) +#define M4U_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1) +#define M4U_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2) +#define M4U_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3) +#define M4U_PORT_L2_MDP_DISP_FAKE0 MTK_M4U_ID(2, 4) + +/* larb3: null */ + +/* larb4 */ +#define M4U_PORT_L4_VDEC_MC_EXT MTK_M4U_ID(4, 0) +#define M4U_PORT_L4_VDEC_UFO_EXT MTK_M4U_ID(4, 1) +#define M4U_PORT_L4_VDEC_PP_EXT MTK_M4U_ID(4, 2) +#define M4U_PORT_L4_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3) +#define M4U_PORT_L4_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4) +#define M4U_PORT_L4_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5) +#define M4U_PORT_L4_VDEC_TILE_EXT MTK_M4U_ID(4, 6) +#define M4U_PORT_L4_VDEC_VLD_EXT MTK_M4U_ID(4, 7) +#define M4U_PORT_L4_VDEC_VLD2_EXT MTK_M4U_ID(4, 8) +#define M4U_PORT_L4_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9) +#define M4U_PORT_L4_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 10) + +/* larb5 */ +#define M4U_PORT_L5_VDEC_LAT0_VLD_EXT MTK_M4U_ID(5, 0) +#define M4U_PORT_L5_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(5, 1) +#define M4U_PORT_L5_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(5, 2) +#define M4U_PORT_L5_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(5, 3) +#define M4U_PORT_L5_VDEC_LAT0_TILE_EXT MTK_M4U_ID(5, 4) +#define M4U_PORT_L5_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(5, 5) +#define M4U_PORT_L5_VDEC_LAT0_RG_CTRL_DMA_EXT MTK_M4U_ID(5, 6) +#define M4U_PORT_L5_VDEC_UFO_ENC_EXT MTK_M4U_ID(5, 7) + +/* larb6: null */ + +/* larb7 */ +#define M4U_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0) +#define M4U_PORT_L7_VENC_REC MTK_M4U_ID(7, 1) +#define M4U_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2) +#define M4U_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3) +#define M4U_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4) +#define M4U_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5) +#define M4U_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6) +#define M4U_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7) +#define M4U_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8) +#define M4U_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9) +#define M4U_PORT_L7_JPGENC_Q_RDMA MTK_M4U_ID(7, 10) +#define M4U_PORT_L7_JPGENC_C_TABLE MTK_M4U_ID(7, 11) +#define M4U_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12) +#define M4U_PORT_L7_VENC_SUB_R_LUMA MTK_M4U_ID(7, 13) +#define M4U_PORT_L7_VENC_SUB_W_LUMA MTK_M4U_ID(7, 
14) + +/* larb8: null */ + +/* larb9 */ +#define M4U_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0) +#define M4U_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1) +#define M4U_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2) +#define M4U_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3) +#define M4U_PORT_L9_IMG_ICE_D1 MTK_M4U_ID(9, 4) +#define M4U_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5) +#define M4U_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6) +#define M4U_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7) +#define M4U_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8) +#define M4U_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9) +#define M4U_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10) +#define M4U_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11) +#define M4U_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12) +#define M4U_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13) +#define M4U_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14) + +/* larb10: null */ + +/* larb11 */ +#define M4U_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0) +#define M4U_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1) +#define M4U_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2) +#define M4U_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3) +#define M4U_PORT_L11_IMG_ICE_D1 MTK_M4U_ID(11, 4) +#define M4U_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5) +#define M4U_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6) +#define M4U_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7) +#define M4U_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8) +#define M4U_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9) +#define M4U_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10) +#define M4U_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11) +#define M4U_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12) +#define M4U_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13) +#define M4U_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14) +#define M4U_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15) +#define M4U_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16) +#define M4U_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17) +#define M4U_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18) +#define M4U_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19) +#define M4U_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20) +#define M4U_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21) +#define M4U_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22) +#define M4U_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23) +#define M4U_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24) +#define M4U_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25) + +/* larb12: null */ + +/* larb13 */ +#define M4U_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0) +#define M4U_PORT_L13_CAM_MRAWO0 MTK_M4U_ID(13, 1) +#define M4U_PORT_L13_CAM_MRAWO1 MTK_M4U_ID(13, 2) +#define M4U_PORT_L13_CAM_CAMSV1 MTK_M4U_ID(13, 3) +#define M4U_PORT_L13_CAM_CAMSV2 MTK_M4U_ID(13, 4) +#define M4U_PORT_L13_CAM_CAMSV3 MTK_M4U_ID(13, 5) +#define M4U_PORT_L13_CAM_CAMSV4 MTK_M4U_ID(13, 6) +#define M4U_PORT_L13_CAM_CAMSV5 MTK_M4U_ID(13, 7) +#define M4U_PORT_L13_CAM_CAMSV6 MTK_M4U_ID(13, 8) +#define M4U_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9) +#define M4U_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10) +#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11) + +/* larb14 */ +#define M4U_PORT_L14_CAM_RESERVE1 MTK_M4U_ID(14, 0) +#define M4U_PORT_L14_CAM_RESERVE2 MTK_M4U_ID(14, 1) +#define M4U_PORT_L14_CAM_RESERVE3 MTK_M4U_ID(14, 2) +#define M4U_PORT_L14_CAM_CAMSV0 MTK_M4U_ID(14, 3) +#define M4U_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4) +#define M4U_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5) + +/* larb15: null */ + +/* larb16 */ +#define M4U_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0) +#define M4U_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1) +#define M4U_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2) +#define M4U_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3) +#define M4U_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4) +#define M4U_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 
5) +#define M4U_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6) +#define M4U_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7) +#define M4U_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8) +#define M4U_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9) +#define M4U_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10) +#define M4U_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11) +#define M4U_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12) +#define M4U_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13) +#define M4U_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14) +#define M4U_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15) +#define M4U_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16) + +/* larb17 */ +#define M4U_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0) +#define M4U_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1) +#define M4U_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2) +#define M4U_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3) +#define M4U_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4) +#define M4U_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5) +#define M4U_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6) +#define M4U_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7) +#define M4U_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8) +#define M4U_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9) +#define M4U_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10) +#define M4U_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11) +#define M4U_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12) +#define M4U_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13) +#define M4U_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14) +#define M4U_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15) +#define M4U_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16) + +/* larb18 */ +#define M4U_PORT_L18_CAM_IMGO_R1_C MTK_M4U_ID(18, 0) +#define M4U_PORT_L18_CAM_RRZO_R1_C MTK_M4U_ID(18, 1) +#define M4U_PORT_L18_CAM_CQI_R1_C MTK_M4U_ID(18, 2) +#define M4U_PORT_L18_CAM_BPCI_R1_C MTK_M4U_ID(18, 3) +#define M4U_PORT_L18_CAM_YUVO_R1_C MTK_M4U_ID(18, 4) +#define M4U_PORT_L18_CAM_UFDI_R2_C MTK_M4U_ID(18, 5) +#define M4U_PORT_L18_CAM_RAWI_R2_C MTK_M4U_ID(18, 6) +#define M4U_PORT_L18_CAM_RAWI_R3_C MTK_M4U_ID(18, 7) +#define M4U_PORT_L18_CAM_AAO_R1_C MTK_M4U_ID(18, 8) +#define M4U_PORT_L18_CAM_AFO_R1_C MTK_M4U_ID(18, 9) +#define M4U_PORT_L18_CAM_FLKO_R1_C MTK_M4U_ID(18, 10) +#define M4U_PORT_L18_CAM_LCESO_R1_C MTK_M4U_ID(18, 11) +#define M4U_PORT_L18_CAM_CRZO_R1_C MTK_M4U_ID(18, 12) +#define M4U_PORT_L18_CAM_LTMSO_R1_C MTK_M4U_ID(18, 13) +#define M4U_PORT_L18_CAM_RSSO_R1_C MTK_M4U_ID(18, 14) +#define M4U_PORT_L18_CAM_AAHO_R1_C MTK_M4U_ID(18, 15) +#define M4U_PORT_L18_CAM_LSCI_R1_C MTK_M4U_ID(18, 16) + +/* larb19 */ +#define M4U_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0) +#define M4U_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1) +#define M4U_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2) +#define M4U_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3) + +/* larb20 */ +#define M4U_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0) +#define M4U_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1) +#define M4U_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2) +#define M4U_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3) +#define M4U_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4) +#define M4U_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5) + +#endif diff --git a/include/dt-bindings/memory/mtk-memory-port.h b/include/dt-bindings/memory/mtk-memory-port.h new file mode 100644 index 000000000000..7d64103209af --- /dev/null +++ b/include/dt-bindings/memory/mtk-memory-port.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 MediaTek Inc. 
+ * Author: Yong Wu <yong.wu@mediatek.com> + */ +#ifndef __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_ +#define __DT_BINDINGS_MEMORY_MTK_MEMORY_PORT_H_ + +#define MTK_LARB_NR_MAX 32 + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) +#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f) +#define MTK_M4U_TO_PORT(id) ((id) & 0x1f) + +#endif diff --git a/include/dt-bindings/pinctrl/k210-fpioa.h b/include/dt-bindings/pinctrl/k210-fpioa.h new file mode 100644 index 000000000000..314285eab3a1 --- /dev/null +++ b/include/dt-bindings/pinctrl/k210-fpioa.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com> + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + */ +#ifndef PINCTRL_K210_FPIOA_H +#define PINCTRL_K210_FPIOA_H + +/* + * Full list of FPIOA functions from + * kendryte-standalone-sdk/lib/drivers/include/fpioa.h + */ +#define K210_PCF_MASK GENMASK(7, 0) +#define K210_PCF_JTAG_TCLK 0 /* JTAG Test Clock */ +#define K210_PCF_JTAG_TDI 1 /* JTAG Test Data In */ +#define K210_PCF_JTAG_TMS 2 /* JTAG Test Mode Select */ +#define K210_PCF_JTAG_TDO 3 /* JTAG Test Data Out */ +#define K210_PCF_SPI0_D0 4 /* SPI0 Data 0 */ +#define K210_PCF_SPI0_D1 5 /* SPI0 Data 1 */ +#define K210_PCF_SPI0_D2 6 /* SPI0 Data 2 */ +#define K210_PCF_SPI0_D3 7 /* SPI0 Data 3 */ +#define K210_PCF_SPI0_D4 8 /* SPI0 Data 4 */ +#define K210_PCF_SPI0_D5 9 /* SPI0 Data 5 */ +#define K210_PCF_SPI0_D6 10 /* SPI0 Data 6 */ +#define K210_PCF_SPI0_D7 11 /* SPI0 Data 7 */ +#define K210_PCF_SPI0_SS0 12 /* SPI0 Chip Select 0 */ +#define K210_PCF_SPI0_SS1 13 /* SPI0 Chip Select 1 */ +#define K210_PCF_SPI0_SS2 14 /* SPI0 Chip Select 2 */ +#define K210_PCF_SPI0_SS3 15 /* SPI0 Chip Select 3 */ +#define K210_PCF_SPI0_ARB 16 /* SPI0 Arbitration */ +#define K210_PCF_SPI0_SCLK 17 /* SPI0 Serial Clock */ +#define K210_PCF_UARTHS_RX 18 /* UART High speed Receiver */ +#define K210_PCF_UARTHS_TX 19 /* UART High speed Transmitter */ +#define K210_PCF_RESV6 20 /* Reserved function */ +#define K210_PCF_RESV7 21 /* Reserved function */ +#define K210_PCF_CLK_SPI1 22 /* Clock SPI1 */ +#define K210_PCF_CLK_I2C1 23 /* Clock I2C1 */ +#define K210_PCF_GPIOHS0 24 /* GPIO High speed 0 */ +#define K210_PCF_GPIOHS1 25 /* GPIO High speed 1 */ +#define K210_PCF_GPIOHS2 26 /* GPIO High speed 2 */ +#define K210_PCF_GPIOHS3 27 /* GPIO High speed 3 */ +#define K210_PCF_GPIOHS4 28 /* GPIO High speed 4 */ +#define K210_PCF_GPIOHS5 29 /* GPIO High speed 5 */ +#define K210_PCF_GPIOHS6 30 /* GPIO High speed 6 */ +#define K210_PCF_GPIOHS7 31 /* GPIO High speed 7 */ +#define K210_PCF_GPIOHS8 32 /* GPIO High speed 8 */ +#define K210_PCF_GPIOHS9 33 /* GPIO High speed 9 */ +#define K210_PCF_GPIOHS10 34 /* GPIO High speed 10 */ +#define K210_PCF_GPIOHS11 35 /* GPIO High speed 11 */ +#define K210_PCF_GPIOHS12 36 /* GPIO High speed 12 */ +#define K210_PCF_GPIOHS13 37 /* GPIO High speed 13 */ +#define K210_PCF_GPIOHS14 38 /* GPIO High speed 14 */ +#define K210_PCF_GPIOHS15 39 /* GPIO High speed 15 */ +#define K210_PCF_GPIOHS16 40 /* GPIO High speed 16 */ +#define K210_PCF_GPIOHS17 41 /* GPIO High speed 17 */ +#define K210_PCF_GPIOHS18 42 /* GPIO High speed 18 */ +#define K210_PCF_GPIOHS19 43 /* GPIO High speed 19 */ +#define K210_PCF_GPIOHS20 44 /* GPIO High speed 20 */ +#define K210_PCF_GPIOHS21 45 /* GPIO High speed 21 */ +#define K210_PCF_GPIOHS22 46 /* GPIO High speed 22 */ +#define K210_PCF_GPIOHS23 47 /* GPIO High speed 23 */ +#define K210_PCF_GPIOHS24 48 /* GPIO High speed 24 */ +#define 
K210_PCF_GPIOHS25 49 /* GPIO High speed 25 */ +#define K210_PCF_GPIOHS26 50 /* GPIO High speed 26 */ +#define K210_PCF_GPIOHS27 51 /* GPIO High speed 27 */ +#define K210_PCF_GPIOHS28 52 /* GPIO High speed 28 */ +#define K210_PCF_GPIOHS29 53 /* GPIO High speed 29 */ +#define K210_PCF_GPIOHS30 54 /* GPIO High speed 30 */ +#define K210_PCF_GPIOHS31 55 /* GPIO High speed 31 */ +#define K210_PCF_GPIO0 56 /* GPIO pin 0 */ +#define K210_PCF_GPIO1 57 /* GPIO pin 1 */ +#define K210_PCF_GPIO2 58 /* GPIO pin 2 */ +#define K210_PCF_GPIO3 59 /* GPIO pin 3 */ +#define K210_PCF_GPIO4 60 /* GPIO pin 4 */ +#define K210_PCF_GPIO5 61 /* GPIO pin 5 */ +#define K210_PCF_GPIO6 62 /* GPIO pin 6 */ +#define K210_PCF_GPIO7 63 /* GPIO pin 7 */ +#define K210_PCF_UART1_RX 64 /* UART1 Receiver */ +#define K210_PCF_UART1_TX 65 /* UART1 Transmitter */ +#define K210_PCF_UART2_RX 66 /* UART2 Receiver */ +#define K210_PCF_UART2_TX 67 /* UART2 Transmitter */ +#define K210_PCF_UART3_RX 68 /* UART3 Receiver */ +#define K210_PCF_UART3_TX 69 /* UART3 Transmitter */ +#define K210_PCF_SPI1_D0 70 /* SPI1 Data 0 */ +#define K210_PCF_SPI1_D1 71 /* SPI1 Data 1 */ +#define K210_PCF_SPI1_D2 72 /* SPI1 Data 2 */ +#define K210_PCF_SPI1_D3 73 /* SPI1 Data 3 */ +#define K210_PCF_SPI1_D4 74 /* SPI1 Data 4 */ +#define K210_PCF_SPI1_D5 75 /* SPI1 Data 5 */ +#define K210_PCF_SPI1_D6 76 /* SPI1 Data 6 */ +#define K210_PCF_SPI1_D7 77 /* SPI1 Data 7 */ +#define K210_PCF_SPI1_SS0 78 /* SPI1 Chip Select 0 */ +#define K210_PCF_SPI1_SS1 79 /* SPI1 Chip Select 1 */ +#define K210_PCF_SPI1_SS2 80 /* SPI1 Chip Select 2 */ +#define K210_PCF_SPI1_SS3 81 /* SPI1 Chip Select 3 */ +#define K210_PCF_SPI1_ARB 82 /* SPI1 Arbitration */ +#define K210_PCF_SPI1_SCLK 83 /* SPI1 Serial Clock */ +#define K210_PCF_SPI2_D0 84 /* SPI2 Data 0 */ +#define K210_PCF_SPI2_SS 85 /* SPI2 Select */ +#define K210_PCF_SPI2_SCLK 86 /* SPI2 Serial Clock */ +#define K210_PCF_I2S0_MCLK 87 /* I2S0 Master Clock */ +#define K210_PCF_I2S0_SCLK 88 /* I2S0 Serial Clock(BCLK) */ +#define K210_PCF_I2S0_WS 89 /* I2S0 Word Select(LRCLK) */ +#define K210_PCF_I2S0_IN_D0 90 /* I2S0 Serial Data Input 0 */ +#define K210_PCF_I2S0_IN_D1 91 /* I2S0 Serial Data Input 1 */ +#define K210_PCF_I2S0_IN_D2 92 /* I2S0 Serial Data Input 2 */ +#define K210_PCF_I2S0_IN_D3 93 /* I2S0 Serial Data Input 3 */ +#define K210_PCF_I2S0_OUT_D0 94 /* I2S0 Serial Data Output 0 */ +#define K210_PCF_I2S0_OUT_D1 95 /* I2S0 Serial Data Output 1 */ +#define K210_PCF_I2S0_OUT_D2 96 /* I2S0 Serial Data Output 2 */ +#define K210_PCF_I2S0_OUT_D3 97 /* I2S0 Serial Data Output 3 */ +#define K210_PCF_I2S1_MCLK 98 /* I2S1 Master Clock */ +#define K210_PCF_I2S1_SCLK 99 /* I2S1 Serial Clock(BCLK) */ +#define K210_PCF_I2S1_WS 100 /* I2S1 Word Select(LRCLK) */ +#define K210_PCF_I2S1_IN_D0 101 /* I2S1 Serial Data Input 0 */ +#define K210_PCF_I2S1_IN_D1 102 /* I2S1 Serial Data Input 1 */ +#define K210_PCF_I2S1_IN_D2 103 /* I2S1 Serial Data Input 2 */ +#define K210_PCF_I2S1_IN_D3 104 /* I2S1 Serial Data Input 3 */ +#define K210_PCF_I2S1_OUT_D0 105 /* I2S1 Serial Data Output 0 */ +#define K210_PCF_I2S1_OUT_D1 106 /* I2S1 Serial Data Output 1 */ +#define K210_PCF_I2S1_OUT_D2 107 /* I2S1 Serial Data Output 2 */ +#define K210_PCF_I2S1_OUT_D3 108 /* I2S1 Serial Data Output 3 */ +#define K210_PCF_I2S2_MCLK 109 /* I2S2 Master Clock */ +#define K210_PCF_I2S2_SCLK 110 /* I2S2 Serial Clock(BCLK) */ +#define K210_PCF_I2S2_WS 111 /* I2S2 Word Select(LRCLK) */ +#define K210_PCF_I2S2_IN_D0 112 /* I2S2 Serial Data Input 0 */ +#define K210_PCF_I2S2_IN_D1 113 
/* I2S2 Serial Data Input 1 */ +#define K210_PCF_I2S2_IN_D2 114 /* I2S2 Serial Data Input 2 */ +#define K210_PCF_I2S2_IN_D3 115 /* I2S2 Serial Data Input 3 */ +#define K210_PCF_I2S2_OUT_D0 116 /* I2S2 Serial Data Output 0 */ +#define K210_PCF_I2S2_OUT_D1 117 /* I2S2 Serial Data Output 1 */ +#define K210_PCF_I2S2_OUT_D2 118 /* I2S2 Serial Data Output 2 */ +#define K210_PCF_I2S2_OUT_D3 119 /* I2S2 Serial Data Output 3 */ +#define K210_PCF_RESV0 120 /* Reserved function */ +#define K210_PCF_RESV1 121 /* Reserved function */ +#define K210_PCF_RESV2 122 /* Reserved function */ +#define K210_PCF_RESV3 123 /* Reserved function */ +#define K210_PCF_RESV4 124 /* Reserved function */ +#define K210_PCF_RESV5 125 /* Reserved function */ +#define K210_PCF_I2C0_SCLK 126 /* I2C0 Serial Clock */ +#define K210_PCF_I2C0_SDA 127 /* I2C0 Serial Data */ +#define K210_PCF_I2C1_SCLK 128 /* I2C1 Serial Clock */ +#define K210_PCF_I2C1_SDA 129 /* I2C1 Serial Data */ +#define K210_PCF_I2C2_SCLK 130 /* I2C2 Serial Clock */ +#define K210_PCF_I2C2_SDA 131 /* I2C2 Serial Data */ +#define K210_PCF_DVP_XCLK 132 /* DVP System Clock */ +#define K210_PCF_DVP_RST 133 /* DVP System Reset */ +#define K210_PCF_DVP_PWDN 134 /* DVP Power Down Mode */ +#define K210_PCF_DVP_VSYNC 135 /* DVP Vertical Sync */ +#define K210_PCF_DVP_HSYNC 136 /* DVP Horizontal Sync */ +#define K210_PCF_DVP_PCLK 137 /* Pixel Clock */ +#define K210_PCF_DVP_D0 138 /* Data Bit 0 */ +#define K210_PCF_DVP_D1 139 /* Data Bit 1 */ +#define K210_PCF_DVP_D2 140 /* Data Bit 2 */ +#define K210_PCF_DVP_D3 141 /* Data Bit 3 */ +#define K210_PCF_DVP_D4 142 /* Data Bit 4 */ +#define K210_PCF_DVP_D5 143 /* Data Bit 5 */ +#define K210_PCF_DVP_D6 144 /* Data Bit 6 */ +#define K210_PCF_DVP_D7 145 /* Data Bit 7 */ +#define K210_PCF_SCCB_SCLK 146 /* Serial Camera Control Bus Clock */ +#define K210_PCF_SCCB_SDA 147 /* Serial Camera Control Bus Data */ +#define K210_PCF_UART1_CTS 148 /* UART1 Clear To Send */ +#define K210_PCF_UART1_DSR 149 /* UART1 Data Set Ready */ +#define K210_PCF_UART1_DCD 150 /* UART1 Data Carrier Detect */ +#define K210_PCF_UART1_RI 151 /* UART1 Ring Indicator */ +#define K210_PCF_UART1_SIR_IN 152 /* UART1 Serial Infrared Input */ +#define K210_PCF_UART1_DTR 153 /* UART1 Data Terminal Ready */ +#define K210_PCF_UART1_RTS 154 /* UART1 Request To Send */ +#define K210_PCF_UART1_OUT2 155 /* UART1 User-designated Output 2 */ +#define K210_PCF_UART1_OUT1 156 /* UART1 User-designated Output 1 */ +#define K210_PCF_UART1_SIR_OUT 157 /* UART1 Serial Infrared Output */ +#define K210_PCF_UART1_BAUD 158 /* UART1 Transmit Clock Output */ +#define K210_PCF_UART1_RE 159 /* UART1 Receiver Output Enable */ +#define K210_PCF_UART1_DE 160 /* UART1 Driver Output Enable */ +#define K210_PCF_UART1_RS485_EN 161 /* UART1 RS485 Enable */ +#define K210_PCF_UART2_CTS 162 /* UART2 Clear To Send */ +#define K210_PCF_UART2_DSR 163 /* UART2 Data Set Ready */ +#define K210_PCF_UART2_DCD 164 /* UART2 Data Carrier Detect */ +#define K210_PCF_UART2_RI 165 /* UART2 Ring Indicator */ +#define K210_PCF_UART2_SIR_IN 166 /* UART2 Serial Infrared Input */ +#define K210_PCF_UART2_DTR 167 /* UART2 Data Terminal Ready */ +#define K210_PCF_UART2_RTS 168 /* UART2 Request To Send */ +#define K210_PCF_UART2_OUT2 169 /* UART2 User-designated Output 2 */ +#define K210_PCF_UART2_OUT1 170 /* UART2 User-designated Output 1 */ +#define K210_PCF_UART2_SIR_OUT 171 /* UART2 Serial Infrared Output */ +#define K210_PCF_UART2_BAUD 172 /* UART2 Transmit Clock Output */ +#define K210_PCF_UART2_RE 173 /* UART2 
Receiver Output Enable */ +#define K210_PCF_UART2_DE 174 /* UART2 Driver Output Enable */ +#define K210_PCF_UART2_RS485_EN 175 /* UART2 RS485 Enable */ +#define K210_PCF_UART3_CTS 176 /* UART3 Clear To Send */ +#define K210_PCF_UART3_DSR 177 /* UART3 Data Set Ready */ +#define K210_PCF_UART3_DCD 178 /* UART3 Data Carrier Detect */ +#define K210_PCF_UART3_RI 179 /* UART3 Ring Indicator */ +#define K210_PCF_UART3_SIR_IN 180 /* UART3 Serial Infrared Input */ +#define K210_PCF_UART3_DTR 181 /* UART3 Data Terminal Ready */ +#define K210_PCF_UART3_RTS 182 /* UART3 Request To Send */ +#define K210_PCF_UART3_OUT2 183 /* UART3 User-designated Output 2 */ +#define K210_PCF_UART3_OUT1 184 /* UART3 User-designated Output 1 */ +#define K210_PCF_UART3_SIR_OUT 185 /* UART3 Serial Infrared Output */ +#define K210_PCF_UART3_BAUD 186 /* UART3 Transmit Clock Output */ +#define K210_PCF_UART3_RE 187 /* UART3 Receiver Output Enable */ +#define K210_PCF_UART3_DE 188 /* UART3 Driver Output Enable */ +#define K210_PCF_UART3_RS485_EN 189 /* UART3 RS485 Enable */ +#define K210_PCF_TIMER0_TOGGLE1 190 /* TIMER0 Toggle Output 1 */ +#define K210_PCF_TIMER0_TOGGLE2 191 /* TIMER0 Toggle Output 2 */ +#define K210_PCF_TIMER0_TOGGLE3 192 /* TIMER0 Toggle Output 3 */ +#define K210_PCF_TIMER0_TOGGLE4 193 /* TIMER0 Toggle Output 4 */ +#define K210_PCF_TIMER1_TOGGLE1 194 /* TIMER1 Toggle Output 1 */ +#define K210_PCF_TIMER1_TOGGLE2 195 /* TIMER1 Toggle Output 2 */ +#define K210_PCF_TIMER1_TOGGLE3 196 /* TIMER1 Toggle Output 3 */ +#define K210_PCF_TIMER1_TOGGLE4 197 /* TIMER1 Toggle Output 4 */ +#define K210_PCF_TIMER2_TOGGLE1 198 /* TIMER2 Toggle Output 1 */ +#define K210_PCF_TIMER2_TOGGLE2 199 /* TIMER2 Toggle Output 2 */ +#define K210_PCF_TIMER2_TOGGLE3 200 /* TIMER2 Toggle Output 3 */ +#define K210_PCF_TIMER2_TOGGLE4 201 /* TIMER2 Toggle Output 4 */ +#define K210_PCF_CLK_SPI2 202 /* Clock SPI2 */ +#define K210_PCF_CLK_I2C2 203 /* Clock I2C2 */ +#define K210_PCF_INTERNAL0 204 /* Internal function signal 0 */ +#define K210_PCF_INTERNAL1 205 /* Internal function signal 1 */ +#define K210_PCF_INTERNAL2 206 /* Internal function signal 2 */ +#define K210_PCF_INTERNAL3 207 /* Internal function signal 3 */ +#define K210_PCF_INTERNAL4 208 /* Internal function signal 4 */ +#define K210_PCF_INTERNAL5 209 /* Internal function signal 5 */ +#define K210_PCF_INTERNAL6 210 /* Internal function signal 6 */ +#define K210_PCF_INTERNAL7 211 /* Internal function signal 7 */ +#define K210_PCF_INTERNAL8 212 /* Internal function signal 8 */ +#define K210_PCF_INTERNAL9 213 /* Internal function signal 9 */ +#define K210_PCF_INTERNAL10 214 /* Internal function signal 10 */ +#define K210_PCF_INTERNAL11 215 /* Internal function signal 11 */ +#define K210_PCF_INTERNAL12 216 /* Internal function signal 12 */ +#define K210_PCF_INTERNAL13 217 /* Internal function signal 13 */ +#define K210_PCF_INTERNAL14 218 /* Internal function signal 14 */ +#define K210_PCF_INTERNAL15 219 /* Internal function signal 15 */ +#define K210_PCF_INTERNAL16 220 /* Internal function signal 16 */ +#define K210_PCF_INTERNAL17 221 /* Internal function signal 17 */ +#define K210_PCF_CONSTANT 222 /* Constant function */ +#define K210_PCF_INTERNAL18 223 /* Internal function signal 18 */ +#define K210_PCF_DEBUG0 224 /* Debug function 0 */ +#define K210_PCF_DEBUG1 225 /* Debug function 1 */ +#define K210_PCF_DEBUG2 226 /* Debug function 2 */ +#define K210_PCF_DEBUG3 227 /* Debug function 3 */ +#define K210_PCF_DEBUG4 228 /* Debug function 4 */ +#define K210_PCF_DEBUG5 229 /* Debug 
function 5 */ +#define K210_PCF_DEBUG6 230 /* Debug function 6 */ +#define K210_PCF_DEBUG7 231 /* Debug function 7 */ +#define K210_PCF_DEBUG8 232 /* Debug function 8 */ +#define K210_PCF_DEBUG9 233 /* Debug function 9 */ +#define K210_PCF_DEBUG10 234 /* Debug function 10 */ +#define K210_PCF_DEBUG11 235 /* Debug function 11 */ +#define K210_PCF_DEBUG12 236 /* Debug function 12 */ +#define K210_PCF_DEBUG13 237 /* Debug function 13 */ +#define K210_PCF_DEBUG14 238 /* Debug function 14 */ +#define K210_PCF_DEBUG15 239 /* Debug function 15 */ +#define K210_PCF_DEBUG16 240 /* Debug function 16 */ +#define K210_PCF_DEBUG17 241 /* Debug function 17 */ +#define K210_PCF_DEBUG18 242 /* Debug function 18 */ +#define K210_PCF_DEBUG19 243 /* Debug function 19 */ +#define K210_PCF_DEBUG20 244 /* Debug function 20 */ +#define K210_PCF_DEBUG21 245 /* Debug function 21 */ +#define K210_PCF_DEBUG22 246 /* Debug function 22 */ +#define K210_PCF_DEBUG23 247 /* Debug function 23 */ +#define K210_PCF_DEBUG24 248 /* Debug function 24 */ +#define K210_PCF_DEBUG25 249 /* Debug function 25 */ +#define K210_PCF_DEBUG26 250 /* Debug function 26 */ +#define K210_PCF_DEBUG27 251 /* Debug function 27 */ +#define K210_PCF_DEBUG28 252 /* Debug function 28 */ +#define K210_PCF_DEBUG29 253 /* Debug function 29 */ +#define K210_PCF_DEBUG30 254 /* Debug function 30 */ +#define K210_PCF_DEBUG31 255 /* Debug function 31 */ + +#define K210_FPIOA(pin, func) (((pin) << 16) | (func)) + +#define K210_PC_POWER_3V3 0 +#define K210_PC_POWER_1V8 1 + +#endif /* PINCTRL_K210_FPIOA_H */ diff --git a/include/dt-bindings/power/mt8167-power.h b/include/dt-bindings/power/mt8167-power.h new file mode 100644 index 000000000000..c8ec9983a4bc --- /dev/null +++ b/include/dt-bindings/power/mt8167-power.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef _DT_BINDINGS_POWER_MT8167_POWER_H +#define _DT_BINDINGS_POWER_MT8167_POWER_H + +#define MT8167_POWER_DOMAIN_MM 0 +#define MT8167_POWER_DOMAIN_VDEC 1 +#define MT8167_POWER_DOMAIN_ISP 2 +#define MT8167_POWER_DOMAIN_CONN 3 +#define MT8167_POWER_DOMAIN_MFG_ASYNC 4 +#define MT8167_POWER_DOMAIN_MFG_2D 5 +#define MT8167_POWER_DOMAIN_MFG 6 + +#endif /* _DT_BINDINGS_POWER_MT8167_POWER_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 7714487ac76b..d711e250cf2c 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -94,6 +94,15 @@ #define MSM8976_VDDMX_AO 4 #define MSM8976_VDDMX_VFL 5 +/* MSM8994 Power Domain Indexes */ +#define MSM8994_VDDCX 0 +#define MSM8994_VDDCX_AO 1 +#define MSM8994_VDDCX_VFC 2 +#define MSM8994_VDDMX 3 +#define MSM8994_VDDMX_AO 4 +#define MSM8994_VDDGFX 5 +#define MSM8994_VDDGFX_VFC 6 + /* MSM8996 Power Domain Indexes */ #define MSM8996_VDDCX 0 #define MSM8996_VDDCX_AO 1 diff --git a/include/dt-bindings/reset-controller/mt8192-resets.h b/include/dt-bindings/reset-controller/mt8192-resets.h new file mode 100644 index 000000000000..be9a7ca245b9 --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8192-resets.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. 
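K210_FPIOA(pin, func) above packs a pad number and one of the K210_PCF_* function codes into a single pinmux cell. A hedged decode in plain C, assuming only the (pin << 16) | func layout and the 8-bit K210_PCF_MASK from this header; the helper names are invented for the example.

#include <assert.h>

#define K210_PCF_MASK		0xff			/* GENMASK(7, 0) in the header */
#define K210_FPIOA(pin, func)	(((pin) << 16) | (func))

static unsigned int k210_fpioa_pin(unsigned int cell)  { return cell >> 16; }
static unsigned int k210_fpioa_func(unsigned int cell) { return cell & K210_PCF_MASK; }

int main(void)
{
	/* Route UARTHS TX (function code 19) out of pad 5, as a board DTS might. */
	unsigned int cell = K210_FPIOA(5, 19 /* K210_PCF_UARTHS_TX */);

	assert(k210_fpioa_pin(cell) == 5);
	assert(k210_fpioa_func(cell) == 19);
	return 0;
}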
+ * Author: Yong Liang <yong.liang@mediatek.com> + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8192 + +#define MT8192_TOPRGU_MM_SW_RST 1 +#define MT8192_TOPRGU_MFG_SW_RST 2 +#define MT8192_TOPRGU_VENC_SW_RST 3 +#define MT8192_TOPRGU_VDEC_SW_RST 4 +#define MT8192_TOPRGU_IMG_SW_RST 5 +#define MT8192_TOPRGU_MD_SW_RST 7 +#define MT8192_TOPRGU_CONN_SW_RST 9 +#define MT8192_TOPRGU_CONN_MCU_SW_RST 12 +#define MT8192_TOPRGU_IPU0_SW_RST 14 +#define MT8192_TOPRGU_IPU1_SW_RST 15 +#define MT8192_TOPRGU_AUDIO_SW_RST 17 +#define MT8192_TOPRGU_CAMSYS_SW_RST 18 +#define MT8192_TOPRGU_MJC_SW_RST 19 +#define MT8192_TOPRGU_C2K_S2_SW_RST 20 +#define MT8192_TOPRGU_C2K_SW_RST 21 +#define MT8192_TOPRGU_PERI_SW_RST 22 +#define MT8192_TOPRGU_PERI_AO_SW_RST 23 + +#define MT8192_TOPRGU_SW_RST_NUM 23 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */ diff --git a/include/dt-bindings/reset/k210-rst.h b/include/dt-bindings/reset/k210-rst.h new file mode 100644 index 000000000000..883c1aed50e8 --- /dev/null +++ b/include/dt-bindings/reset/k210-rst.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com> + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + */ +#ifndef RESET_K210_SYSCTL_H +#define RESET_K210_SYSCTL_H + +/* + * Kendryte K210 SoC system controller K210_SYSCTL_SOFT_RESET register bits. + * Taken from Kendryte SDK (kendryte-standalone-sdk). + */ +#define K210_RST_ROM 0 +#define K210_RST_DMA 1 +#define K210_RST_AI 2 +#define K210_RST_DVP 3 +#define K210_RST_FFT 4 +#define K210_RST_GPIO 5 +#define K210_RST_SPI0 6 +#define K210_RST_SPI1 7 +#define K210_RST_SPI2 8 +#define K210_RST_SPI3 9 +#define K210_RST_I2S0 10 +#define K210_RST_I2S1 11 +#define K210_RST_I2S2 12 +#define K210_RST_I2C0 13 +#define K210_RST_I2C1 14 +#define K210_RST_I2C2 15 +#define K210_RST_UART1 16 +#define K210_RST_UART2 17 +#define K210_RST_UART3 18 +#define K210_RST_AES 19 +#define K210_RST_FPIOA 20 +#define K210_RST_TIMER0 21 +#define K210_RST_TIMER1 22 +#define K210_RST_TIMER2 23 +#define K210_RST_WDT0 24 +#define K210_RST_WDT1 25 +#define K210_RST_SHA 26 +#define K210_RST_RTC 29 + +#endif /* RESET_K210_SYSCTL_H */ diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h index 01c84dba49a4..7950e799c76d 100644 --- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h +++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h @@ -13,5 +13,6 @@ #define RST_R_APB2_I2C 4 #define RST_R_APB1_IR 5 #define RST_R_APB1_W1 6 +#define RST_R_APB2_RSB 7 #endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h new file mode 100644 index 000000000000..cb6285a8d128 --- /dev/null +++ b/include/dt-bindings/reset/sun50i-h616-ccu.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (C) 2020 Arm Ltd. 
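Per the comment in k210-rst.h above, the K210_RST_* values are bit positions inside the K210 system controller's K210_SYSCTL_SOFT_RESET register. A rough provider-side sketch under that assumption only; the register offset and function name below are hypothetical and not taken from the in-tree reset driver.

#include <linux/bits.h>
#include <linux/io.h>

#define K210_SOFT_RESET_OFFSET	0x30	/* hypothetical offset, for illustration only */

static void __iomem *k210_sysctl_base;	/* assumed to be mapped during probe */

/* Pulse one soft-reset line, e.g. id == K210_RST_UART1 (bit 16). */
static void k210_soft_reset_sketch(unsigned int id)
{
	u32 val = readl(k210_sysctl_base + K210_SOFT_RESET_OFFSET);

	writel(val | BIT(id), k210_sysctl_base + K210_SOFT_RESET_OFFSET);
	writel(val & ~BIT(id), k210_sysctl_base + K210_SOFT_RESET_OFFSET);
}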
+ */ + +#ifndef _DT_BINDINGS_RESET_SUN50I_H616_H_ +#define _DT_BINDINGS_RESET_SUN50I_H616_H_ + +#define RST_MBUS 0 +#define RST_BUS_DE 1 +#define RST_BUS_DEINTERLACE 2 +#define RST_BUS_GPU 3 +#define RST_BUS_CE 4 +#define RST_BUS_VE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_HSTIMER 7 +#define RST_BUS_DBG 8 +#define RST_BUS_PSI 9 +#define RST_BUS_PWM 10 +#define RST_BUS_IOMMU 11 +#define RST_BUS_DRAM 12 +#define RST_BUS_NAND 13 +#define RST_BUS_MMC0 14 +#define RST_BUS_MMC1 15 +#define RST_BUS_MMC2 16 +#define RST_BUS_UART0 17 +#define RST_BUS_UART1 18 +#define RST_BUS_UART2 19 +#define RST_BUS_UART3 20 +#define RST_BUS_UART4 21 +#define RST_BUS_UART5 22 +#define RST_BUS_I2C0 23 +#define RST_BUS_I2C1 24 +#define RST_BUS_I2C2 25 +#define RST_BUS_I2C3 26 +#define RST_BUS_I2C4 27 +#define RST_BUS_SPI0 28 +#define RST_BUS_SPI1 29 +#define RST_BUS_EMAC0 30 +#define RST_BUS_EMAC1 31 +#define RST_BUS_TS 32 +#define RST_BUS_THS 33 +#define RST_BUS_SPDIF 34 +#define RST_BUS_DMIC 35 +#define RST_BUS_AUDIO_CODEC 36 +#define RST_BUS_AUDIO_HUB 37 +#define RST_USB_PHY0 38 +#define RST_USB_PHY1 39 +#define RST_USB_PHY2 40 +#define RST_USB_PHY3 41 +#define RST_BUS_OHCI0 42 +#define RST_BUS_OHCI1 43 +#define RST_BUS_OHCI2 44 +#define RST_BUS_OHCI3 45 +#define RST_BUS_EHCI0 46 +#define RST_BUS_EHCI1 47 +#define RST_BUS_EHCI2 48 +#define RST_BUS_EHCI3 49 +#define RST_BUS_OTG 50 +#define RST_BUS_HDMI 51 +#define RST_BUS_HDMI_SUB 52 +#define RST_BUS_TCON_TOP 53 +#define RST_BUS_TCON_TV0 54 +#define RST_BUS_TCON_TV1 55 +#define RST_BUS_TVE_TOP 56 +#define RST_BUS_TVE0 57 +#define RST_BUS_HDCP 58 +#define RST_BUS_KEYADC 59 + +#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */ diff --git a/include/dt-bindings/soc/bcm-pmb.h b/include/dt-bindings/soc/bcm-pmb.h new file mode 100644 index 000000000000..744dc3af4d41 --- /dev/null +++ b/include/dt-bindings/soc/bcm-pmb.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ + +#ifndef __DT_BINDINGS_SOC_BCM_PMB_H +#define __DT_BINDINGS_SOC_BCM_PMB_H + +#define BCM_PMB_PCIE0 0x01 +#define BCM_PMB_PCIE1 0x02 +#define BCM_PMB_PCIE2 0x03 +#define BCM_PMB_HOST_USB 0x04 + +#endif diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h index 3c3e16c0aadb..dc605c4bc224 100644 --- a/include/dt-bindings/sound/apq8016-lpass.h +++ b/include/dt-bindings/sound/apq8016-lpass.h @@ -2,9 +2,8 @@ #ifndef __DT_APQ8016_LPASS_H #define __DT_APQ8016_LPASS_H -#define MI2S_PRIMARY 0 -#define MI2S_SECONDARY 1 -#define MI2S_TERTIARY 2 -#define MI2S_QUATERNARY 3 +#include <dt-bindings/sound/qcom,lpass.h> + +/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */ #endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h new file mode 100644 index 000000000000..7b0b80b38699 --- /dev/null +++ b/include/dt-bindings/sound/qcom,lpass.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_QCOM_LPASS_H +#define __DT_QCOM_LPASS_H + +#define MI2S_PRIMARY 0 +#define MI2S_SECONDARY 1 +#define MI2S_TERTIARY 2 +#define MI2S_QUATERNARY 3 +#define MI2S_QUINARY 4 + +#define LPASS_DP_RX 5 + +#define LPASS_MCLK0 0 + +#endif /* __DT_QCOM_LPASS_H */ diff --git a/include/dt-bindings/sound/sc7180-lpass.h b/include/dt-bindings/sound/sc7180-lpass.h index 56ecaafd2dc6..5c1ee8b36b19 100644 --- a/include/dt-bindings/sound/sc7180-lpass.h +++ b/include/dt-bindings/sound/sc7180-lpass.h @@ -2,10 +2,8 @@ #ifndef __DT_SC7180_LPASS_H #define __DT_SC7180_LPASS_H -#define MI2S_PRIMARY 0 
-#define MI2S_SECONDARY 1 -#define LPASS_DP_RX 2 +#include <dt-bindings/sound/qcom,lpass.h> -#define LPASS_MCLK0 0 +/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */ #endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h index 0352893697f0..fef3ef65967f 100644 --- a/include/dt-bindings/usb/pd.h +++ b/include/dt-bindings/usb/pd.h @@ -93,4 +93,313 @@ #define FRS_DEFAULT_POWER 1 #define FRS_5V_1P5A 2 #define FRS_5V_3A 3 - #endif /* __DT_POWER_DELIVERY_H */ + +/* + * SVDM Identity Header + * -------------------- + * <31> :: data capable as a USB host + * <30> :: data capable as a USB device + * <29:27> :: product type (UFP / Cable / VPD) + * <26> :: modal operation supported (1b == yes) + * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0) + * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0) + * <20:16> :: Reserved, Shall be set to zero + * <15:0> :: USB-IF assigned VID for this cable vendor + */ +/* SOP Product Type (UFP) */ +#define IDH_PTYPE_NOT_UFP 0 +#define IDH_PTYPE_HUB 1 +#define IDH_PTYPE_PERIPH 2 +#define IDH_PTYPE_PSD 3 +#define IDH_PTYPE_AMA 5 + +/* SOP' Product Type (Cable Plug / VPD) */ +#define IDH_PTYPE_NOT_CABLE 0 +#define IDH_PTYPE_PCABLE 3 +#define IDH_PTYPE_ACABLE 4 +#define IDH_PTYPE_VPD 6 + +/* SOP Product Type (DFP) */ +#define IDH_PTYPE_NOT_DFP 0 +#define IDH_PTYPE_DFP_HUB 1 +#define IDH_PTYPE_DFP_HOST 2 +#define IDH_PTYPE_DFP_PB 3 + +#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \ + ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \ + | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \ + | ((vid) & 0xffff)) + +/* + * Cert Stat VDO + * ------------- + * <31:0> : USB-IF assigned XID for this cable + */ +#define VDO_CERT(xid) ((xid) & 0xffffffff) + +/* + * Product VDO + * ----------- + * <31:16> : USB Product ID + * <15:0> : USB bcdDevice + */ +#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff)) + +/* + * UFP VDO (PD Revision 3.0+ only) + * -------- + * <31:29> :: UFP VDO version + * <28> :: Reserved + * <27:24> :: Device capability + * <23:22> :: Connector type (10b == receptacle, 11b == captive plug) + * <21:11> :: Reserved + * <10:8> :: Vconn power (AMA only) + * <7> :: Vconn required (AMA only, 0b == no, 1b == yes) + * <6> :: Vbus required (AMA only, 0b == yes, 1b == no) + * <5:3> :: Alternate modes + * <2:0> :: USB highest speed + */ +/* UFP VDO Version */ +#define UFP_VDO_VER1_2 2 + +/* Device Capability */ +#define DEV_USB2_CAPABLE BIT(0) +#define DEV_USB2_BILLBOARD BIT(1) +#define DEV_USB3_CAPABLE BIT(2) +#define DEV_USB4_CAPABLE BIT(3) + +/* Connector Type */ +#define UFP_RECEPTACLE 2 +#define UFP_CAPTIVE 3 + +/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */ +#define AMA_VCONN_PWR_1W 0 +#define AMA_VCONN_PWR_1W5 1 +#define AMA_VCONN_PWR_2W 2 +#define AMA_VCONN_PWR_3W 3 +#define AMA_VCONN_PWR_4W 4 +#define AMA_VCONN_PWR_5W 5 +#define AMA_VCONN_PWR_6W 6 + +/* Vconn Required (AMA only) */ +#define AMA_VCONN_NOT_REQ 0 +#define AMA_VCONN_REQ 1 + +/* Vbus Required (AMA only) */ +#define AMA_VBUS_REQ 0 +#define AMA_VBUS_NOT_REQ 1 + +/* Alternate Modes */ +#define UFP_ALTMODE_NOT_SUPP 0 +#define UFP_ALTMODE_TBT3 BIT(0) +#define UFP_ALTMODE_RECFG BIT(1) +#define UFP_ALTMODE_NO_RECFG BIT(2) + +/* USB Highest Speed */ +#define UFP_USB2_ONLY 0 +#define UFP_USB32_GEN1 1 +#define UFP_USB32_4_GEN2 2 +#define UFP_USB4_GEN3 3 + +#define VDO_UFP(ver, cap, conn, 
vcpwr, vcr, vbr, alt, spd) \ + (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \ + | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \ + | ((spd) & 0x7)) + +/* + * DFP VDO (PD Revision 3.0+ only) + * -------- + * <31:29> :: DFP VDO version + * <28:27> :: Reserved + * <26:24> :: Host capability + * <23:22> :: Connector type (10b == receptacle, 11b == captive plug) + * <21:5> :: Reserved + * <4:0> :: Port number + */ +#define DFP_VDO_VER1_1 1 +#define HOST_USB2_CAPABLE BIT(0) +#define HOST_USB3_CAPABLE BIT(1) +#define HOST_USB4_CAPABLE BIT(2) +#define DFP_RECEPTACLE 2 +#define DFP_CAPTIVE 3 + +#define VDO_DFP(ver, cap, conn, pnum) \ + (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \ + | ((pnum) & 0x1f)) + +/* + * Passive Cable VDO + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:21> :: VDO version + * <20> :: Reserved, Shall be set to zero + * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive) + * <17> :: Reserved, Shall be set to zero + * <16:13> :: cable latency (0001 == <10ns(~1m length)) + * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req) + * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <8:7> :: Reserved, Shall be set to zero + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) + * <4:3> :: Reserved, Shall be set to zero + * <2:0> :: USB highest speed + * + * Active Cable VDO 1 + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:21> :: VDO version + * <20> :: Reserved, Shall be set to zero + * <19:18> :: Connector type (10b == C, 11b == Captive) + * <17> :: Reserved, Shall be set to zero + * <16:13> :: cable latency (0001 == <10ns(~1m length)) + * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req) + * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <8> :: SBU supported (0b == supported, 1b == not supported) + * <7> :: SBU type (0b == passive, 1b == active) + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) + * <4> :: Vbus through cable (0b == no, 1b == yes) + * <3> :: SOP" controller present? 
(0b == no, 1b == yes) + * <2:0> :: USB highest speed + */ +/* Cable VDO Version */ +#define CABLE_VDO_VER1_0 0 +#define CABLE_VDO_VER1_3 3 + +/* Connector Type */ +#define CABLE_CTYPE 2 +#define CABLE_CAPTIVE 3 + +/* Cable Latency */ +#define CABLE_LATENCY_1M 1 +#define CABLE_LATENCY_2M 2 +#define CABLE_LATENCY_3M 3 +#define CABLE_LATENCY_4M 4 +#define CABLE_LATENCY_5M 5 +#define CABLE_LATENCY_6M 6 +#define CABLE_LATENCY_7M 7 +#define CABLE_LATENCY_7M_PLUS 8 + +/* Cable Termination Type */ +#define PCABLE_VCONN_NOT_REQ 0 +#define PCABLE_VCONN_REQ 1 +#define ACABLE_ONE_END 2 +#define ACABLE_BOTH_END 3 + +/* Maximum Vbus Voltage */ +#define CABLE_MAX_VBUS_20V 0 +#define CABLE_MAX_VBUS_30V 1 +#define CABLE_MAX_VBUS_40V 2 +#define CABLE_MAX_VBUS_50V 3 + +/* Active Cable SBU Supported/Type */ +#define ACABLE_SBU_SUPP 0 +#define ACABLE_SBU_NOT_SUPP 1 +#define ACABLE_SBU_PASSIVE 0 +#define ACABLE_SBU_ACTIVE 1 + +/* Vbus Current Handling Capability */ +#define CABLE_CURR_DEF 0 +#define CABLE_CURR_3A 1 +#define CABLE_CURR_5A 2 + +/* USB Highest Speed */ +#define CABLE_USB2_ONLY 0 +#define CABLE_USB32_GEN1 1 +#define CABLE_USB32_4_GEN2 2 +#define CABLE_USB4_GEN3 3 + +#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \ + | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7)) +#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \ + | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \ + | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7)) + +/* + * Active Cable VDO 2 + * --------- + * <31:24> :: Maximum operating temperature + * <23:16> :: Shutdown temperature + * <15> :: Reserved, Shall be set to zero + * <14:12> :: U3/CLd power + * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S) + * <10> :: Physical connection (0b == copper, 1b == optical) + * <9> :: Active element (0b == redriver, 1b == retimer) + * <8> :: USB4 supported (0b == yes, 1b == no) + * <7:6> :: USB2 hub hops consumed + * <5> :: USB2 supported (0b == yes, 1b == no) + * <4> :: USB3.2 supported (0b == yes, 1b == no) + * <3> :: USB lanes supported (0b == one lane, 1b == two lanes) + * <2> :: Optically isolated active cable (0b == no, 1b == yes) + * <1> :: Reserved, Shall be set to zero + * <0> :: USB gen (0b == gen1, 1b == gen2+) + */ +/* U3/CLd Power*/ +#define ACAB2_U3_CLD_10MW_PLUS 0 +#define ACAB2_U3_CLD_10MW 1 +#define ACAB2_U3_CLD_5MW 2 +#define ACAB2_U3_CLD_1MW 3 +#define ACAB2_U3_CLD_500UW 4 +#define ACAB2_U3_CLD_200UW 5 +#define ACAB2_U3_CLD_50UW 6 + +/* Other Active Cable VDO 2 Fields */ +#define ACAB2_U3U0_DIRECT 0 +#define ACAB2_U3U0_U3S 1 +#define ACAB2_PHY_COPPER 0 +#define ACAB2_PHY_OPTICAL 1 +#define ACAB2_REDRIVER 0 +#define ACAB2_RETIMER 1 +#define ACAB2_USB4_SUPP 0 +#define ACAB2_USB4_NOT_SUPP 1 +#define ACAB2_USB2_SUPP 0 +#define ACAB2_USB2_NOT_SUPP 1 +#define ACAB2_USB32_SUPP 0 +#define ACAB2_USB32_NOT_SUPP 1 +#define ACAB2_LANES_ONE 0 +#define ACAB2_LANES_TWO 1 +#define ACAB2_OPT_ISO_NO 0 +#define ACAB2_OPT_ISO_YES 1 +#define ACAB2_GEN_1 0 +#define ACAB2_GEN_2_PLUS 1 + +#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \ + (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \ + | (trans) << 11 | (phy) << 10 | 
(ele) << 9 | (u4) << 8 \ + | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \ + | (iso) << 2 | (gen)) + +/* + * VPD VDO + * --------- + * <31:28> :: HW version + * <27:24> :: FW version + * <23:21> :: VDO version + * <20:17> :: Reserved, Shall be set to zero + * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <14> :: Charge through current support (0b == 3A, 1b == 5A) + * <13> :: Reserved, Shall be set to zero + * <12:7> :: Vbus impedance + * <6:1> :: Ground impedance + * <0> :: Charge through support (0b == no, 1b == yes) + */ +#define VPD_VDO_VER1_0 0 +#define VPD_MAX_VBUS_20V 0 +#define VPD_MAX_VBUS_30V 1 +#define VPD_MAX_VBUS_40V 2 +#define VPD_MAX_VBUS_50V 3 +#define VPDCT_CURR_3A 0 +#define VPDCT_CURR_5A 1 +#define VPDCT_NOT_SUPP 0 +#define VPDCT_SUPP 1 + +#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \ + | ((gi) & 0x3f) << 1 | (ct)) + +#endif /* __DT_POWER_DELIVERY_H */ diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h index 38afb341c3f2..abfcbe02001a 100644 --- a/include/keys/encrypted-type.h +++ b/include/keys/encrypted-type.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2010 IBM Corporation * Copyright (C) 2010 Politecnico di Torino, Italy - * TORSEC group -- http://security.polito.it + * TORSEC group -- https://security.polito.it * * Authors: * Mimi Zohar <zohar@us.ibm.com> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 8dcb3e1477bc..6fd3cda608e4 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -13,6 +13,13 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) +DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available); + +static __always_inline bool kvm_arm_support_pmu_v3(void) +{ + return static_branch_likely(&kvm_arm_pmu_available); +} + #ifdef CONFIG_HW_PERF_EVENTS struct kvm_pmc { @@ -47,7 +54,6 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); -bool kvm_arm_support_pmu_v3(void); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, @@ -87,7 +93,6 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} -static inline bool kvm_arm_support_pmu_v3(void) { return false; } static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 053bf05fb1f7..fcdaab723916 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -546,9 +546,19 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 +#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; +extern bool osc_sb_native_usb4_support_confirmed; + +/* USB4 Capabilities */ +#define OSC_USB_USB3_TUNNELING 0x00000001 +#define OSC_USB_DP_TUNNELING 0x00000002 
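The dt-bindings/usb/pd.h additions above pair each documented VDO bit layout with a VDO_*() packing macro. A small worked example of building a passive-cable VDO with VDO_PCABLE(), assuming only the field positions quoted in those comments; the cable parameters are arbitrary illustrations.

#include <assert.h>
#include <stdint.h>

/* Standalone copy of VDO_PCABLE() so the example compiles on its own. */
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
	(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
	 | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
	 | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))

int main(void)
{
	/*
	 * HW 1, FW 1, VDO version 1.0, Type-C plug, <1 m latency, Vconn not
	 * required, 20 V max, 3 A, USB 3.2 Gen1: a typical passive USB-C cable.
	 */
	uint32_t vdo = VDO_PCABLE(1, 1, 0 /* CABLE_VDO_VER1_0 */,
				  2 /* CABLE_CTYPE */, 1 /* CABLE_LATENCY_1M */,
				  0 /* PCABLE_VCONN_NOT_REQ */,
				  0 /* CABLE_MAX_VBUS_20V */,
				  1 /* CABLE_CURR_3A */,
				  1 /* CABLE_USB32_GEN1 */);

	assert((vdo & 0x7) == 1);		/* USB highest speed, bits [2:0] */
	assert(((vdo >> 5) & 0x3) == 1);	/* Vbus current capability, bits [6:5] */
	return 0;
}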
+#define OSC_USB_PCIE_TUNNELING 0x00000004 +#define OSC_USB_XDOMAIN 0x00000008 + +extern u32 osc_sb_native_usb4_control; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 @@ -581,9 +591,6 @@ extern bool osc_pc_lpi_support_confirmed; #define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F -extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, - u32 *mask, u32 req); - /* Enable _OST when all relevant hotplug operations are enabled */ #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ @@ -739,12 +746,12 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) static inline void acpi_dev_put(struct acpi_device *adev) {} -static inline bool is_acpi_node(struct fwnode_handle *fwnode) +static inline bool is_acpi_node(const struct fwnode_handle *fwnode) { return false; } -static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) { return false; } @@ -754,7 +761,7 @@ static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwno return NULL; } -static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) { return false; } @@ -1072,19 +1079,25 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); -int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); +int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } -static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, + const char *name, int index) { return -ENXIO; } #endif +static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +{ + return acpi_dev_gpio_irq_get_by(adev, NULL, index); +} + /* Device properties */ #ifdef CONFIG_ACPI @@ -1114,14 +1127,6 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); -int acpi_dev_prop_read_single(struct acpi_device *adev, - const char *propname, enum dev_prop_type proptype, - void *val); -int acpi_node_prop_read(const struct fwnode_handle *fwnode, - const char *propname, enum dev_prop_type proptype, - void *val, size_t nval); -int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, - enum dev_prop_type proptype, void *val, size_t nval); struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); @@ -1223,30 +1228,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, return -ENXIO; } -static inline int acpi_dev_prop_read_single(const struct acpi_device *adev, - const char *propname, - enum dev_prop_type proptype, - void *val) -{ - return -ENXIO; -} - -static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode, - const char *propname, - enum dev_prop_type proptype, - void *val, size_t nval) -{ - return -ENXIO; -} - -static inline int acpi_dev_prop_read(const struct acpi_device *adev, - const 
char *propname, - enum dev_prop_type proptype, - void *val, size_t nval) -{ - return -ENXIO; -} - static inline struct fwnode_handle * acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 0bbfd647f5c6..6cc93ab5b809 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -76,7 +76,7 @@ struct amba_device { struct amba_driver { struct device_driver drv; int (*probe)(struct amba_device *, const struct amba_id *); - int (*remove)(struct amba_device *); + void (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); const struct amba_id *id_table; }; diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index d0d7d96261ad..71881a2b6f78 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -10,12 +10,17 @@ #define _LINUX_ANON_INODES_H struct file_operations; +struct inode; struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags); int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags); +int anon_inode_getfd_secure(const char *name, + const struct file_operations *fops, + void *priv, int flags, + const struct inode *context_inode); #endif /* _LINUX_ANON_INODES_H */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index f860645f6512..62c54234576c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -102,6 +102,37 @@ ARM_SMCCC_OWNER_STANDARD_HYP, \ 0x21) +/* TRNG entropy source calls (defined by ARM DEN0098) */ +#define ARM_SMCCC_TRNG_VERSION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x50) + +#define ARM_SMCCC_TRNG_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x51) + +#define ARM_SMCCC_TRNG_GET_UUID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x52) + +#define ARM_SMCCC_TRNG_RND32 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x53) + +#define ARM_SMCCC_TRNG_RND64 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD, \ + 0x53) + /* * Return codes defined in ARM DEN 0070A * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index d7493016cd46..9b02961d65ee 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -151,7 +151,7 @@ struct atm_dev { const char *type; /* device type name */ int number; /* device index */ void *dev_data; /* per-device data */ - void *phy_data; /* private PHY date */ + void *phy_data; /* private PHY data */ unsigned long flags; /* device flags (ATM_DF_*) */ struct list_head local; /* local ATM addresses */ struct list_head lecs; /* LECS ATM addresses learned via ILMI */ @@ -207,7 +207,7 @@ struct atm_skb_data { struct atm_vcc *vcc; /* ATM VCC */ unsigned long atm_options; /* ATM layer options */ unsigned int acct_truesize; /* truesize accounted to vcc */ -}; +} __packed; #define VCC_HTABLE_SIZE 32 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 0571701ab1c5..0abd93efc181 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -73,6 +73,10 @@ struct linux_binprm { #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) 
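A few hunks above, struct amba_driver's .remove callback changes from returning int to returning void, so bus removal no longer propagates a status code. A hedged sketch of a driver written against the new signature; the driver name, ID and callbacks are invented for illustration.

#include <linux/amba/bus.h>
#include <linux/module.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* claim resources, register with subsystems, ... */
	return 0;
}

/* New signature: nothing to return on removal. */
static void foo_remove(struct amba_device *adev)
{
	/* tear down in reverse order of probe */
}

static const struct amba_id foo_ids[] = {
	{ .id = 0x000bb000, .mask = 0x000fffff },	/* placeholder peripheral ID */
	{ },
};
MODULE_DEVICE_TABLE(amba, foo_ids);

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo-amba-example" },
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_amba_driver(foo_driver);

MODULE_LICENSE("GPL");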
+/* preserve argv0 for the interpreter */ +#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3 +#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT) + /* Function parameter for binfmt->coredump */ struct coredump_params { const kernel_siginfo_t *siginfo; diff --git a/include/linux/bio.h b/include/linux/bio.h index 1edda614f7ce..d0246c92a6e8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -10,6 +10,7 @@ #include <linux/ioprio.h> /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include <linux/blk_types.h> +#include <linux/uio.h> #define BIO_DEBUG @@ -19,7 +20,12 @@ #define BIO_BUG_ON #endif -#define BIO_MAX_PAGES 256 +#define BIO_MAX_VECS 256U + +static inline unsigned int bio_max_segs(unsigned int nr_segs) +{ + return min(nr_segs, BIO_MAX_VECS); +} #define bio_prio(bio) (bio)->bi_ioprio #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) @@ -328,7 +334,6 @@ struct bio_integrity_payload { struct bvec_iter bip_iter; - unsigned short bip_slab; /* slab the bip came from */ unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ @@ -406,7 +411,9 @@ extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); -extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); +struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, + struct bio_set *bs); +struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); @@ -414,16 +421,11 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio_set fs_bio_set; -static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) +static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) { return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); } -static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) -{ - return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); -} - extern blk_qc_t submit_bio(struct bio *); extern void bio_endio(struct bio *); @@ -441,6 +443,18 @@ static inline void bio_wouldblock_error(struct bio *bio) bio_endio(bio); } +/* + * Calculate number of bvec segments that should be allocated to fit data + * pointed by @iter. If @iter is backed by bvec it's going to be reused + * instead of allocating a new one. 
+ */ +static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) +{ + if (iov_iter_is_bvec(iter)) + return 0; + return iov_iter_npages(iter, max_segs); +} + struct request_queue; extern int submit_bio_wait(struct bio *bio); @@ -455,6 +469,8 @@ void bio_chain(struct bio *, struct bio *); extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); +int bio_add_zone_append_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset); bool __bio_try_merge_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off, bool *same_page); void __bio_add_page(struct bio *bio, struct page *page, @@ -478,29 +494,26 @@ static inline void zero_fill_bio(struct bio *bio) zero_fill_bio_iter(bio, bio->bi_iter); } -extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); -extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); -extern unsigned int bvec_nr_vecs(unsigned short idx); extern const char *bio_devname(struct bio *bio, char *buffer); -#define bio_set_dev(bio, bdev) \ -do { \ - if ((bio)->bi_disk != (bdev)->bd_disk) \ - bio_clear_flag(bio, BIO_THROTTLED);\ - (bio)->bi_disk = (bdev)->bd_disk; \ - (bio)->bi_partno = (bdev)->bd_partno; \ - bio_associate_blkg(bio); \ +#define bio_set_dev(bio, bdev) \ +do { \ + bio_clear_flag(bio, BIO_REMAPPED); \ + if ((bio)->bi_bdev != (bdev)) \ + bio_clear_flag(bio, BIO_THROTTLED); \ + (bio)->bi_bdev = (bdev); \ + bio_associate_blkg(bio); \ } while (0) #define bio_copy_dev(dst, src) \ do { \ - (dst)->bi_disk = (src)->bi_disk; \ - (dst)->bi_partno = (src)->bi_partno; \ + bio_clear_flag(dst, BIO_REMAPPED); \ + (dst)->bi_bdev = (src)->bi_bdev; \ bio_clone_blkg_association(dst, src); \ } while (0) #define bio_dev(bio) \ - disk_devt((bio)->bi_disk) + disk_devt((bio)->bi_bdev->bd_disk) #ifdef CONFIG_BLK_CGROUP void bio_associate_blkg(struct bio *bio); @@ -703,6 +716,7 @@ struct bio_set { mempool_t bvec_integrity_pool; #endif + unsigned int back_pad; /* * Deadlock avoidance for stacking block drivers: see comments in * bio_alloc_bioset() for details @@ -713,12 +727,6 @@ struct bio_set { struct workqueue_struct *rescue_workqueue; }; -struct biovec_slab { - int nr_vecs; - char *name; - struct kmem_cache *slab; -}; - static inline bool bioset_initialized(struct bio_set *bs) { return bs->bio_slab != NULL; diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a61f192c096b..a5a48303b0f1 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -214,7 +214,7 @@ static inline int get_count_order_long(unsigned long l) * __ffs64 - find first set bit in a 64 bit word * @word: The 64 bit word * - * On 64 bit arches this is a synomyn for __ffs + * On 64 bit arches this is a synonym for __ffs * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d705b174d346..2c473c9b8990 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,10 +140,6 @@ struct blk_mq_hw_ctx { * shared across request queues. */ atomic_t nr_active; - /** - * @elevator_queued: Number of queued requests on hctx. 
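As a side note on the reworked allocation interface, a filesystem-style caller would now size and set up a bio roughly as follows; this is only a sketch, and 'bdev', 'nr_pages' and 'sector' are assumed to come from the surrounding code:

struct bio *bio;

/* bio_max_segs() caps the requested segment count at BIO_MAX_VECS */
bio = bio_alloc(GFP_NOIO, bio_max_segs(nr_pages));
bio->bi_opf = REQ_OP_WRITE;
bio_set_dev(bio, bdev);			/* now stores bdev in bio->bi_bdev */
bio->bi_iter.bi_sector = sector;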
- */ - atomic_t elevator_queued; /** @cpuhp_online: List to store request if CPU is going to die */ struct hlist_node cpuhp_online; @@ -494,6 +490,18 @@ static inline int blk_mq_request_completed(struct request *rq) return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; } +/* + * + * Set the state to complete when completing a request from inside ->queue_rq. + * This is used by drivers that want to ensure special complete actions that + * need access to the request are called on failure, e.g. by nvme for + * multipathing. + */ +static inline void blk_mq_set_request_complete(struct request *rq) +{ + WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); +} + void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); void __blk_mq_end_request(struct request *rq, blk_status_t error); @@ -602,8 +610,8 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, rq->bio = rq->biotail = bio; rq->ioprio = bio_prio(bio); - if (bio->bi_disk) - rq->rq_disk = bio->bi_disk; + if (bio->bi_bdev) + rq->rq_disk = bio->bi_bdev->bd_disk; } blk_qc_t blk_mq_submit_bio(struct bio *bio); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 866f74261b3b..db026b6ec15a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -222,16 +222,15 @@ static inline void bio_issue_init(struct bio_issue *issue, */ struct bio { struct bio *bi_next; /* request queue link */ - struct gendisk *bi_disk; + struct block_device *bi_bdev; unsigned int bi_opf; /* bottom bits req flags, * top bits REQ_OP. Use * accessors. */ - unsigned short bi_flags; /* status, etc and bvec pool number */ + unsigned short bi_flags; /* BIO_* below */ unsigned short bi_ioprio; unsigned short bi_write_hint; blk_status_t bi_status; - u8 bi_partno; atomic_t __bi_remaining; struct bvec_iter bi_iter; @@ -304,36 +303,10 @@ enum { * of this bio. */ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ + BIO_REMAPPED, BIO_FLAG_LAST }; -/* See BVEC_POOL_OFFSET below before adding new flags */ - -/* - * We support 6 different bvec pools, the last one is magic in that it - * is backed by a mempool. - */ -#define BVEC_POOL_NR 6 -#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) - -/* - * Top 3 bits of bio flags indicate the pool the bvecs came from. We add - * 1 to the actual index so that 0 indicates that there are no bvecs to be - * freed. 
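To illustrate the helper added above (a sketch modeled on the kind of error path the comment mentions, not code taken from this series), a driver failing a request from inside its ->queue_rq() handler could do something like the following, where nvme_complete_rq() stands in for whatever driver-specific completion routine needs to see the request:

/* mark the request complete so the normal completion handling still runs */
blk_mq_set_request_complete(req);
nvme_complete_rq(req);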
- */ -#define BVEC_POOL_BITS (3) -#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) -#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) -#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1) -# error "BVEC_POOL_BITS is too small" -#endif - -/* - * Flags starting here get preserved by bio_reset() - this includes - * only BVEC_POOL_IDX() - */ -#define BIO_RESET_BITS BVEC_POOL_OFFSET - typedef __u32 __bitwise blk_mq_req_flags_t; /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f94ee3089e01..bc6bc8383b43 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -65,8 +65,6 @@ typedef void (rq_end_io_fn)(struct request *, blk_status_t); * request flags */ typedef __u32 __bitwise req_flags_t; -/* elevator knows about this request */ -#define RQF_SORTED ((__force req_flags_t)(1 << 0)) /* drive already may have started this one */ #define RQF_STARTED ((__force req_flags_t)(1 << 1)) /* may not be passed by ioscheduler */ @@ -153,7 +151,7 @@ struct request { */ union { struct hlist_node hash; /* merge hash */ - struct list_head ipi_list; + struct llist_node ipi_list; }; /* @@ -337,6 +335,7 @@ struct queue_limits { unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; + unsigned int zone_write_granularity; unsigned short max_segments; unsigned short max_integrity_segments; @@ -461,7 +460,6 @@ struct request_queue { #ifdef CONFIG_PM struct device *dev; enum rpm_status rpm_status; - unsigned int nr_pending; #endif /* @@ -948,9 +946,8 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns extern int blk_rq_map_user_iov(struct request_queue *, struct request *, struct rq_map_data *, const struct iov_iter *, gfp_t); -extern void blk_execute_rq(struct request_queue *, struct gendisk *, - struct request *, int); -extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, +extern void blk_execute_rq(struct gendisk *, struct request *, int); +extern void blk_execute_rq_nowait(struct gendisk *, struct request *, int, rq_end_io_fn *); /* Helper to convert REQ_OP_XXX to its string format XXX */ @@ -1161,6 +1158,8 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); extern void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +void blk_queue_zone_write_granularity(struct request_queue *q, + unsigned int size); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); void blk_queue_update_readahead(struct request_queue *q); @@ -1289,7 +1288,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) !list_empty(&plug->cb_list)); } -int blkdev_issue_flush(struct block_device *, gfp_t); +int blkdev_issue_flush(struct block_device *bdev); long nr_blockdev_pages(void); #else /* CONFIG_BLOCK */ struct blk_plug { @@ -1317,7 +1316,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) return false; } -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) +static inline int blkdev_issue_flush(struct block_device *bdev) { return 0; } @@ -1474,6 +1473,18 @@ static inline int bdev_io_opt(struct block_device *bdev) return queue_io_opt(bdev_get_queue(bdev)); } +static inline unsigned int +queue_zone_write_granularity(const struct request_queue *q) +{ + return q->limits.zone_write_granularity; +} + +static inline unsigned int +bdev_zone_write_granularity(struct 
block_device *bdev) +{ + return queue_zone_write_granularity(bdev_get_queue(bdev)); +} + static inline int queue_alignment_offset(const struct request_queue *q) { if (q->limits.misaligned) @@ -1954,21 +1965,9 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, void disk_end_io_acct(struct gendisk *disk, unsigned int op, unsigned long start_time); -unsigned long part_start_io_acct(struct gendisk *disk, - struct block_device **part, struct bio *bio); -void part_end_io_acct(struct block_device *part, struct bio *bio, - unsigned long start_time); - -/** - * bio_start_io_acct - start I/O accounting for bio based drivers - * @bio: bio to start account for - * - * Returns the start time that should be passed back to bio_end_io_acct(). - */ -static inline unsigned long bio_start_io_acct(struct bio *bio) -{ - return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio)); -} +unsigned long bio_start_io_acct(struct bio *bio); +void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, + struct block_device *orig_bdev); /** * bio_end_io_acct - end I/O accounting for bio based drivers @@ -1977,7 +1976,7 @@ static inline unsigned long bio_start_io_acct(struct bio *bio) */ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { - return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); + return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); } int bdev_read_only(struct block_device *bdev); @@ -2012,21 +2011,16 @@ void bdev_add(struct block_device *bdev, dev_t dev); struct block_device *I_BDEV(struct inode *inode); struct block_device *bdgrab(struct block_device *bdev); void bdput(struct block_device *); +int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, + loff_t lend); #ifdef CONFIG_BLOCK void invalidate_bdev(struct block_device *bdev); -int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, - loff_t lend); int sync_blockdev(struct block_device *bdev); #else static inline void invalidate_bdev(struct block_device *bdev) { } -static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode, - loff_t lstart, loff_t lend) -{ - return 0; -} static inline int sync_blockdev(struct block_device *bdev) { return 0; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 05556573b896..a083e15df608 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -23,8 +23,6 @@ struct blk_trace { u32 pid; u32 dev; struct dentry *dir; - struct dentry *dropped_file; - struct dentry *msg_file; struct list_head running_list; atomic_t dropped; }; @@ -119,7 +117,7 @@ struct compat_blk_user_trace_setup { #endif -extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); +void blk_fill_rwbs(char *rwbs, unsigned int op); static inline sector_t blk_rq_trace_sector(struct request *rq) { diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 72e69a0e1e8c..c42e02b4d84b 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -23,8 +23,8 @@ struct ctl_table_header; #ifdef CONFIG_CGROUP_BPF -extern struct static_key_false cgroup_bpf_enabled_key; -#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) +extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE]; +#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type]) DECLARE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); @@ -125,7 +125,8 
@@ int __cgroup_bpf_run_filter_sk(struct sock *sk, int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, enum bpf_attach_type type, - void *t_ctx); + void *t_ctx, + u32 *flags); int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, @@ -147,6 +148,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, int __user *optlen, int max_optlen, int retval); +int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, + int optname, void *optval, + int *optlen, int retval); + static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { @@ -185,7 +190,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ BPF_CGROUP_INET_INGRESS); \ \ @@ -195,7 +200,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ + if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ if (sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ @@ -207,7 +212,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_SK_PROG(sk, type) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) { \ + if (cgroup_bpf_enabled(type)) { \ __ret = __cgroup_bpf_run_filter_sk(sk, type); \ } \ __ret; \ @@ -227,33 +232,53 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ ({ \ + u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(type)) \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ - NULL); \ + NULL, \ + &__unused_flags); \ __ret; \ }) #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ ({ \ + u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled) { \ + if (cgroup_bpf_enabled(type)) { \ lock_sock(sk); \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ - t_ctx); \ + t_ctx, \ + &__unused_flags); \ release_sock(sk); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL) - -#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL) +/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags + * via upper bits of return code. The only flag that is supported + * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check + * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 
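For context on the return-code convention described in that comment, a user-space-loaded BPF program (a hypothetical sketch, not part of this patch) could ask the kernel to waive the CAP_NET_BIND_SERVICE check like this:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/bind4")
int allow_privileged_port(struct bpf_sock_addr *ctx)
{
	/* bit 0 is the allow verdict; after the ">> 1" in the runner macro,
	 * bit 1 becomes BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE
	 */
	return 3;
}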
+ */ +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \ +({ \ + u32 __flags = 0; \ + int __ret = 0; \ + if (cgroup_bpf_enabled(type)) { \ + lock_sock(sk); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + NULL, &__flags); \ + release_sock(sk); \ + if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ + *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \ + } \ + __ret; \ +}) -#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ - sk->sk_prot->pre_connect) +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \ + ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \ + cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \ + (sk)->sk_prot->pre_connect) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) @@ -297,7 +322,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \ sock_ops, \ BPF_CGROUP_SOCK_OPS); \ @@ -307,7 +332,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled && (sock_ops)->sk) { \ + if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ if (__sk && sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ @@ -320,7 +345,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \ __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ access, \ BPF_CGROUP_DEVICE); \ @@ -332,7 +357,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ buf, count, pos, \ BPF_CGROUP_SYSCTL); \ @@ -343,7 +368,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -354,7 +379,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled) \ + if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ get_user(__ret, optlen); \ __ret; \ }) @@ -363,11 +388,24 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled) \ - __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ - optname, optval, \ - optlen, max_optlen, \ - retval); \ + if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ + !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ + tcp_bpf_bypass_getsockopt, \ + level, optname)) \ + __ret = __cgroup_bpf_run_filter_getsockopt( \ + sock, level, optname, optval, optlen, \ + max_optlen, retval); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \ + 
optlen, retval) \ +({ \ + int __ret = retval; \ + if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + __ret = __cgroup_bpf_run_filter_getsockopt_kern( \ + sock, level, optname, optval, optlen, retval); \ __ret; \ }) @@ -427,15 +465,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, return 0; } -#define cgroup_bpf_enabled (0) +#define cgroup_bpf_enabled(type) (0) #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) @@ -452,6 +489,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ optlen, max_optlen, retval) ({ retval; }) +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \ + optlen, retval) ({ retval; }) #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ kernel_optval) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 07cb5d15e743..cccaef1088ea 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -14,7 +14,6 @@ #include <linux/numa.h> #include <linux/mm_types.h> #include <linux/wait.h> -#include <linux/u64_stats_sync.h> #include <linux/refcount.h> #include <linux/mutex.h> #include <linux/module.h> @@ -507,12 +506,6 @@ enum bpf_cgroup_storage_type { */ #define MAX_BPF_FUNC_ARGS 12 -struct bpf_prog_stats { - u64 cnt; - u64 nsecs; - struct u64_stats_sync syncp; -} __aligned(2 * sizeof(u64)); - struct btf_func_model { u8 ret_size; u8 nr_args; @@ -536,7 +529,7 @@ struct btf_func_model { /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
Pick a number to fit into BPF_IMAGE_SIZE / 2 */ -#define BPF_MAX_TRAMP_PROGS 40 +#define BPF_MAX_TRAMP_PROGS 38 struct bpf_tramp_progs { struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; @@ -568,10 +561,10 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, struct bpf_tramp_progs *tprogs, void *orig_call); /* these two functions are called from generated trampoline */ -u64 notrace __bpf_prog_enter(void); +u64 notrace __bpf_prog_enter(struct bpf_prog *prog); void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); -void notrace __bpf_prog_enter_sleepable(void); -void notrace __bpf_prog_exit_sleepable(void); +u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog); +void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start); struct bpf_ksym { unsigned long start; @@ -761,9 +754,15 @@ struct bpf_ctx_arg_aux { u32 btf_id; }; +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; + u32 used_btf_cnt; u32 max_ctx_offset; u32 max_pkt_offset; u32 max_tp_access; @@ -802,6 +801,7 @@ struct bpf_prog_aux { const struct bpf_prog_ops *ops; struct bpf_map **used_maps; struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ + struct btf_mod_pair *used_btfs; struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ @@ -838,7 +838,6 @@ struct bpf_prog_aux { u32 linfo_idx; u32 num_exentries; struct exception_table_entry *extable; - struct bpf_prog_stats __percpu *stats; union { struct work_struct work; struct rcu_head rcu; @@ -1066,6 +1065,34 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *include_prog, struct bpf_prog_array **new_array); +/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ +#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) +/* BPF program asks to set CN on the packet. */ +#define BPF_RET_SET_CN (1 << 0) + +#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 _ret = 1; \ + u32 func_ret; \ + migrate_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + func_ret = func(_prog, ctx); \ + _ret &= (func_ret & 1); \ + *(ret_flags) |= (func_ret >> 1); \ + _item++; \ + } \ + rcu_read_unlock(); \ + migrate_enable(); \ + _ret; \ + }) + #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ ({ \ struct bpf_prog_array_item *_item; \ @@ -1113,25 +1140,11 @@ _out: \ */ #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ ({ \ - struct bpf_prog_array_item *_item; \ - struct bpf_prog *_prog; \ - struct bpf_prog_array *_array; \ - u32 ret; \ - u32 _ret = 1; \ - u32 _cn = 0; \ - migrate_disable(); \ - rcu_read_lock(); \ - _array = rcu_dereference(array); \ - _item = &_array->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - bpf_cgroup_storage_set(_item->cgroup_storage); \ - ret = func(_prog, ctx); \ - _ret &= (ret & 1); \ - _cn |= (ret & 2); \ - _item++; \ - } \ - rcu_read_unlock(); \ - migrate_enable(); \ + u32 _flags = 0; \ + bool _cn; \ + u32 _ret; \ + _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \ + _cn = _flags & BPF_RET_SET_CN; \ if (_ret) \ _ret = (_cn ? 
NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ @@ -1206,8 +1219,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i); void bpf_prog_inc(struct bpf_prog *prog); struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); -void __bpf_free_used_maps(struct bpf_prog_aux *aux, - struct bpf_map **used_maps, u32 len); void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); @@ -1271,6 +1282,11 @@ static inline bool bpf_allow_ptr_leaks(void) return perfmon_capable(); } +static inline bool bpf_allow_uninit_stack(void) +{ + return perfmon_capable(); +} + static inline bool bpf_allow_ptr_to_map_access(void) { return perfmon_capable(); @@ -1403,7 +1419,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, union bpf_attr __user *uattr); + +#ifndef CONFIG_BPF_JIT_ALWAYS_ON void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); +#endif struct btf *bpf_get_btf_vmlinux(void); @@ -1667,12 +1686,18 @@ bpf_base_func_proto(enum bpf_func_id func_id) } #endif /* CONFIG_BPF_SYSCALL */ +void __bpf_free_used_btfs(struct bpf_prog_aux *aux, + struct btf_mod_pair *used_btfs, u32 len); + static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) { return bpf_prog_get_type_dev(ufd, type, false); } +void __bpf_free_used_maps(struct bpf_prog_aux *aux, + struct bpf_map **used_maps, u32 len); + bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); int bpf_prog_offload_compile(struct bpf_prog *prog); @@ -1860,6 +1885,7 @@ extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; extern const struct bpf_func_proto bpf_sock_from_file_proto; +extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e941fe1484e5..971b33aca13d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -195,7 +195,7 @@ struct bpf_func_state { * 0 = main function, 1 = first callee. 
*/ u32 frameno; - /* subprog number == index within subprog_stack_depth + /* subprog number == index within subprog_info * zero == main subprog */ u32 subprogno; @@ -340,6 +340,7 @@ struct bpf_insn_aux_data { }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ +#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */ #define BPF_VERIFIER_TMP_LOG_SIZE 1024 @@ -398,9 +399,12 @@ struct bpf_verifier_env { struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ + struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */ u32 used_map_cnt; /* number of used maps */ + u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool allow_uninit_stack; bool allow_ptr_to_map_access; bool bpf_capable; bool bypass_spec_v1; @@ -467,6 +471,8 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); +int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + u32 regno, u32 mem_size); /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index d0bd226d6bd9..c2c2147dfeb8 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -31,6 +31,7 @@ #define PHY_ID_BCM89610 0x03625cd0 #define PHY_ID_BCM72113 0x35905310 +#define PHY_ID_BCM72116 0x35905350 #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7255 0xae025120 #define PHY_ID_BCM7260 0xae025190 @@ -60,19 +61,11 @@ #define PHY_BCM_OUI_5 0x03625e00 #define PHY_BCM_OUI_6 0xae025000 -#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 -#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 -#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 -#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 -#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 -#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 -#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 -#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 -#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 -#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 -#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 -#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 -#define PHY_BRCM_EN_MASTER_MODE 0x00010000 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001 +#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002 +#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004 +#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008 +#define PHY_BRCM_EN_MASTER_MODE 0x00000010 /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) @@ -136,6 +129,7 @@ #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 @@ -197,6 +191,7 @@ #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 #define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 #define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 +#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100 /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a @@ -222,6 +217,9 @@ /* 11111: Mode Control Register */ #define BCM54XX_SHD_MODE 0x1f 
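The PHY_BRCM_* values above are renumbered into a compact set, but consumers keep passing them through phydev->dev_flags. A hypothetical MAC-driver snippet (names assumed) would look like:

/* ask the BCM54xx PHY for auto power-down and for gating TXC/RXC when no
 * link energy is detected
 */
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE |
		     PHY_BRCM_DIS_TXCRXC_NOENRGY;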
#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */ +#define BCM54XX_SHD_INTF_SEL_RGMII 0x02 +#define BCM54XX_SHD_INTF_SEL_SGMII 0x04 +#define BCM54XX_SHD_INTF_SEL_GBIC 0x06 #define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */ /* @@ -257,7 +255,6 @@ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) -#define BCM54810_SHD_SCR3_TRDDAPD 0x0100 /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) diff --git a/include/linux/btf.h b/include/linux/btf.h index 4c200f5d242b..7fabf1428093 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -91,6 +91,9 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, int btf_get_fd_by_id(u32 id); u32 btf_obj_id(const struct btf *btf); bool btf_is_kernel(const struct btf *btf); +bool btf_is_module(const struct btf *btf); +struct module *btf_try_get_module(const struct btf *btf); +u32 btf_nr_types(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); diff --git a/include/linux/buildid.h b/include/linux/buildid.h new file mode 100644 index 000000000000..40232f90db6e --- /dev/null +++ b/include/linux/buildid.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUILDID_H +#define _LINUX_BUILDID_H + +#include <linux/mm_types.h> + +#define BUILD_ID_SIZE_MAX 20 + +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, + __u32 *size); + +#endif diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h new file mode 100644 index 000000000000..707575c668f4 --- /dev/null +++ b/include/linux/can/bittiming.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _CAN_BITTIMING_H +#define _CAN_BITTIMING_H + +#include <linux/netdevice.h> +#include <linux/can/netlink.h> + +#define CAN_SYNC_SEG 1 + +#ifdef CONFIG_CAN_CALC_BITTIMING +int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc); +#else /* !CONFIG_CAN_CALC_BITTIMING */ +static inline int +can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc) +{ + netdev_err(dev, "bit-timing calculation not available\n"); + return -EINVAL; +} +#endif /* CONFIG_CAN_CALC_BITTIMING */ + +int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt); + +/* + * can_bit_time() - Duration of one bit + * + * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for + * additional information. + * + * Return: the number of time quanta in one bit. 
+ */ +static inline unsigned int can_bit_time(const struct can_bittiming *bt) +{ + return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2; +} + +#endif /* !_CAN_BITTIMING_H */ diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h index 2f5d731ae251..8afa92d15a66 100644 --- a/include/linux/can/can-ml.h +++ b/include/linux/can/can-ml.h @@ -44,6 +44,7 @@ #include <linux/can.h> #include <linux/list.h> +#include <linux/netdevice.h> #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) #define CAN_EFF_RCV_HASH_BITS 10 @@ -65,4 +66,15 @@ struct can_ml_priv { #endif }; +static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev) +{ + return netdev_get_ml_priv(dev, ML_PRIV_CAN); +} + +static inline void can_set_ml_priv(struct net_device *dev, + struct can_ml_priv *ml_priv) +{ + netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN); +} + #endif /* CAN_ML_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 197a79535cc2..ac4d83a1ab81 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -15,8 +15,10 @@ #define _CAN_DEV_H #include <linux/can.h> +#include <linux/can/bittiming.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/length.h> #include <linux/can/netlink.h> #include <linux/can/skb.h> #include <linux/netdevice.h> @@ -82,118 +84,6 @@ struct can_priv { #endif }; -#define CAN_SYNC_SEG 1 - -/* - * can_bit_time() - Duration of one bit - * - * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for - * additional information. - * - * Return: the number of time quanta in one bit. - */ -static inline unsigned int can_bit_time(const struct can_bittiming *bt) -{ - return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2; -} - -/* - * can_cc_dlc2len(value) - convert a given data length code (dlc) of a - * Classical CAN frame into a valid data length of max. 8 bytes. - * - * To be used in the CAN netdriver receive path to ensure conformance with - * ISO 11898-1 Chapter 8.4.2.3 (DLC field) - */ -#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN)) - -/* Check for outgoing skbs that have not been created by the CAN subsystem */ -static inline bool can_skb_headroom_valid(struct net_device *dev, - struct sk_buff *skb) -{ - /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ - if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) - return false; - - /* af_packet does not apply CAN skb specific settings */ - if (skb->ip_summed == CHECKSUM_NONE) { - /* init headroom */ - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* perform proper loopback on capable devices */ - if (dev->flags & IFF_ECHO) - skb->pkt_type = PACKET_LOOPBACK; - else - skb->pkt_type = PACKET_HOST; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - } - - return true; -} - -/* Drop a given socketbuffer if it does not contain a valid CAN frame. 
*/ -static inline bool can_dropped_invalid_skb(struct net_device *dev, - struct sk_buff *skb) -{ - const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (skb->protocol == htons(ETH_P_CAN)) { - if (unlikely(skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN)) - goto inval_skb; - } else if (skb->protocol == htons(ETH_P_CANFD)) { - if (unlikely(skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN)) - goto inval_skb; - } else - goto inval_skb; - - if (!can_skb_headroom_valid(dev, skb)) - goto inval_skb; - - return false; - -inval_skb: - kfree_skb(skb); - dev->stats.tx_dropped++; - return true; -} - -static inline bool can_is_canfd_skb(const struct sk_buff *skb) -{ - /* the CAN specific type of skb is identified by its data length */ - return skb->len == CANFD_MTU; -} - -/* helper to get the data length code (DLC) for Classical CAN raw DLC access */ -static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode) -{ - /* return len8_dlc as dlc value only if all conditions apply */ - if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && - (cf->len == CAN_MAX_DLEN) && - (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC)) - return cf->len8_dlc; - - /* return the payload length as dlc value */ - return cf->len; -} - -/* helper to set len and len8_dlc value for Classical CAN raw DLC access */ -static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc, - const u32 ctrlmode) -{ - /* the caller already ensured that dlc is a value from 0 .. 15 */ - if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN) - cf->len8_dlc = dlc; - - /* limit the payload length 'len' to CAN_MAX_DLEN */ - cf->len = can_cc_dlc2len(dlc); -} /* helper to define static CAN controller features at device creation time */ static inline void can_set_static_ctrlmode(struct net_device *dev, @@ -210,11 +100,7 @@ static inline void can_set_static_ctrlmode(struct net_device *dev, dev->mtu = CANFD_MTU; } -/* get data length from raw data length code (DLC) */ -u8 can_fd_dlc2len(u8 dlc); - -/* map the sanitized data length to an appropriate data length code */ -u8 can_fd_len2dlc(u8 len); +void can_setup(struct net_device *dev); struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs); @@ -237,26 +123,18 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); +const char *can_get_state_str(const enum can_state state); void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state); -int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, - unsigned int idx); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, - u8 *len_ptr); -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); -void can_free_echo_skb(struct net_device *dev, unsigned int idx); - #ifdef CONFIG_OF void of_can_transceiver(struct net_device *dev); #else static inline void of_can_transceiver(struct net_device *dev) { } #endif -struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); -struct sk_buff *alloc_canfd_skb(struct net_device *dev, - struct canfd_frame **cfd); -struct sk_buff *alloc_can_err_skb(struct net_device *dev, - struct can_frame **cf); +extern struct rtnl_link_ops can_link_ops; +int can_netlink_register(void); +void can_netlink_unregister(void); #endif /* !_CAN_DEV_H */ diff --git a/include/linux/can/length.h b/include/linux/can/length.h 
new file mode 100644 index 000000000000..6995092b774e --- /dev/null +++ b/include/linux/can/length.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net> + * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de> + */ + +#ifndef _CAN_LENGTH_H +#define _CAN_LENGTH_H + +/* + * Size of a Classical CAN Standard Frame + * + * Name of Field Bits + * --------------------------------------------------------- + * Start-of-frame 1 + * Identifier 11 + * Remote transmission request (RTR) 1 + * Identifier extension bit (IDE) 1 + * Reserved bit (r0) 1 + * Data length code (DLC) 4 + * Data field 0...64 + * CRC 15 + * CRC delimiter 1 + * ACK slot 1 + * ACK delimiter 1 + * End-of-frame (EOF) 7 + * Inter frame spacing 3 + * + * rounded up and ignoring bitstuffing + */ +#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8) + +/* + * Size of a Classical CAN Extended Frame + * + * Name of Field Bits + * --------------------------------------------------------- + * Start-of-frame 1 + * Identifier A 11 + * Substitute remote request (SRR) 1 + * Identifier extension bit (IDE) 1 + * Identifier B 18 + * Remote transmission request (RTR) 1 + * Reserved bits (r1, r0) 2 + * Data length code (DLC) 4 + * Data field 0...64 + * CRC 15 + * CRC delimiter 1 + * ACK slot 1 + * ACK delimiter 1 + * End-of-frame (EOF) 7 + * Inter frame spacing 3 + * + * rounded up and ignoring bitstuffing + */ +#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8) + +/* + * Size of a CAN-FD Standard Frame + * + * Name of Field Bits + * --------------------------------------------------------- + * Start-of-frame 1 + * Identifier 11 + * Reserved bit (r1) 1 + * Identifier extension bit (IDE) 1 + * Flexible data rate format (FDF) 1 + * Reserved bit (r0) 1 + * Bit Rate Switch (BRS) 1 + * Error Status Indicator (ESI) 1 + * Data length code (DLC) 4 + * Data field 0...512 + * Stuff Bit Count (SBC) 0...16: 4 20...64:5 + * CRC 0...16: 17 20...64:21 + * CRC delimiter (CD) 1 + * ACK slot (AS) 1 + * ACK delimiter (AD) 1 + * End-of-frame (EOF) 7 + * Inter frame spacing 3 + * + * assuming CRC21, rounded up and ignoring bitstuffing + */ +#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8) + +/* + * Size of a CAN-FD Extended Frame + * + * Name of Field Bits + * --------------------------------------------------------- + * Start-of-frame 1 + * Identifier A 11 + * Substitute remote request (SRR) 1 + * Identifier extension bit (IDE) 1 + * Identifier B 18 + * Reserved bit (r1) 1 + * Flexible data rate format (FDF) 1 + * Reserved bit (r0) 1 + * Bit Rate Switch (BRS) 1 + * Error Status Indicator (ESI) 1 + * Data length code (DLC) 4 + * Data field 0...512 + * Stuff Bit Count (SBC) 0...16: 4 20...64:5 + * CRC 0...16: 17 20...64:21 + * CRC delimiter (CD) 1 + * ACK slot (AS) 1 + * ACK delimiter (AD) 1 + * End-of-frame (EOF) 7 + * Inter frame spacing 3 + * + * assuming CRC21, rounded up and ignoring bitstuffing + */ +#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8) + +/* + * Maximum size of a Classical CAN frame + * (rounded up and ignoring bitstuffing) + */ +#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN) + +/* + * Maximum size of a CAN-FD frame + * (rounded up and ignoring bitstuffing) + */ +#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN) + +/* + * can_cc_dlc2len(value) - convert a given data length code (dlc) of a + * Classical CAN frame into a valid data length of max. 8 bytes. 
+ * + * To be used in the CAN netdriver receive path to ensure conformance with + * ISO 11898-1 Chapter 8.4.2.3 (DLC field) + */ +#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN)) + +/* helper to get the data length code (DLC) for Classical CAN raw DLC access */ +static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode) +{ + /* return len8_dlc as dlc value only if all conditions apply */ + if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && + (cf->len == CAN_MAX_DLEN) && + (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC)) + return cf->len8_dlc; + + /* return the payload length as dlc value */ + return cf->len; +} + +/* helper to set len and len8_dlc value for Classical CAN raw DLC access */ +static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc, + const u32 ctrlmode) +{ + /* the caller already ensured that dlc is a value from 0 .. 15 */ + if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN) + cf->len8_dlc = dlc; + + /* limit the payload length 'len' to CAN_MAX_DLEN */ + cf->len = can_cc_dlc2len(dlc); +} + +/* get data length from raw data length code (DLC) */ +u8 can_fd_dlc2len(u8 dlc); + +/* map the sanitized data length to an appropriate data length code */ +u8 can_fd_len2dlc(u8 len); + +/* calculate the CAN Frame length in bytes of a given skb */ +unsigned int can_skb_get_frame_len(const struct sk_buff *skb); + +/* map the data length to an appropriate data link layer length */ +static inline u8 canfd_sanitize_len(u8 len) +{ + return can_fd_dlc2len(can_fd_len2dlc(len)); +} + +#endif /* !_CAN_LENGTH_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index f1b38088b765..40882df7105e 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -44,7 +44,8 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, - unsigned int idx, u32 timestamp); + unsigned int idx, u32 timestamp, + unsigned int *frame_len_ptr); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb); void can_rx_offload_del(struct can_rx_offload *offload); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index fc61cf4eff1c..d438eb058069 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -16,6 +16,20 @@ #include <linux/can.h> #include <net/sock.h> +void can_flush_echo_skb(struct net_device *dev); +int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx, unsigned int frame_len); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, + u8 *len_ptr, unsigned int *frame_len_ptr); +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *frame_len_ptr); +void can_free_echo_skb(struct net_device *dev, unsigned int idx); +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd); +struct sk_buff *alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf); + /* * The struct can_skb_priv is used to transport additional information along * with the stored struct can(fd)_frame that can not be contained in existing @@ -29,11 +43,13 @@ * struct can_skb_priv - private additional data inside CAN sk_buffs * @ifindex: ifindex of the first interface the CAN frame appeared on * 
@skbcnt: atomic counter to have an unique id together with skb pointer + * @frame_len: length of CAN frame in data link layer * @cf: align to the following CAN frame at skb->data */ struct can_skb_priv { int ifindex; int skbcnt; + unsigned int frame_len; struct can_frame cf[]; }; @@ -49,8 +65,12 @@ static inline void can_skb_reserve(struct sk_buff *skb) static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { - if (sk) { - sock_hold(sk); + /* If the socket has already been closed by user space, the + * refcount may already be 0 (and the socket will be freed + * after the last TX skb has been freed). So only increase + * socket refcount if the refcount is > 0. + */ + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { skb->destructor = sock_efree; skb->sk = sk; } @@ -74,4 +94,68 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) return nskb; } +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + +/* Drop a given socketbuffer if it does not contain a valid CAN frame. 
*/ +static inline bool can_dropped_invalid_skb(struct net_device *dev, + struct sk_buff *skb) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + + if (skb->protocol == htons(ETH_P_CAN)) { + if (unlikely(skb->len != CAN_MTU || + cfd->len > CAN_MAX_DLEN)) + goto inval_skb; + } else if (skb->protocol == htons(ETH_P_CANFD)) { + if (unlikely(skb->len != CANFD_MTU || + cfd->len > CANFD_MAX_DLEN)) + goto inval_skb; + } else + goto inval_skb; + + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + + return false; + +inval_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return true; +} + +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + #endif /* !_CAN_SKB_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index b2f698915c0f..65efb74c3585 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -247,8 +247,11 @@ static inline bool ns_capable_setid(struct user_namespace *ns, int cap) return true; } #endif /* CONFIG_MULTIUSER */ -extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); -extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); +bool privileged_wrt_inode_uidgid(struct user_namespace *ns, + struct user_namespace *mnt_userns, + const struct inode *inode); +bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns, + const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); static inline bool perfmon_capable(void) @@ -268,8 +271,11 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) } /* audit system wants to get cap info from files as well */ -extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); +int get_vfs_caps_from_disk(struct user_namespace *mnt_userns, + const struct dentry *dentry, + struct cpu_vfs_cap_data *cpu_caps); -extern int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size); +int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry, + const void **ivalue, size_t size); #endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index eb9008bb3992..409d8c29bc4f 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -32,10 +32,9 @@ #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */ -#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ -#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ -#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs (msgr1) */ -#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ +#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */ +#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */ +#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */ #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 4060004968c8..6617d9c68d86 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h @@ -4,7 +4,7 @@ * Version: 0.1.0 * Description: cfag12864b LCD driver header * 
- * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org> * Date: 2006-10-12 */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 451c2d26a5db..4f2f79de083e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -307,7 +307,7 @@ void css_task_iter_end(struct css_task_iter *it); * Inline functions. */ -static inline u64 cgroup_id(struct cgroup *cgrp) +static inline u64 cgroup_id(const struct cgroup *cgrp) { return cgrp->kn->id; } @@ -701,7 +701,7 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen); struct cgroup_subsys_state; struct cgroup; -static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; } +static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; } static inline void css_get(struct cgroup_subsys_state *css) {} static inline void css_put(struct cgroup_subsys_state *css) {} static inline int cgroup_attach_task_all(struct task_struct *from, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index e4316890661a..58f6fe866ae9 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -941,7 +941,9 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); void clk_hw_unregister_fixed_factor(struct clk_hw *hw); - +struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); /** * struct clk_fractional_divider - adjustable fractional divider clock * diff --git a/include/linux/clk.h b/include/linux/clk.h index 31ff1bf1b79f..266e8de3cb51 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -92,7 +92,7 @@ struct clk_bulk_data { #ifdef CONFIG_COMMON_CLK /** - * clk_notifier_register: register a clock rate-change notifier callback + * clk_notifier_register - register a clock rate-change notifier callback * @clk: clock whose rate we are interested in * @nb: notifier block with callback function pointer * @@ -103,7 +103,7 @@ struct clk_bulk_data { int clk_notifier_register(struct clk *clk, struct notifier_block *nb); /** - * clk_notifier_unregister: unregister a clock rate-change notifier callback + * clk_notifier_unregister - unregister a clock rate-change notifier callback * @clk: clock whose rate we are no longer interested in * @nb: notifier block which will be unregistered */ @@ -238,6 +238,7 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) #endif +#ifdef CONFIG_HAVE_CLK_PREPARE /** * clk_prepare - prepare a clock source * @clk: clock source @@ -246,10 +247,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) * * Must not be called from within atomic context. */ -#ifdef CONFIG_HAVE_CLK_PREPARE int clk_prepare(struct clk *clk); int __must_check clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks); + +/** + * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. + * @clk: clock source + * + * Returns true if clk_prepare() implicitly enables the clock, effectively + * making clk_enable()/clk_disable() no-ops, false otherwise. + * + * This is of interest mainly to the power management code where actually + * disabling the clock also requires unpreparing it to have any material + * effect. 
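A minimal sketch of how power-management code might consume the new query (assuming 'clk' is currently prepared and enabled; not code from this patch):

if (clk_is_enabled_when_prepared(clk))
	clk_disable_unprepare(clk);	/* clk_disable() alone would not gate it */
else
	clk_disable(clk);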
+ * + * Regardless of the value returned here, the caller must always invoke + * clk_enable() or clk_prepare_enable() and counterparts for usage counts + * to be right. + */ +bool clk_is_enabled_when_prepared(struct clk *clk); #else static inline int clk_prepare(struct clk *clk) { @@ -263,6 +280,11 @@ clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) might_sleep(); return 0; } + +static inline bool clk_is_enabled_when_prepared(struct clk *clk) +{ + return false; +} #endif /** diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h new file mode 100644 index 000000000000..75a0d9696552 --- /dev/null +++ b/include/linux/clk/imx.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Freescale Semiconductor, Inc. + * + * Author: Lee Jones <lee.jones@linaro.org> + */ + +#ifndef __LINUX_CLK_IMX_H +#define __LINUX_CLK_IMX_H + +#include <linux/types.h> + +void imx6sl_set_wait_clk(bool enter); + +#endif diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h new file mode 100644 index 000000000000..a64d034ceddd --- /dev/null +++ b/include/linux/clk/spear.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 STMicroelectronics - All Rights Reserved + * + * Author: Lee Jones <lee.jones@linaro.org> + */ + +#ifndef __LINUX_CLK_SPEAR_H +#define __LINUX_CLK_SPEAR_H + +#ifdef CONFIG_MACH_SPEAR1310 +void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base); +#else +static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {} +#endif + +#ifdef CONFIG_MACH_SPEAR1340 +void __init spear1340_clk_init(void __iomem *misc_base); +#else +static inline void spear1340_clk_init(void __iomem *misc_base) {} +#endif + +#endif diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 3f01d43f0598..eb016fc9cc0b 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -136,6 +136,7 @@ extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); extern void tegra210_clk_emc_update_setting(u32 emc_src_value); struct clk; +struct tegra_emc; typedef long (tegra20_clk_emc_round_cb)(unsigned long rate, unsigned long min_rate, @@ -146,6 +147,13 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, void *cb_arg); int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc, + unsigned long rate); +typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc, + unsigned long rate); +void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, + tegra124_emc_complete_timing_change_cb *complete_cb); + struct tegra210_clk_emc_config { unsigned long rate; bool same_freq; diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 98cff1b4b088..d217c382b02d 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -3,16 +3,6 @@ #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif -#define CLANG_VERSION (__clang_major__ * 10000 \ - + __clang_minor__ * 100 \ - + __clang_patchlevel__) - -#if CLANG_VERSION < 100001 -#ifndef __BPF_TRACING__ -# error Sorry, your version of Clang is too old - please use 10.0.1 or newer. 
-#endif -#endif - /* Compiler specific definitions for Clang compiler */ /* same as gcc, this was present in clang-2.6 so we can assume it works @@ -41,6 +31,12 @@ #define __no_sanitize_thread #endif +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#define __HAVE_BUILTIN_BSWAP16__ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ + #if __has_feature(undefined_behavior_sanitizer) /* GCC does not have __SANITIZE_UNDEFINED__ */ #define __no_sanitize_undefined \ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 555ab0fddbef..48750243db4c 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -10,17 +10,6 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ -#if GCC_VERSION < 40900 -# error Sorry, your version of GCC is too old - please use 4.9 or newer. -#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 -/* - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 - * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk - */ -# error Sorry, your version of GCC is too old - please use 5.1 or newer. -#endif - /* * This macro obfuscates arithmetic on a variable address so that gcc * shouldn't recognize the original var, and make assumptions about it. diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h new file mode 100644 index 000000000000..2b2972c77c62 --- /dev/null +++ b/include/linux/compiler-version.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifdef __LINUX_COMPILER_VERSION_H +#error "Please do not include <linux/compiler-version.h>. This is done by the build system." +#endif +#define __LINUX_COMPILER_VERSION_H + +/* + * This header exists to force full rebuild when the compiler is upgraded. + * + * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT" + * and add dependency on include/config/cc/version/text.h, which is touched + * by Kconfig when the version string from the compiler changes. + */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index b8fe0c23cfff..df5b405e6305 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -76,6 +76,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #else # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) +# define likely_notrace(x) likely(x) +# define unlikely_notrace(x) unlikely(x) #endif /* Optimization barrier */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index ea5e04e75845..c043b8d2b17b 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -211,6 +211,12 @@ #endif /* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes + * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten + */ +# define __flatten __attribute__((flatten)) + +/* * Note the missing underscores. 
* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute diff --git a/include/linux/connector.h b/include/linux/connector.h index 8ea860efea37..487350bb19c3 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -99,7 +99,7 @@ void cn_del_callback(const struct cb_id *id); int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); /** - * cn_netlink_send_mult - Sends message to the specified groups. + * cn_netlink_send - Sends message to the specified groups. * * @msg: message header(with attached data). * @portid: destination port. diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index b0e35eec6499..4ac5c081af93 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -10,17 +10,27 @@ #define CORESIGHT_ETM_PMU_NAME "cs_etm" #define CORESIGHT_ETM_PMU_SEED 0x10 -/* ETMv3.5/PTM's ETMCR config bit */ -#define ETM_OPT_CYCACC 12 -#define ETM_OPT_CTXTID 14 -#define ETM_OPT_TS 28 -#define ETM_OPT_RETSTK 29 +/* + * Below are the definition of bit offsets for perf option, and works as + * arbitrary values for all ETM versions. + * + * Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore, + * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and + * directly use below macros as config bits. + */ +#define ETM_OPT_CYCACC 12 +#define ETM_OPT_CTXTID 14 +#define ETM_OPT_CTXTID2 15 +#define ETM_OPT_TS 28 +#define ETM_OPT_RETSTK 29 /* ETMv4 CONFIGR programming bits for the ETM OPTs */ #define ETM4_CFG_BIT_CYCACC 4 #define ETM4_CFG_BIT_CTXTID 6 +#define ETM4_CFG_BIT_VMID 7 #define ETM4_CFG_BIT_TS 11 #define ETM4_CFG_BIT_RETSTK 12 +#define ETM4_CFG_BIT_VMID_OPT 15 static inline int coresight_get_trace_id(int cpu) { diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 7d3c87e5b97c..976ec2697610 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -7,6 +7,7 @@ #define _LINUX_CORESIGHT_H #include <linux/device.h> +#include <linux/io.h> #include <linux/perf_event.h> #include <linux/sched.h> @@ -115,6 +116,32 @@ struct coresight_platform_data { }; /** + * struct csdev_access - Abstraction of a CoreSight device access. + * + * @io_mem : True if the device has memory mapped I/O + * @base : When io_mem == true, base address of the component + * @read : Read from the given "offset" of the given instance. + * @write : Write "val" to the given "offset". + */ +struct csdev_access { + bool io_mem; + union { + void __iomem *base; + struct { + u64 (*read)(u32 offset, bool relaxed, bool _64bit); + void (*write)(u64 val, u32 offset, bool relaxed, + bool _64bit); + }; + }; +}; + +#define CSDEV_ACCESS_IOMEM(_addr) \ + ((struct csdev_access) { \ + .io_mem = true, \ + .base = (_addr), \ + }) + +/** * struct coresight_desc - description of a component required from drivers * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. @@ -125,6 +152,7 @@ struct coresight_platform_data { * @groups: operations specific to this component. These will end up * in the component's sysfs sub-directory. * @name: name for the coresight device, also shown under sysfs. 
+ * @access: Describe access to the device */ struct coresight_desc { enum coresight_dev_type type; @@ -134,6 +162,7 @@ struct coresight_desc { struct device *dev; const struct attribute_group **groups; const char *name; + struct csdev_access access; }; /** @@ -173,7 +202,8 @@ struct coresight_sysfs_link { * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined - by @coresight_ops. + * by @coresight_ops. + * @access: Device i/o access abstraction for this device. * @dev: The device entity associated to this component. * @refcnt: keep track of what is in use. * @orphan: true if the component has connections that haven't been linked. @@ -195,6 +225,7 @@ struct coresight_device { enum coresight_dev_type type; union coresight_dev_subtype subtype; const struct coresight_ops *ops; + struct csdev_access access; struct device dev; atomic_t *refcnt; bool orphan; @@ -326,23 +357,133 @@ struct coresight_ops { }; #if IS_ENABLED(CONFIG_CORESIGHT) + +static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa, + u32 offset) +{ + if (likely(csa->io_mem)) + return readl_relaxed(csa->base + offset); + + return csa->read(offset, true, false); +} + +static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset) +{ + if (likely(csa->io_mem)) + return readl(csa->base + offset); + + return csa->read(offset, false, false); +} + +static inline void csdev_access_relaxed_write32(struct csdev_access *csa, + u32 val, u32 offset) +{ + if (likely(csa->io_mem)) + writel_relaxed(val, csa->base + offset); + else + csa->write(val, offset, true, false); +} + +static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset) +{ + if (likely(csa->io_mem)) + writel(val, csa->base + offset); + else + csa->write(val, offset, false, false); +} + +#ifdef CONFIG_64BIT + +static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa, + u32 offset) +{ + if (likely(csa->io_mem)) + return readq_relaxed(csa->base + offset); + + return csa->read(offset, true, true); +} + +static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset) +{ + if (likely(csa->io_mem)) + return readq(csa->base + offset); + + return csa->read(offset, false, true); +} + +static inline void csdev_access_relaxed_write64(struct csdev_access *csa, + u64 val, u32 offset) +{ + if (likely(csa->io_mem)) + writeq_relaxed(val, csa->base + offset); + else + csa->write(val, offset, true, true); +} + +static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset) +{ + if (likely(csa->io_mem)) + writeq(val, csa->base + offset); + else + csa->write(val, offset, false, true); +} + +#else /* !CONFIG_64BIT */ + +static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa, + u32 offset) +{ + WARN_ON(1); + return 0; +} + +static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset) +{ + WARN_ON(1); + return 0; +} + +static inline void csdev_access_relaxed_write64(struct csdev_access *csa, + u64 val, u32 offset) +{ + WARN_ON(1); +} + +static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset) +{ + WARN_ON(1); +} +#endif /* CONFIG_64BIT */ + extern struct coresight_device * coresight_register(struct coresight_desc *desc); extern void coresight_unregister(struct coresight_device *csdev); extern int coresight_enable(struct coresight_device *csdev); extern void coresight_disable(struct coresight_device *csdev); -extern int coresight_timeout(void 
__iomem *addr, u32 offset, +extern int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value); -extern int coresight_claim_device(void __iomem *base); -extern int coresight_claim_device_unlocked(void __iomem *base); +extern int coresight_claim_device(struct coresight_device *csdev); +extern int coresight_claim_device_unlocked(struct coresight_device *csdev); -extern void coresight_disclaim_device(void __iomem *base); -extern void coresight_disclaim_device_unlocked(void __iomem *base); +extern void coresight_disclaim_device(struct coresight_device *csdev); +extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev); extern char *coresight_alloc_device_name(struct coresight_dev_list *devs, struct device *dev); extern bool coresight_loses_context_with_cpu(struct device *dev); + +u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset); +u32 coresight_read32(struct coresight_device *csdev, u32 offset); +void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset); +void coresight_relaxed_write32(struct coresight_device *csdev, + u32 val, u32 offset); +u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset); +u64 coresight_read64(struct coresight_device *csdev, u32 offset); +void coresight_relaxed_write64(struct coresight_device *csdev, + u64 val, u32 offset); +void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset); + #else static inline struct coresight_device * coresight_register(struct coresight_desc *desc) { return NULL; } @@ -350,29 +491,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {} static inline int coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } static inline void coresight_disable(struct coresight_device *csdev) {} -static inline int coresight_timeout(void __iomem *addr, u32 offset, - int position, int value) { return 1; } -static inline int coresight_claim_device_unlocked(void __iomem *base) + +static inline int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return 1; +} + +static inline int coresight_claim_device_unlocked(struct coresight_device *csdev) { return -EINVAL; } -static inline int coresight_claim_device(void __iomem *base) +static inline int coresight_claim_device(struct coresight_device *csdev) { return -EINVAL; } -static inline void coresight_disclaim_device(void __iomem *base) {} -static inline void coresight_disclaim_device_unlocked(void __iomem *base) {} +static inline void coresight_disclaim_device(struct coresight_device *csdev) {} +static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {} static inline bool coresight_loses_context_with_cpu(struct device *dev) { return false; } -#endif + +static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) +{ + WARN_ON_ONCE(1); + return 0; +} + +static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset) +{ + WARN_ON_ONCE(1); + return 0; +} + +static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset) +{ +} + +static inline void coresight_relaxed_write32(struct coresight_device *csdev, + u32 val, u32 offset) +{ +} + +static inline u64 coresight_relaxed_read64(struct coresight_device *csdev, + u32 offset) +{ + WARN_ON_ONCE(1); + return 0; +} + +static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset) +{ + WARN_ON_ONCE(1); + return 0; +} + +static inline void coresight_relaxed_write64(struct 
coresight_device *csdev, + u64 val, u32 offset) +{ +} + +static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset) +{ +} + +#endif /* IS_ENABLED(CONFIG_CORESIGHT) */ extern int coresight_get_cpu(struct device *dev); struct coresight_platform_data *coresight_get_platform_data(struct device *dev); -#endif +#endif /* _LINUX_COREISGHT_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d6428aaf67e7..94a578a96202 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -108,9 +108,13 @@ static inline void cpu_maps_update_done(void) { } +static inline int add_cpu(unsigned int cpu) { return 0;} + #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; +extern int lockdep_is_cpus_held(void); + #ifdef CONFIG_HOTPLUG_CPU extern void cpus_write_lock(void); extern void cpus_write_unlock(void); @@ -135,6 +139,7 @@ static inline int cpus_read_trylock(void) { return true; } static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } +static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9c8b7437b6cd..353969c7acd3 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -387,14 +387,22 @@ struct cpufreq_driver { /* flags */ -/* driver isn't removed even if all ->init() calls failed */ -#define CPUFREQ_STICKY BIT(0) +/* + * Set by drivers that need to update internale upper and lower boundaries along + * with the target frequency and so the core and governors should also invoke + * the diver if the target frequency does not change, but the policy min or max + * may have changed. + */ +#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0) /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ #define CPUFREQ_CONST_LOOPS BIT(1) -/* don't warn on suspend/resume speed mismatches */ -#define CPUFREQ_PM_NO_WARN BIT(2) +/* + * Set by drivers that want the core to automatically register the cpufreq + * driver as a thermal cooling device. + */ +#define CPUFREQ_IS_COOLING_DEV BIT(2) /* * This should be set by platforms having multiple clock-domains, i.e. @@ -426,20 +434,6 @@ struct cpufreq_driver { */ #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) -/* - * Set by drivers that want the core to automatically register the cpufreq - * driver as a thermal cooling device. - */ -#define CPUFREQ_IS_COOLING_DEV BIT(7) - -/* - * Set by drivers that need to update internale upper and lower boundaries along - * with the target frequency and so the core and governors should also invoke - * the diver if the target frequency does not change, but the policy min or max - * may have changed. 
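The coresight.h hunks above replace raw void __iomem parameters with struct csdev_access. As a rough illustration (not taken from any in-tree driver), a component driver could describe its MMIO window at registration time and then read registers through the abstraction; the mycs_* names and the MYCS_STATUS offset are invented for the example:

#include <linux/coresight.h>
#include <linux/err.h>
#include <linux/io.h>

#define MYCS_STATUS	0x0c			/* hypothetical register offset */

struct mycs_drvdata {				/* hypothetical driver state */
	void __iomem *base;
	struct coresight_device *csdev;
};

static int mycs_register(struct device *dev, struct mycs_drvdata *drvdata)
{
	struct coresight_desc desc = { };

	desc.dev = dev;
	/* Describe the component as memory-mapped I/O. */
	desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
	/* type, subtype, ops, pdata and name omitted for brevity */

	drvdata->csdev = coresight_register(&desc);
	return IS_ERR(drvdata->csdev) ? PTR_ERR(drvdata->csdev) : 0;
}

static u32 mycs_status(struct mycs_drvdata *drvdata)
{
	/* Same call path whether the device is MMIO or uses custom accessors. */
	return csdev_access_relaxed_read32(&drvdata->csdev->access, MYCS_STATUS);
}

With the descriptor carrying the access method, helpers such as coresight_claim_device() and coresight_timeout() can take the coresight_device or csdev_access instead of a base address, which is what the signature changes above reflect.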
- */ -#define CPUFREQ_NEED_UPDATE_LIMITS BIT(8) - int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 0042ef362511..f14adb882338 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -168,6 +168,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, + CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, @@ -185,6 +186,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, + CPUHP_AP_PERF_CSKY_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, @@ -193,6 +195,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_X86_KVM_CLK_ONLINE, + CPUHP_AP_DTPM_CPU_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; diff --git a/include/linux/cred.h b/include/linux/cred.h index 18639c069263..4c6350503697 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -25,7 +25,7 @@ struct inode; struct group_info { atomic_t usage; int ngroups; - kgid_t gid[0]; + kgid_t gid[]; } __randomize_layout; /** diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ef90e07c9635..da5e0d74bb2f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -151,9 +151,12 @@ * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual * declaration) is used to ensure that the crypto_tfm context structure is * aligned correctly for the given architecture so that there are no alignment - * faults for C data types. In particular, this is required on platforms such - * as arm where pointers are 32-bit aligned but there are data types such as - * u64 which require 64-bit alignment. + * faults for C data types. On architectures that support non-cache coherent + * DMA, such as ARM or arm64, it also takes into account the minimal alignment + * that is required to ensure that the context struct member does not share any + * cachelines with the rest of the struct. This is needed to ensure that cache + * maintenance for non-coherent DMA (cache invalidation in particular) does not + * affect data that may be accessed by the CPU concurrently. */ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN @@ -636,10 +639,6 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; -struct crypto_cipher { - struct crypto_tfm base; -}; - struct crypto_comp { struct crypto_tfm base; }; @@ -743,165 +742,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) return __alignof__(tfm->__crt_ctx); } -/** - * DOC: Single Block Cipher API - * - * The single block cipher API is used with the ciphers of type - * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). - * - * Using the single block cipher API calls, operations with the basic cipher - * primitive can be implemented. These cipher primitives exclude any block - * chaining operations including IV handling. - * - * The purpose of this single block cipher API is to support the implementation - * of templates or other concepts that only need to perform the cipher operation - * on one block at a time. Templates invoke the underlying cipher primitive - * block-wise and process either the input or the output data of these cipher - * operations. 
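Purely as an illustration of the two renumbered cpufreq flags described above (this is not an existing driver), a driver that needs limit updates propagated and wants automatic cooling-device registration would set both bits:

#include <linux/cpufreq.h>

static struct cpufreq_driver my_cpufreq_driver = {
	.name	= "my-cpufreq",		/* hypothetical */
	.flags	= CPUFREQ_NEED_UPDATE_LIMITS | CPUFREQ_IS_COOLING_DEV,
	/* ->init, ->verify, ->target_index and the rest omitted */
};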
- */ - -static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) -{ - return (struct crypto_cipher *)tfm; -} - -/** - * crypto_alloc_cipher() - allocate single block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * single block cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for a single block cipher. The returned struct - * crypto_cipher is the cipher handle that is required for any subsequent API - * invocation for that single block cipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); -} - -static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) -{ - return &tfm->base; -} - -/** - * crypto_free_cipher() - zeroize and free the single block cipher handle - * @tfm: cipher handle to be freed - */ -static inline void crypto_free_cipher(struct crypto_cipher *tfm) -{ - crypto_free_tfm(crypto_cipher_tfm(tfm)); -} - -/** - * crypto_has_cipher() - Search for the availability of a single block cipher - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * single block cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Return: true when the single block cipher is known to the kernel crypto API; - * false otherwise - */ -static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_CIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; - - return crypto_has_alg(alg_name, type, mask); -} - -/** - * crypto_cipher_blocksize() - obtain block size for cipher - * @tfm: cipher handle - * - * The block size for the single block cipher referenced with the cipher handle - * tfm is returned. The caller may use that information to allocate appropriate - * memory for the data returned by the encryption or decryption operation - * - * Return: block size of cipher - */ -static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) -{ - return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); -} - -static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); -} - -static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) -{ - return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); -} - -static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, - u32 flags) -{ - crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); -} - -static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, - u32 flags) -{ - crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); -} - -/** - * crypto_cipher_setkey() - set key for cipher - * @tfm: cipher handle - * @key: buffer holding the key - * @keylen: length of the key in bytes - * - * The caller provided key is set for the single block cipher referenced by the - * cipher handle. - * - * Note, the key length determines the cipher type. Many block ciphers implement - * different cipher modes depending on the key size, such as AES-128 vs AES-192 - * vs. AES-256. 
When providing a 16 byte key for an AES cipher handle, AES-128 - * is performed. - * - * Return: 0 if the setting of the key was successful; < 0 if an error occurred - */ -int crypto_cipher_setkey(struct crypto_cipher *tfm, - const u8 *key, unsigned int keylen); - -/** - * crypto_cipher_encrypt_one() - encrypt one block of plaintext - * @tfm: cipher handle - * @dst: points to the buffer that will be filled with the ciphertext - * @src: buffer holding the plaintext to be encrypted - * - * Invoke the encryption operation of one block. The caller must ensure that - * the plaintext and ciphertext buffers are at least one block in size. - */ -void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src); - -/** - * crypto_cipher_decrypt_one() - decrypt one block of ciphertext - * @tfm: cipher handle - * @dst: points to the buffer that will be filled with the plaintext - * @src: buffer holding the ciphertext to be decrypted - * - * Invoke the decryption operation of one block. The caller must ensure that - * the plaintext and ciphertext buffers are at least one block in size. - */ -void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, - u8 *dst, const u8 *src); - static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { return (struct crypto_comp *)tfm; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d7b369fc15d3..c1e48014106f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -262,6 +262,8 @@ extern void d_tmpfile(struct dentry *, struct inode *); extern struct dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); +extern struct dentry *d_find_alias_rcu(struct inode *); + /* test whether we have any submounts in a subdir tree */ extern int path_has_submounts(const struct path *); diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h deleted file mode 100644 index ddfdac20cad0..000000000000 --- a/include/linux/dcookies.h +++ /dev/null @@ -1,69 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * dcookies.h - * - * Persistent cookie-path mappings - * - * Copyright 2002 John Levon <levon@movementarian.org> - */ - -#ifndef DCOOKIES_H -#define DCOOKIES_H - - -#ifdef CONFIG_PROFILING - -#include <linux/dcache.h> -#include <linux/types.h> - -struct dcookie_user; -struct path; - -/** - * dcookie_register - register a user of dcookies - * - * Register as a dcookie user. Returns %NULL on failure. - */ -struct dcookie_user * dcookie_register(void); - -/** - * dcookie_unregister - unregister a user of dcookies - * - * Unregister as a dcookie user. This may invalidate - * any dcookie values returned from get_dcookie(). - */ -void dcookie_unregister(struct dcookie_user * user); - -/** - * get_dcookie - acquire a dcookie - * - * Convert the given dentry/vfsmount pair into - * a cookie value. - * - * Returns -EINVAL if no living task has registered as a - * dcookie user. 
- * - * Returns 0 on success, with *cookie filled in - */ -int get_dcookie(const struct path *path, unsigned long *cookie); - -#else - -static inline struct dcookie_user * dcookie_register(void) -{ - return NULL; -} - -static inline void dcookie_unregister(struct dcookie_user * user) -{ - return; -} - -static inline int get_dcookie(const struct path *path, unsigned long *cookie) -{ - return -ENOSYS; -} - -#endif /* CONFIG_PROFILING */ - -#endif /* DCOOKIES_H */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index b6d3bae1c74d..26ea0850be9b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -137,6 +137,7 @@ struct devfreq_stats { * using devfreq. * @profile: device-specific devfreq profile * @governor: method how to choose frequency based on the usage. + * @opp_table: Reference to OPP table of dev.parent, if one exists. * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. @@ -173,6 +174,7 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; + struct opp_table *opp_table; struct notifier_block nb; struct delayed_work work; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 61a66fb8ebb3..7f4ac87c0b32 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -93,9 +93,18 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); +#ifdef CONFIG_BLK_DEV_ZONED typedef int (*dm_report_zones_fn) (struct dm_target *ti, struct dm_report_zones_args *args, unsigned int nr_zones); +#else +/* + * Define dm_report_zones_fn so that targets can assign to NULL if + * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do + * awkward #ifdefs in their target_type, etc. + */ +typedef int (*dm_report_zones_fn) (struct dm_target *dummy); +#endif /* * These iteration functions are typically used to check (and combine) @@ -187,9 +196,7 @@ struct target_type { dm_status_fn status; dm_message_fn message; dm_prepare_ioctl_fn prepare_ioctl; -#ifdef CONFIG_BLK_DEV_ZONED dm_report_zones_fn report_zones; -#endif dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; @@ -248,8 +255,13 @@ struct target_type { /* * Indicates that a target supports host-managed zoned block devices. */ +#ifdef CONFIG_BLK_DEV_ZONED #define DM_TARGET_ZONED_HM 0x00000040 #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM) +#else +#define DM_TARGET_ZONED_HM 0x00000000 +#define dm_target_supports_zoned_hm(type) (false) +#endif /* * A target handles REQ_NOWAIT @@ -257,6 +269,12 @@ struct target_type { #define DM_TARGET_NOWAIT 0x00000080 #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT) +/* + * A target supports passing through inline crypto support. + */ +#define DM_TARGET_PASSES_CRYPTO 0x00000100 +#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO) + struct dm_target { struct dm_table *table; struct target_type *type; @@ -325,6 +343,11 @@ struct dm_target { * whether or not its underlying devices have support. */ bool discards_supported:1; + + /* + * Set if we need to limit the number of in-flight bios when swapping. 
+ */ + bool limit_swap_bios:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -534,6 +557,11 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *t); /* + * Table keyslot manager functions + */ +void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); + +/* * A wrapper around vmalloc. */ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); diff --git a/include/linux/device.h b/include/linux/device.h index 1779f90eeb4c..ba660731bd25 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -291,6 +291,7 @@ struct device_dma_parameters { * sg limitations. */ unsigned int max_segment_size; + unsigned int min_align_mask; unsigned long segment_boundary_mask; }; @@ -323,6 +324,7 @@ enum device_link_state { * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. * MANAGED: The core tracks presence of supplier/consumer drivers (internal). * SYNC_STATE_ONLY: Link only affects sync_state() behavior. + * INFERRED: Inferred from data (eg: firmware) and not from driver actions. */ #define DL_FLAG_STATELESS BIT(0) #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) @@ -332,6 +334,7 @@ enum device_link_state { #define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) #define DL_FLAG_MANAGED BIT(6) #define DL_FLAG_SYNC_STATE_ONLY BIT(7) +#define DL_FLAG_INFERRED BIT(8) /** * enum dl_dev_state - Device driver presence tracking information. diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index ee7ba5b5417e..a498ebcf4993 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -75,7 +75,7 @@ enum probe_type { * @resume: Called to bring a device from sleep mode. * @groups: Default attributes that get created by the driver core * automatically. - * @dev_groups: Additional attributes attached to device instance once the + * @dev_groups: Additional attributes attached to device instance once * it is bound to the driver. * @pm: Power management operations of the device which matched * this driver. diff --git a/include/linux/dfl.h b/include/linux/dfl.h new file mode 100644 index 000000000000..6cc10982351a --- /dev/null +++ b/include/linux/dfl.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for DFL driver and device API + * + * Copyright (C) 2020 Intel Corporation, Inc. + */ + +#ifndef __LINUX_DFL_H +#define __LINUX_DFL_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +/** + * enum dfl_id_type - define the DFL FIU types + */ +enum dfl_id_type { + FME_ID = 0, + PORT_ID = 1, + DFL_ID_MAX, +}; + +/** + * struct dfl_device - represent an dfl device on dfl bus + * + * @dev: generic device interface. + * @id: id of the dfl device. + * @type: type of DFL FIU of the device. See enum dfl_id_type. + * @feature_id: feature identifier local to its DFL FIU type. + * @mmio_res: mmio resource of this dfl device. + * @irqs: list of Linux IRQ numbers of this dfl device. + * @num_irqs: number of IRQs supported by this dfl device. + * @cdev: pointer to DFL FPGA container device this dfl device belongs to. + * @id_entry: matched id entry in dfl driver's id table. + */ +struct dfl_device { + struct device dev; + int id; + u16 type; + u16 feature_id; + struct resource mmio_res; + int *irqs; + unsigned int num_irqs; + struct dfl_fpga_cdev *cdev; + const struct dfl_device_id *id_entry; +}; + +/** + * struct dfl_driver - represent an dfl device driver + * + * @drv: driver model structure. + * @id_table: pointer to table of device IDs the driver is interested in. 
+ * { } member terminated. + * @probe: mandatory callback for device binding. + * @remove: callback for device unbinding. + */ +struct dfl_driver { + struct device_driver drv; + const struct dfl_device_id *id_table; + + int (*probe)(struct dfl_device *dfl_dev); + void (*remove)(struct dfl_device *dfl_dev); +}; + +#define to_dfl_dev(d) container_of(d, struct dfl_device, dev) +#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv) + +/* + * use a macro to avoid include chaining to get THIS_MODULE. + */ +#define dfl_driver_register(drv) \ + __dfl_driver_register(drv, THIS_MODULE) +int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner); +void dfl_driver_unregister(struct dfl_driver *dfl_drv); + +/* + * module_dfl_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit(). + */ +#define module_dfl_driver(__dfl_driver) \ + module_driver(__dfl_driver, dfl_driver_register, \ + dfl_driver_unregister) + +#endif /* __LINUX_DFL_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 09e23adb351d..9f12efaaa93a 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {} int dma_fence_signal(struct dma_fence *fence); int dma_fence_signal_locked(struct dma_fence *fence); +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp); +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, + ktime_t timestamp); signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout); int dma_fence_add_callback(struct dma_fence *fence, diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 454e354d1ffb..0c05561cad6e 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -16,15 +16,15 @@ struct dma_heap; /** * struct dma_heap_ops - ops to operate on a given heap - * @allocate: allocate dmabuf and return fd + * @allocate: allocate dmabuf and return struct dma_buf ptr * - * allocate returns dmabuf fd on success, -errno on error. + * allocate returns dmabuf on success, ERR_PTR(-errno) on error. */ struct dma_heap_ops { - int (*allocate)(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags); + struct dma_buf *(*allocate)(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags); }; /** @@ -51,6 +51,15 @@ struct dma_heap_export_info { void *dma_heap_get_drvdata(struct dma_heap *heap); /** + * dma_heap_get_name() - get heap name + * @heap: DMA-Heap to retrieve private data for + * + * Returns: + * The char* for the heap name. 
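A skeletal DFL feature driver built on the new <linux/dfl.h> API above might look like the following; the myfeat names and MYFEAT_FEATURE_ID value are assumptions made for the example, and only dfl_device, dfl_driver and module_dfl_driver() come from the header:

#include <linux/dfl.h>
#include <linux/module.h>

#define MYFEAT_FEATURE_ID	0x23		/* hypothetical feature ID */

static int myfeat_probe(struct dfl_device *dfl_dev)
{
	dev_info(&dfl_dev->dev, "feature 0x%x at %pR\n",
		 dfl_dev->feature_id, &dfl_dev->mmio_res);
	return 0;
}

static void myfeat_remove(struct dfl_device *dfl_dev)
{
	/* nothing to tear down in this sketch */
}

static const struct dfl_device_id myfeat_ids[] = {
	{ .type = FME_ID, .feature_id = MYFEAT_FEATURE_ID },
	{ }
};

static struct dfl_driver myfeat_driver = {
	.drv		= { .name = "dfl-myfeat" },
	.id_table	= myfeat_ids,
	.probe		= myfeat_probe,
	.remove		= myfeat_remove,
};
module_dfl_driver(myfeat_driver);

MODULE_LICENSE("GPL");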
+ */ +const char *dma_heap_get_name(struct dma_heap *heap); + +/** * dma_heap_add - adds a heap to dmabuf heaps * @exp_info: information needed to register this heap */ diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 70fcd0f610ea..51872e736e7b 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -22,11 +22,6 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir); - void *(*alloc_noncoherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, enum dma_data_direction dir, - gfp_t gfp); - void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -229,11 +224,10 @@ bool dma_free_from_pool(struct device *dev, void *start, size_t size); int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, dma_addr_t dma_start, u64 size); -#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H -#include <asm/dma-coherence.h> -#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +extern bool dma_default_coherent; static inline bool dev_is_dma_coherent(struct device *dev) { return dev->dma_coherent; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2e49996a8f39..2a984cb4d1e0 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -263,10 +263,19 @@ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir); -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, enum dma_data_direction dir); + +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp); + return page ? 
page_address(page) : NULL; +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir) +{ + dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir); +} static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -500,6 +509,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) return -EIO; } +static inline unsigned int dma_get_min_align_mask(struct device *dev) +{ + if (dev->dma_parms) + return dev->dma_parms->min_align_mask; + return 0; +} + +static inline int dma_set_min_align_mask(struct device *dev, + unsigned int min_align_mask) +{ + if (WARN_ON_ONCE(!dev->dma_parms)) + return -EIO; + dev->dma_parms->min_align_mask = min_align_mask; + return 0; +} + static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 36e22c5a0f29..5f106d852f1c 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -42,14 +42,14 @@ enum psil_endpoint_type { /** * struct psil_endpoint_config - PSI-L Endpoint configuration * @ep_type: PSI-L endpoint type + * @channel_tpl: Desired throughput level for the channel * @pkt_mode: If set, the channel must be in Packet mode, otherwise in * TR mode * @notdpkt: TDCM must be suppressed on the TX channel * @needs_epib: Endpoint needs EPIB - * @psd_size: If set, PSdata is used by the endpoint - * @channel_tpl: Desired throughput level for the channel * @pdma_acc32: ACC32 must be enabled on the PDMA side * @pdma_burst: BURST must be enabled on the PDMA side + * @psd_size: If set, PSdata is used by the endpoint * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels. 
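Caller-side sketch of two of the DMA API changes above: dma_alloc_noncoherent()/dma_free_noncoherent() are now inline wrappers around dma_alloc_pages(), and dma_set_min_align_mask() lets a driver require that low-order address bits be preserved by bounce buffering. The device, buffer size and the 4 KiB mask below are assumptions for illustration only:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static void *my_setup_dma_buffer(struct device *dev, size_t size,
				 dma_addr_t *dma_handle)
{
	void *vaddr;

	/* Ask the DMA layer to preserve the low 12 address bits when bouncing. */
	if (dma_set_min_align_mask(dev, SZ_4K - 1))
		return NULL;			/* dev->dma_parms not set up */

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle,
				      DMA_BIDIRECTIONAL, GFP_KERNEL);
	return vaddr;	/* pair with dma_free_noncoherent() on teardown */
}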
* The thread must be serviced by the specified channel if * mapped_channel_id is >= 0 in case of PKTDMA @@ -62,23 +62,22 @@ enum psil_endpoint_type { */ struct psil_endpoint_config { enum psil_endpoint_type ep_type; + enum udma_tp_level channel_tpl; unsigned pkt_mode:1; unsigned notdpkt:1; unsigned needs_epib:1; - u32 psd_size; - enum udma_tp_level channel_tpl; - /* PDMA properties, valid for PSIL_EP_PDMA_* */ unsigned pdma_acc32:1; unsigned pdma_burst:1; + u32 psd_size; /* PKDMA mapped channel */ - int mapped_channel_id; + s16 mapped_channel_id; /* PKTDMA tflow and rflow ranges for mapped channel */ u16 flow_start; u16 flow_num; - u16 default_flow_id; + s16 default_flow_id; }; int psil_set_new_ep_config(struct device *dev, const char *name, diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h deleted file mode 100644 index 25cab62a28c4..000000000000 --- a/include/linux/dma/mmp-pdma.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MMP_PDMA_H_ -#define _MMP_PDMA_H_ - -struct dma_chan; - -#ifdef CONFIG_MMP_PDMA -bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param); -#else -static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) -{ - return false; -} -#endif - -#endif /* _MMP_PDMA_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 68130f5f599e..004736b6a9c8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -745,6 +745,8 @@ enum dmaengine_alignment { DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, }; /** diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 65565820328a..e04436a7ff27 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -138,6 +138,7 @@ extern void intel_iommu_shutdown(void); extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); +extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg); extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); @@ -149,6 +150,7 @@ static inline void intel_iommu_shutdown(void) { } #define dmar_parse_one_atsr dmar_res_noop #define dmar_check_one_atsr dmar_res_noop #define dmar_release_one_atsr dmar_res_noop +#define dmar_parse_one_satc dmar_res_noop static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 88cd72dfa4e0..b12b05f1c8b4 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -64,6 +64,10 @@ int dsa_8021q_rx_source_port(u16 vid); u16 dsa_8021q_rx_subvlan(u16 vid); +bool vid_is_dsa_8021q_rxvlan(u16 vid); + +bool vid_is_dsa_8021q_txvlan(u16 vid); + bool vid_is_dsa_8021q(u16 vid); #else @@ -123,6 +127,16 @@ u16 dsa_8021q_rx_subvlan(u16 vid) return 0; } +bool vid_is_dsa_8021q_rxvlan(u16 vid) +{ + return false; +} + +bool vid_is_dsa_8021q_txvlan(u16 vid) +{ + return false; +} + bool vid_is_dsa_8021q(u16 vid) { return false; diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h new file mode 100644 index 000000000000..47545a948784 --- /dev/null +++ b/include/linux/dsa/brcm.h @@ -0,0 +1,16 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only + * Copyright (C) 2014 Broadcom Corporation + */ + +/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and + * net/dsa/tag_brcm.c + */ +#ifndef _NET_DSA_BRCM_H +#define _NET_DSA_BRCM_H + +/* Broadcom tag specific helpers to insert and extract queue/port number */ +#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q) +#define BRCM_TAG_GET_PORT(v) ((v) >> 8) +#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff) + +#endif diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h new file mode 100644 index 000000000000..4265f328681a --- /dev/null +++ b/include/linux/dsa/ocelot.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright 2019-2021 NXP Semiconductors + */ + +#ifndef _NET_DSA_TAG_OCELOT_H +#define _NET_DSA_TAG_OCELOT_H + +#include <linux/packing.h> + +#define OCELOT_TAG_LEN 16 +#define OCELOT_SHORT_PREFIX_LEN 4 +#define OCELOT_LONG_PREFIX_LEN 16 +#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN) + +/* The CPU injection header and the CPU extraction header can have 3 types of + * prefixes: long, short and no prefix. The format of the header itself is the + * same in all 3 cases. + * + * Extraction with long prefix: + * + * +-------------------+-------------------+------+------+------------+-------+ + * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame | + * | | | | | header | | + * +-------------------+-------------------+------+------+------------+-------+ + * 48 bits 48 bits 16 bits 16 bits 128 bits + * + * Extraction with short prefix: + * + * +------+------+------------+-------+ + * | 8880 | 000a | extraction | frame | + * | | | header | | + * +------+------+------------+-------+ + * 16 bits 16 bits 128 bits + * + * Extraction with no prefix: + * + * +------------+-------+ + * | extraction | frame | + * | header | | + * +------------+-------+ + * 128 bits + * + * + * Injection with long prefix: + * + * +-------------------+-------------------+------+------+------------+-------+ + * | any dmac | any smac | 8880 | 000a | injection | frame | + * | | | | | header | | + * +-------------------+-------------------+------+------+------------+-------+ + * 48 bits 48 bits 16 bits 16 bits 128 bits + * + * Injection with short prefix: + * + * +------+------+------------+-------+ + * | 8880 | 000a | injection | frame | + * | | | header | | + * +------+------+------------+-------+ + * 16 bits 16 bits 128 bits + * + * Injection with no prefix: + * + * +------------+-------+ + * | injection | frame | + * | header | | + * +------------+-------+ + * 128 bits + * + * The injection header looks like this (network byte order, bit 127 + * is part of lowest address byte in memory, bit 0 is part of highest + * address byte): + * + * +------+------+------+------+------+------+------+------+ + * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP| + * +------+------+------+------+------+------+------+------+ + * 119:112 | REW_OP | + * +------+------+------+------+------+------+------+------+ + * 111:104 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 103: 96 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 95: 88 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 87: 80 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 79: 72 | RSV | + * +------+------+------+------+------+------+------+------+ + * 71: 64 | RSV | DEST | + * +------+------+------+------+------+------+------+------+ + * 63: 56 
| DEST | + * +------+------+------+------+------+------+------+------+ + * 55: 48 | RSV | + * +------+------+------+------+------+------+------+------+ + * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER| + * +------+------+------+------+------+------+------+------+ + * 39: 32 | TFRM_TIMER | RSV | + * +------+------+------+------+------+------+------+------+ + * 31: 24 | RSV | DP | POP_CNT | CPUQ | + * +------+------+------+------+------+------+------+------+ + * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE| + * +------+------+------+------+------+------+------+------+ + * 15: 8 | PCP | DEI | VID | + * +------+------+------+------+------+------+------+------+ + * 7: 0 | VID | + * +------+------+------+------+------+------+------+------+ + * + * And the extraction header looks like this: + * + * +------+------+------+------+------+------+------+------+ + * 127:120 | RSV | REW_OP | + * +------+------+------+------+------+------+------+------+ + * 119:112 | REW_OP | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 111:104 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 103: 96 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 95: 88 | REW_VAL | + * +------+------+------+------+------+------+------+------+ + * 87: 80 | REW_VAL | LLEN | + * +------+------+------+------+------+------+------+------+ + * 79: 72 | LLEN | WLEN | + * +------+------+------+------+------+------+------+------+ + * 71: 64 | WLEN | RSV | + * +------+------+------+------+------+------+------+------+ + * 63: 56 | RSV | + * +------+------+------+------+------+------+------+------+ + * 55: 48 | RSV | + * +------+------+------+------+------+------+------+------+ + * 47: 40 | RSV | SRC_PORT | ACL_ID | + * +------+------+------+------+------+------+------+------+ + * 39: 32 | ACL_ID | RSV | SFLOW_ID | + * +------+------+------+------+------+------+------+------+ + * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ | + * +------+------+------+------+------+------+------+------+ + * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE| + * +------+------+------+------+------+------+------+------+ + * 15: 8 | PCP | DEI | VID | + * +------+------+------+------+------+------+------+------+ + * 7: 0 | VID | + * +------+------+------+------+------+------+------+------+ + */ + +static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val) +{ + packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_xfh_get_len(void *extraction, u64 *len) +{ + u64 llen, wlen; + + packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0); + packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0); + + *len = 60 * wlen + llen - 80; +} + +static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port) +{ + packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_xfh_get_cpuq(void *extraction, u64 *cpuq) +{ + packing(extraction, cpuq, 28, 20, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class) +{ + packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type) +{ + packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci) +{ + packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0); +} + +static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass) +{ + 
packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op) +{ + packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_dest(void *injection, u64 dest) +{ + packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class) +{ + packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void seville_ifh_set_dest(void *injection, u64 dest) +{ + packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_src(void *injection, u64 src) +{ + packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type) +{ + packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0); +} + +static inline void ocelot_ifh_set_vid(void *injection, u64 vid) +{ + packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0); +} + +#endif diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h new file mode 100644 index 000000000000..e80a332e3d8a --- /dev/null +++ b/include/linux/dtpm.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Linaro Ltd + * + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + */ +#ifndef ___DTPM_H__ +#define ___DTPM_H__ + +#include <linux/powercap.h> + +#define MAX_DTPM_DESCR 8 +#define MAX_DTPM_CONSTRAINTS 1 + +struct dtpm { + struct powercap_zone zone; + struct dtpm *parent; + struct list_head sibling; + struct list_head children; + struct dtpm_ops *ops; + unsigned long flags; + u64 power_limit; + u64 power_max; + u64 power_min; + int weight; + void *private; +}; + +struct dtpm_ops { + u64 (*set_power_uw)(struct dtpm *, u64); + u64 (*get_power_uw)(struct dtpm *); + void (*release)(struct dtpm *); +}; + +struct dtpm_descr; + +typedef int (*dtpm_init_t)(struct dtpm_descr *); + +struct dtpm_descr { + struct dtpm *parent; + const char *name; + dtpm_init_t init; +}; + +/* Init section thermal table */ +extern struct dtpm_descr *__dtpm_table[]; +extern struct dtpm_descr *__dtpm_table_end[]; + +#define DTPM_TABLE_ENTRY(name) \ + static typeof(name) *__dtpm_table_entry_##name \ + __used __section("__dtpm_table") = &name + +#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name) + +#define for_each_dtpm_table(__dtpm) \ + for (__dtpm = __dtpm_table; \ + __dtpm < __dtpm_table_end; \ + __dtpm++) + +static inline struct dtpm *to_dtpm(struct powercap_zone *zone) +{ + return container_of(zone, struct dtpm, zone); +} + +int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max); + +int dtpm_release_zone(struct powercap_zone *pcz); + +struct dtpm *dtpm_alloc(struct dtpm_ops *ops); + +void dtpm_unregister(struct dtpm *dtpm); + +int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); + +int dtpm_register_cpu(struct dtpm *parent); + +#endif diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index eec7928ff8fe..99580c22f91a 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data { #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) /* Instructions such as EWEN are (addrlen + 2) in length. 
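The ocelot_xfh_get_*() helpers above are thin packing() wrappers, so a tag driver never hard-codes bit positions. A rough receive-side sketch, with the skb plumbing reduced to a minimum and the function name invented for the example (xfh is assumed to already point at the 128-bit extraction header):

#include <linux/dsa/ocelot.h>
#include <linux/skbuff.h>

static void my_parse_extraction_header(struct sk_buff *skb, void *xfh)
{
	u64 src_port, qos_class, vlan_tci;

	ocelot_xfh_get_src_port(xfh, &src_port);
	ocelot_xfh_get_qos_class(xfh, &qos_class);
	ocelot_xfh_get_vlan_tci(xfh, &vlan_tci);

	skb->priority = qos_class;
	/* src_port would normally select the DSA user port for this skb. */
}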
*/ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) +/* Add extra cycle after address during a read */ +#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) /* * optional hooks to control additional logic diff --git a/include/linux/efi.h b/include/linux/efi.h index 763b816ba19c..8710f5710c1d 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -29,10 +29,10 @@ #include <asm/page.h> #define EFI_SUCCESS 0 -#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) +#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) #define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1))) #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) -#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1))) #define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1))) @@ -167,8 +167,6 @@ struct capsule_info { int __efi_capsule_setup_info(struct capsule_info *cap_info); -typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); - /* * Types and defines for Time Services */ @@ -605,10 +603,6 @@ efi_guid_to_str(efi_guid_t *guid, char *out) } extern void efi_init (void); -extern void *efi_get_pal_addr (void); -extern void efi_map_pal_code (void); -extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); -extern void efi_gettimeofday (struct timespec64 *ts); #ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #else @@ -1110,13 +1104,6 @@ enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var) return efi_secureboot_mode_enabled; } -#ifdef CONFIG_RESET_ATTACK_MITIGATION -void efi_enable_reset_attack_mitigation(void); -#else -static inline void -efi_enable_reset_attack_mitigation(void) { } -#endif - #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE void efi_check_for_embedded_firmwares(void); #else @@ -1125,8 +1112,6 @@ static inline void efi_check_for_embedded_firmwares(void) { } efi_status_t efi_random_get_seed(void); -void efi_retrieve_tpm2_eventlog(void); - /* * Arch code can implement the following three template macros, avoiding * reptition for the void/non-void return cases of {__,}efi_call_virt(): diff --git a/include/linux/elevator.h b/include/linux/elevator.h index bacc40a0bdf3..1fe8e105b83b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -172,6 +172,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); /* Supports zoned block devices sequential write constraint */ #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) +/* Supports scheduling on multiple hardware queues */ +#define ELEVATOR_F_MQ_AWARE (1U << 1) #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index 10485f0c9740..e272c3d452ce 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -17,7 +17,7 @@ struct compat_elf_siginfo compat_int_t si_errno; }; -struct compat_elf_prstatus +struct compat_elf_prstatus_common { struct compat_elf_siginfo pr_info; short pr_cursig; @@ -31,8 +31,6 @@ struct compat_elf_prstatus struct old_timeval32 pr_stime; struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; - compat_elf_gregset_t pr_reg; - compat_int_t pr_fpvalid; }; struct compat_elf_prpsinfo @@ -49,4 +47,15 @@ struct compat_elf_prpsinfo char pr_psargs[ELF_PRARGSZ]; }; +#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT +#include <asm/elfcore-compat.h> 
+#endif + +struct compat_elf_prstatus +{ + struct compat_elf_prstatus_common common; + compat_elf_gregset_t pr_reg; + compat_int_t pr_fpvalid; +}; + #endif /* _LINUX_ELFCORE_COMPAT_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index de51c1bef27d..2aaa15779d50 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -29,7 +29,7 @@ struct elf_siginfo * the SVR4 structure, but more Linuxy, with things that Linux does * not support and which gdb doesn't really use excluded. */ -struct elf_prstatus +struct elf_prstatus_common { struct elf_siginfo pr_info; /* Info associated with signal */ short pr_cursig; /* Current signal */ @@ -43,6 +43,11 @@ struct elf_prstatus struct __kernel_old_timeval pr_stime; /* System time */ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ +}; + +struct elf_prstatus +{ + struct elf_prstatus_common common; elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. */ }; diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index ca86a00abe86..883acef895bc 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -2,6 +2,7 @@ #ifndef __LINUX_ENTRYCOMMON_H #define __LINUX_ENTRYCOMMON_H +#include <linux/static_call_types.h> #include <linux/tracehook.h> #include <linux/syscalls.h> #include <linux/seccomp.h> @@ -46,6 +47,7 @@ SYSCALL_WORK_SYSCALL_TRACE | \ SYSCALL_WORK_SYSCALL_AUDIT | \ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \ + SYSCALL_WORK_SYSCALL_EXIT_TRAP | \ ARCH_SYSCALL_WORK_EXIT) /* @@ -453,6 +455,9 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); * Conditional reschedule with additional sanity checks. */ void irqentry_exit_cond_resched(void); +#ifdef CONFIG_PREEMPT_DYNAMIC +DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched); +#endif /** * irqentry_exit - Handle return from exception that used irqentry_enter() diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 9b93f8584ff7..8b2b1d68b954 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -47,6 +47,20 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu); /** + * xfer_to_guest_mode_prepare - Perform last minute preparation work that + * need to be handled while IRQs are disabled + * upon entering to guest. + * + * Has to be invoked with interrupts disabled before the last call + * to xfer_to_guest_mode_work_pending(). + */ +static inline void xfer_to_guest_mode_prepare(void) +{ + lockdep_assert_irqs_disabled(); + rcu_nocb_flush_deferred_wakeup(); +} + +/** * __xfer_to_guest_mode_work_pending - Check if work is pending * * Returns: True if work pending, False otherwise. diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e3da25b51ae4..ec4cd3921c67 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -128,6 +128,8 @@ struct ethtool_link_ksettings { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; + u32 lanes; + enum ethtool_link_mode_bit_indices link_mode; }; /** @@ -265,6 +267,8 @@ struct ethtool_pause_stats { /** * struct ethtool_ops - optional netdev operations + * @cap_link_lanes_supported: indicates if the driver supports lanes + * parameter. * @supported_coalesce_params: supported types of interrupt coalescing. 
* @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not @@ -420,6 +424,7 @@ struct ethtool_pause_stats { * of the generic netdev features interface. */ struct ethtool_ops { + u32 cap_link_lanes_supported:1; u32 supported_coalesce_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 0350393465d4..593322c946e6 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -18,7 +18,7 @@ struct file; #ifdef CONFIG_EPOLL -#ifdef CONFIG_CHECKPOINT_RESTORE +#ifdef CONFIG_KCMP struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); #endif diff --git a/include/linux/export.h b/include/linux/export.h index fceb5e855717..6271a5d9c988 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -157,18 +157,9 @@ struct kernel_symbol { #define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "") #define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl") -#define EXPORT_SYMBOL_GPL_FUTURE(sym) _EXPORT_SYMBOL(sym, "_gpl_future") #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns) #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns) -#ifdef CONFIG_UNUSED_SYMBOLS -#define EXPORT_UNUSED_SYMBOL(sym) _EXPORT_SYMBOL(sym, "_unused") -#define EXPORT_UNUSED_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_unused_gpl") -#else -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) -#endif - #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 9f4d4bcbf251..fe848901fcc3 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -213,6 +213,7 @@ struct export_operations { bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); + u64 (*fetch_iversion)(struct inode *); #define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ #define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ #define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 7dc2a06cf19a..c6cc0a566ef5 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -274,6 +274,9 @@ struct f2fs_inode { __u8 i_compress_algorithm; /* compress algorithm */ __u8 i_log_cluster_size; /* log of cluster size */ __le16 i_compress_flag; /* compress flag */ + /* 0 bit: chksum flag + * [10,15] bits: compress level + */ __le32 i_extra_end[0]; /* for attribute size calculation */ } __packed; __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 921e750843e6..766fcd973beb 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -19,7 +19,7 @@ /* List of all valid flags for the how->resolve argument: */ #define VALID_RESOLVE_FLAGS \ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ - RESOLVE_BENEATH | RESOLVE_IN_ROOT) + RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED) /* List of all open_how "versions". 
*/ #define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 29c27656165b..3b00fc906ccd 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -22,6 +22,7 @@ #include <linux/vmalloc.h> #include <linux/sockptr.h> #include <crypto/sha1.h> +#include <linux/u64_stats_sync.h> #include <net/sch_generic.h> @@ -259,15 +260,32 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) -/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ -#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ +/* + * Atomic operations: + * + * BPF_ADD *(uint *) (dst_reg + off16) += src_reg + * BPF_AND *(uint *) (dst_reg + off16) &= src_reg + * BPF_OR *(uint *) (dst_reg + off16) |= src_reg + * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg + * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg); + * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg); + * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg); + * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg); + * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg) + * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg) + */ + +#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ - .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ - .imm = 0 }) + .imm = OP }) + +/* Legacy alias */ +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF) /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ @@ -522,6 +540,13 @@ struct bpf_binary_header { u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; +struct bpf_prog_stats { + u64 cnt; + u64 nsecs; + u64 misses; + struct u64_stats_sync syncp; +} __aligned(2 * sizeof(u64)); + struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? 
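The BPF_ATOMIC_OP() macro added above generalizes the old BPF_STX_XADD encoding to the full set of atomic operations documented in the comment block. Purely as an illustrative sketch (the register and offset choices are arbitrary), the new forms are emitted like this:

        #include <linux/filter.h>

        static void foo_build_atomic_insns(struct bpf_insn *insn)
        {
                /* lock *(u64 *)(r1 + 0) += r2   (the old BPF_STX_XADD behaviour) */
                insn[0] = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0);

                /* r2 = atomic_fetch_or((u64 *)(r1 + 8), r2) */
                insn[1] = BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_1, BPF_REG_2, 8);

                /* r0 = atomic_cmpxchg((u32 *)(r1 + 16), r0, r2) */
                insn[2] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 16);
        }

The legacy BPF_STX_XADD() alias still expands to the first of these, so existing in-kernel users keep building.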
*/ @@ -540,10 +565,12 @@ struct bpf_prog { u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; - struct bpf_prog_aux *aux; /* Auxiliary fields */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ + struct bpf_prog_stats __percpu *stats; + int __percpu *active; unsigned int (*bpf_func)(const void *ctx, const struct bpf_insn *insn); + struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ /* Instructions for interpreter */ struct sock_filter insns[0]; struct bpf_insn insnsi[]; @@ -564,7 +591,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); struct bpf_prog_stats *__stats; \ u64 __start = sched_clock(); \ __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - __stats = this_cpu_ptr(prog->aux->stats); \ + __stats = this_cpu_ptr(prog->stats); \ u64_stats_update_begin(&__stats->syncp); \ __stats->cnt++; \ __stats->nsecs += sched_clock() - __start; \ @@ -886,7 +913,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #define __bpf_call_base_args \ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ - __bpf_call_base) + (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); @@ -1281,6 +1308,11 @@ struct bpf_sysctl_kern { u64 tmp_reg; }; +#define BPF_SOCKOPT_KERN_BUF_SIZE 32 +struct bpf_sockopt_buf { + u8 data[BPF_SOCKOPT_KERN_BUF_SIZE]; +}; + struct bpf_sockopt_kern { struct sock *sk; u8 *optval; diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index a93d85932eb9..ebc295647581 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -6,7 +6,7 @@ #ifndef __STRATIX10_SVC_CLIENT_H #define __STRATIX10_SVC_CLIENT_H -/** +/* * Service layer driver supports client names * * fpga: for FPGA configuration @@ -15,7 +15,7 @@ #define SVC_CLIENT_FPGA "fpga" #define SVC_CLIENT_RSU "rsu" -/** +/* * Status of the sent command, in bit number * * SVC_STATUS_OK: @@ -50,7 +50,7 @@ #define SVC_STATUS_ERROR 5 #define SVC_STATUS_NO_SUPPORT 6 -/** +/* * Flag bit for COMMAND_RECONFIG * * COMMAND_RECONFIG_FLAG_PARTIAL: @@ -58,7 +58,7 @@ */ #define COMMAND_RECONFIG_FLAG_PARTIAL 1 -/** +/* * Timeout settings for service clients: * timeout value used in Stratix10 FPGA manager driver. 
* timeout value used in RSU driver @@ -218,7 +218,7 @@ void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr); int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg); /** - * intel_svc_done() - complete service request + * stratix10_svc_done() - complete service request * @chan: service channel assigned to the client * * This function is used by service client to inform service layer that diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 2a0da841c942..71177b17eee5 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -64,28 +64,27 @@ enum pm_api_id { PM_GET_API_VERSION = 1, PM_SYSTEM_SHUTDOWN = 12, PM_REQUEST_NODE = 13, - PM_RELEASE_NODE, - PM_SET_REQUIREMENT, + PM_RELEASE_NODE = 14, + PM_SET_REQUIREMENT = 15, PM_RESET_ASSERT = 17, - PM_RESET_GET_STATUS, + PM_RESET_GET_STATUS = 18, PM_PM_INIT_FINALIZE = 21, - PM_FPGA_LOAD, - PM_FPGA_GET_STATUS, + PM_FPGA_LOAD = 22, + PM_FPGA_GET_STATUS = 23, PM_GET_CHIPID = 24, PM_IOCTL = 34, - PM_QUERY_DATA, - PM_CLOCK_ENABLE, - PM_CLOCK_DISABLE, - PM_CLOCK_GETSTATE, - PM_CLOCK_SETDIVIDER, - PM_CLOCK_GETDIVIDER, - PM_CLOCK_SETRATE, - PM_CLOCK_GETRATE, - PM_CLOCK_SETPARENT, - PM_CLOCK_GETPARENT, + PM_QUERY_DATA = 35, + PM_CLOCK_ENABLE = 36, + PM_CLOCK_DISABLE = 37, + PM_CLOCK_GETSTATE = 38, + PM_CLOCK_SETDIVIDER = 39, + PM_CLOCK_GETDIVIDER = 40, + PM_CLOCK_SETRATE = 41, + PM_CLOCK_GETRATE = 42, + PM_CLOCK_SETPARENT = 43, + PM_CLOCK_GETPARENT = 44, PM_SECURE_AES = 47, PM_FEATURE_CHECK = 63, - PM_API_MAX, }; /* PMU-FW return status codes */ @@ -93,21 +92,21 @@ enum pm_ret_status { XST_PM_SUCCESS = 0, XST_PM_NO_FEATURE = 19, XST_PM_INTERNAL = 2000, - XST_PM_CONFLICT, - XST_PM_NO_ACCESS, - XST_PM_INVALID_NODE, - XST_PM_DOUBLE_REQ, - XST_PM_ABORT_SUSPEND, + XST_PM_CONFLICT = 2001, + XST_PM_NO_ACCESS = 2002, + XST_PM_INVALID_NODE = 2003, + XST_PM_DOUBLE_REQ = 2004, + XST_PM_ABORT_SUSPEND = 2005, XST_PM_MULT_USER = 2008, }; enum pm_ioctl_id { IOCTL_SD_DLL_RESET = 6, - IOCTL_SET_SD_TAPDELAY, - IOCTL_SET_PLL_FRAC_MODE, - IOCTL_GET_PLL_FRAC_MODE, - IOCTL_SET_PLL_FRAC_DATA, - IOCTL_GET_PLL_FRAC_DATA, + IOCTL_SET_SD_TAPDELAY = 7, + IOCTL_SET_PLL_FRAC_MODE = 8, + IOCTL_GET_PLL_FRAC_MODE = 9, + IOCTL_SET_PLL_FRAC_DATA = 10, + IOCTL_GET_PLL_FRAC_DATA = 11, IOCTL_WRITE_GGS = 12, IOCTL_READ_GGS = 13, IOCTL_WRITE_PGGS = 14, @@ -117,185 +116,185 @@ enum pm_ioctl_id { }; enum pm_query_id { - PM_QID_INVALID, - PM_QID_CLOCK_GET_NAME, - PM_QID_CLOCK_GET_TOPOLOGY, - PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS, - PM_QID_CLOCK_GET_PARENTS, - PM_QID_CLOCK_GET_ATTRIBUTES, + PM_QID_INVALID = 0, + PM_QID_CLOCK_GET_NAME = 1, + PM_QID_CLOCK_GET_TOPOLOGY = 2, + PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3, + PM_QID_CLOCK_GET_PARENTS = 4, + PM_QID_CLOCK_GET_ATTRIBUTES = 5, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, - PM_QID_CLOCK_GET_MAX_DIVISOR, + PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; enum zynqmp_pm_reset_action { - PM_RESET_ACTION_RELEASE, - PM_RESET_ACTION_ASSERT, - PM_RESET_ACTION_PULSE, + PM_RESET_ACTION_RELEASE = 0, + PM_RESET_ACTION_ASSERT = 1, + PM_RESET_ACTION_PULSE = 2, }; enum zynqmp_pm_reset { ZYNQMP_PM_RESET_START = 1000, ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START, - ZYNQMP_PM_RESET_PCIE_BRIDGE, - ZYNQMP_PM_RESET_PCIE_CTRL, - ZYNQMP_PM_RESET_DP, - ZYNQMP_PM_RESET_SWDT_CRF, - ZYNQMP_PM_RESET_AFI_FM5, - ZYNQMP_PM_RESET_AFI_FM4, - ZYNQMP_PM_RESET_AFI_FM3, - ZYNQMP_PM_RESET_AFI_FM2, - ZYNQMP_PM_RESET_AFI_FM1, - ZYNQMP_PM_RESET_AFI_FM0, - ZYNQMP_PM_RESET_GDMA, - 
ZYNQMP_PM_RESET_GPU_PP1, - ZYNQMP_PM_RESET_GPU_PP0, - ZYNQMP_PM_RESET_GPU, - ZYNQMP_PM_RESET_GT, - ZYNQMP_PM_RESET_SATA, - ZYNQMP_PM_RESET_ACPU3_PWRON, - ZYNQMP_PM_RESET_ACPU2_PWRON, - ZYNQMP_PM_RESET_ACPU1_PWRON, - ZYNQMP_PM_RESET_ACPU0_PWRON, - ZYNQMP_PM_RESET_APU_L2, - ZYNQMP_PM_RESET_ACPU3, - ZYNQMP_PM_RESET_ACPU2, - ZYNQMP_PM_RESET_ACPU1, - ZYNQMP_PM_RESET_ACPU0, - ZYNQMP_PM_RESET_DDR, - ZYNQMP_PM_RESET_APM_FPD, - ZYNQMP_PM_RESET_SOFT, - ZYNQMP_PM_RESET_GEM0, - ZYNQMP_PM_RESET_GEM1, - ZYNQMP_PM_RESET_GEM2, - ZYNQMP_PM_RESET_GEM3, - ZYNQMP_PM_RESET_QSPI, - ZYNQMP_PM_RESET_UART0, - ZYNQMP_PM_RESET_UART1, - ZYNQMP_PM_RESET_SPI0, - ZYNQMP_PM_RESET_SPI1, - ZYNQMP_PM_RESET_SDIO0, - ZYNQMP_PM_RESET_SDIO1, - ZYNQMP_PM_RESET_CAN0, - ZYNQMP_PM_RESET_CAN1, - ZYNQMP_PM_RESET_I2C0, - ZYNQMP_PM_RESET_I2C1, - ZYNQMP_PM_RESET_TTC0, - ZYNQMP_PM_RESET_TTC1, - ZYNQMP_PM_RESET_TTC2, - ZYNQMP_PM_RESET_TTC3, - ZYNQMP_PM_RESET_SWDT_CRL, - ZYNQMP_PM_RESET_NAND, - ZYNQMP_PM_RESET_ADMA, - ZYNQMP_PM_RESET_GPIO, - ZYNQMP_PM_RESET_IOU_CC, - ZYNQMP_PM_RESET_TIMESTAMP, - ZYNQMP_PM_RESET_RPU_R50, - ZYNQMP_PM_RESET_RPU_R51, - ZYNQMP_PM_RESET_RPU_AMBA, - ZYNQMP_PM_RESET_OCM, - ZYNQMP_PM_RESET_RPU_PGE, - ZYNQMP_PM_RESET_USB0_CORERESET, - ZYNQMP_PM_RESET_USB1_CORERESET, - ZYNQMP_PM_RESET_USB0_HIBERRESET, - ZYNQMP_PM_RESET_USB1_HIBERRESET, - ZYNQMP_PM_RESET_USB0_APB, - ZYNQMP_PM_RESET_USB1_APB, - ZYNQMP_PM_RESET_IPI, - ZYNQMP_PM_RESET_APM_LPD, - ZYNQMP_PM_RESET_RTC, - ZYNQMP_PM_RESET_SYSMON, - ZYNQMP_PM_RESET_AFI_FM6, - ZYNQMP_PM_RESET_LPD_SWDT, - ZYNQMP_PM_RESET_FPD, - ZYNQMP_PM_RESET_RPU_DBG1, - ZYNQMP_PM_RESET_RPU_DBG0, - ZYNQMP_PM_RESET_DBG_LPD, - ZYNQMP_PM_RESET_DBG_FPD, - ZYNQMP_PM_RESET_APLL, - ZYNQMP_PM_RESET_DPLL, - ZYNQMP_PM_RESET_VPLL, - ZYNQMP_PM_RESET_IOPLL, - ZYNQMP_PM_RESET_RPLL, - ZYNQMP_PM_RESET_GPO3_PL_0, - ZYNQMP_PM_RESET_GPO3_PL_1, - ZYNQMP_PM_RESET_GPO3_PL_2, - ZYNQMP_PM_RESET_GPO3_PL_3, - ZYNQMP_PM_RESET_GPO3_PL_4, - ZYNQMP_PM_RESET_GPO3_PL_5, - ZYNQMP_PM_RESET_GPO3_PL_6, - ZYNQMP_PM_RESET_GPO3_PL_7, - ZYNQMP_PM_RESET_GPO3_PL_8, - ZYNQMP_PM_RESET_GPO3_PL_9, - ZYNQMP_PM_RESET_GPO3_PL_10, - ZYNQMP_PM_RESET_GPO3_PL_11, - ZYNQMP_PM_RESET_GPO3_PL_12, - ZYNQMP_PM_RESET_GPO3_PL_13, - ZYNQMP_PM_RESET_GPO3_PL_14, - ZYNQMP_PM_RESET_GPO3_PL_15, - ZYNQMP_PM_RESET_GPO3_PL_16, - ZYNQMP_PM_RESET_GPO3_PL_17, - ZYNQMP_PM_RESET_GPO3_PL_18, - ZYNQMP_PM_RESET_GPO3_PL_19, - ZYNQMP_PM_RESET_GPO3_PL_20, - ZYNQMP_PM_RESET_GPO3_PL_21, - ZYNQMP_PM_RESET_GPO3_PL_22, - ZYNQMP_PM_RESET_GPO3_PL_23, - ZYNQMP_PM_RESET_GPO3_PL_24, - ZYNQMP_PM_RESET_GPO3_PL_25, - ZYNQMP_PM_RESET_GPO3_PL_26, - ZYNQMP_PM_RESET_GPO3_PL_27, - ZYNQMP_PM_RESET_GPO3_PL_28, - ZYNQMP_PM_RESET_GPO3_PL_29, - ZYNQMP_PM_RESET_GPO3_PL_30, - ZYNQMP_PM_RESET_GPO3_PL_31, - ZYNQMP_PM_RESET_RPU_LS, - ZYNQMP_PM_RESET_PS_ONLY, - ZYNQMP_PM_RESET_PL, - ZYNQMP_PM_RESET_PS_PL0, - ZYNQMP_PM_RESET_PS_PL1, - ZYNQMP_PM_RESET_PS_PL2, - ZYNQMP_PM_RESET_PS_PL3, + ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001, + ZYNQMP_PM_RESET_PCIE_CTRL = 1002, + ZYNQMP_PM_RESET_DP = 1003, + ZYNQMP_PM_RESET_SWDT_CRF = 1004, + ZYNQMP_PM_RESET_AFI_FM5 = 1005, + ZYNQMP_PM_RESET_AFI_FM4 = 1006, + ZYNQMP_PM_RESET_AFI_FM3 = 1007, + ZYNQMP_PM_RESET_AFI_FM2 = 1008, + ZYNQMP_PM_RESET_AFI_FM1 = 1009, + ZYNQMP_PM_RESET_AFI_FM0 = 1010, + ZYNQMP_PM_RESET_GDMA = 1011, + ZYNQMP_PM_RESET_GPU_PP1 = 1012, + ZYNQMP_PM_RESET_GPU_PP0 = 1013, + ZYNQMP_PM_RESET_GPU = 1014, + ZYNQMP_PM_RESET_GT = 1015, + ZYNQMP_PM_RESET_SATA = 1016, + ZYNQMP_PM_RESET_ACPU3_PWRON = 1017, + ZYNQMP_PM_RESET_ACPU2_PWRON = 1018, + 
ZYNQMP_PM_RESET_ACPU1_PWRON = 1019, + ZYNQMP_PM_RESET_ACPU0_PWRON = 1020, + ZYNQMP_PM_RESET_APU_L2 = 1021, + ZYNQMP_PM_RESET_ACPU3 = 1022, + ZYNQMP_PM_RESET_ACPU2 = 1023, + ZYNQMP_PM_RESET_ACPU1 = 1024, + ZYNQMP_PM_RESET_ACPU0 = 1025, + ZYNQMP_PM_RESET_DDR = 1026, + ZYNQMP_PM_RESET_APM_FPD = 1027, + ZYNQMP_PM_RESET_SOFT = 1028, + ZYNQMP_PM_RESET_GEM0 = 1029, + ZYNQMP_PM_RESET_GEM1 = 1030, + ZYNQMP_PM_RESET_GEM2 = 1031, + ZYNQMP_PM_RESET_GEM3 = 1032, + ZYNQMP_PM_RESET_QSPI = 1033, + ZYNQMP_PM_RESET_UART0 = 1034, + ZYNQMP_PM_RESET_UART1 = 1035, + ZYNQMP_PM_RESET_SPI0 = 1036, + ZYNQMP_PM_RESET_SPI1 = 1037, + ZYNQMP_PM_RESET_SDIO0 = 1038, + ZYNQMP_PM_RESET_SDIO1 = 1039, + ZYNQMP_PM_RESET_CAN0 = 1040, + ZYNQMP_PM_RESET_CAN1 = 1041, + ZYNQMP_PM_RESET_I2C0 = 1042, + ZYNQMP_PM_RESET_I2C1 = 1043, + ZYNQMP_PM_RESET_TTC0 = 1044, + ZYNQMP_PM_RESET_TTC1 = 1045, + ZYNQMP_PM_RESET_TTC2 = 1046, + ZYNQMP_PM_RESET_TTC3 = 1047, + ZYNQMP_PM_RESET_SWDT_CRL = 1048, + ZYNQMP_PM_RESET_NAND = 1049, + ZYNQMP_PM_RESET_ADMA = 1050, + ZYNQMP_PM_RESET_GPIO = 1051, + ZYNQMP_PM_RESET_IOU_CC = 1052, + ZYNQMP_PM_RESET_TIMESTAMP = 1053, + ZYNQMP_PM_RESET_RPU_R50 = 1054, + ZYNQMP_PM_RESET_RPU_R51 = 1055, + ZYNQMP_PM_RESET_RPU_AMBA = 1056, + ZYNQMP_PM_RESET_OCM = 1057, + ZYNQMP_PM_RESET_RPU_PGE = 1058, + ZYNQMP_PM_RESET_USB0_CORERESET = 1059, + ZYNQMP_PM_RESET_USB1_CORERESET = 1060, + ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061, + ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062, + ZYNQMP_PM_RESET_USB0_APB = 1063, + ZYNQMP_PM_RESET_USB1_APB = 1064, + ZYNQMP_PM_RESET_IPI = 1065, + ZYNQMP_PM_RESET_APM_LPD = 1066, + ZYNQMP_PM_RESET_RTC = 1067, + ZYNQMP_PM_RESET_SYSMON = 1068, + ZYNQMP_PM_RESET_AFI_FM6 = 1069, + ZYNQMP_PM_RESET_LPD_SWDT = 1070, + ZYNQMP_PM_RESET_FPD = 1071, + ZYNQMP_PM_RESET_RPU_DBG1 = 1072, + ZYNQMP_PM_RESET_RPU_DBG0 = 1073, + ZYNQMP_PM_RESET_DBG_LPD = 1074, + ZYNQMP_PM_RESET_DBG_FPD = 1075, + ZYNQMP_PM_RESET_APLL = 1076, + ZYNQMP_PM_RESET_DPLL = 1077, + ZYNQMP_PM_RESET_VPLL = 1078, + ZYNQMP_PM_RESET_IOPLL = 1079, + ZYNQMP_PM_RESET_RPLL = 1080, + ZYNQMP_PM_RESET_GPO3_PL_0 = 1081, + ZYNQMP_PM_RESET_GPO3_PL_1 = 1082, + ZYNQMP_PM_RESET_GPO3_PL_2 = 1083, + ZYNQMP_PM_RESET_GPO3_PL_3 = 1084, + ZYNQMP_PM_RESET_GPO3_PL_4 = 1085, + ZYNQMP_PM_RESET_GPO3_PL_5 = 1086, + ZYNQMP_PM_RESET_GPO3_PL_6 = 1087, + ZYNQMP_PM_RESET_GPO3_PL_7 = 1088, + ZYNQMP_PM_RESET_GPO3_PL_8 = 1089, + ZYNQMP_PM_RESET_GPO3_PL_9 = 1090, + ZYNQMP_PM_RESET_GPO3_PL_10 = 1091, + ZYNQMP_PM_RESET_GPO3_PL_11 = 1092, + ZYNQMP_PM_RESET_GPO3_PL_12 = 1093, + ZYNQMP_PM_RESET_GPO3_PL_13 = 1094, + ZYNQMP_PM_RESET_GPO3_PL_14 = 1095, + ZYNQMP_PM_RESET_GPO3_PL_15 = 1096, + ZYNQMP_PM_RESET_GPO3_PL_16 = 1097, + ZYNQMP_PM_RESET_GPO3_PL_17 = 1098, + ZYNQMP_PM_RESET_GPO3_PL_18 = 1099, + ZYNQMP_PM_RESET_GPO3_PL_19 = 1100, + ZYNQMP_PM_RESET_GPO3_PL_20 = 1101, + ZYNQMP_PM_RESET_GPO3_PL_21 = 1102, + ZYNQMP_PM_RESET_GPO3_PL_22 = 1103, + ZYNQMP_PM_RESET_GPO3_PL_23 = 1104, + ZYNQMP_PM_RESET_GPO3_PL_24 = 1105, + ZYNQMP_PM_RESET_GPO3_PL_25 = 1106, + ZYNQMP_PM_RESET_GPO3_PL_26 = 1107, + ZYNQMP_PM_RESET_GPO3_PL_27 = 1108, + ZYNQMP_PM_RESET_GPO3_PL_28 = 1109, + ZYNQMP_PM_RESET_GPO3_PL_29 = 1110, + ZYNQMP_PM_RESET_GPO3_PL_30 = 1111, + ZYNQMP_PM_RESET_GPO3_PL_31 = 1112, + ZYNQMP_PM_RESET_RPU_LS = 1113, + ZYNQMP_PM_RESET_PS_ONLY = 1114, + ZYNQMP_PM_RESET_PL = 1115, + ZYNQMP_PM_RESET_PS_PL0 = 1116, + ZYNQMP_PM_RESET_PS_PL1 = 1117, + ZYNQMP_PM_RESET_PS_PL2 = 1118, + ZYNQMP_PM_RESET_PS_PL3 = 1119, ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3 }; enum zynqmp_pm_suspend_reason { SUSPEND_POWER_REQUEST = 201, 
- SUSPEND_ALERT, - SUSPEND_SYSTEM_SHUTDOWN, + SUSPEND_ALERT = 202, + SUSPEND_SYSTEM_SHUTDOWN = 203, }; enum zynqmp_pm_request_ack { ZYNQMP_PM_REQUEST_ACK_NO = 1, - ZYNQMP_PM_REQUEST_ACK_BLOCKING, - ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING, + ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2, + ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3, }; enum pm_node_id { NODE_SD_0 = 39, - NODE_SD_1, + NODE_SD_1 = 40, }; enum tap_delay_type { PM_TAPDELAY_INPUT = 0, - PM_TAPDELAY_OUTPUT, + PM_TAPDELAY_OUTPUT = 1, }; enum dll_reset_type { - PM_DLL_RESET_ASSERT, - PM_DLL_RESET_RELEASE, - PM_DLL_RESET_PULSE, + PM_DLL_RESET_ASSERT = 0, + PM_DLL_RESET_RELEASE = 1, + PM_DLL_RESET_PULSE = 2, }; enum zynqmp_pm_shutdown_type { - ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN, - ZYNQMP_PM_SHUTDOWN_TYPE_RESET, - ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY, + ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0, + ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1, + ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2, }; enum zynqmp_pm_shutdown_subtype { - ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM, - ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY, - ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2, }; /** diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h index 8396013785ef..281cb4f83dbe 100644 --- a/include/linux/fixp-arith.h +++ b/include/linux/fixp-arith.h @@ -141,4 +141,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi) #define fixp_cos32_rad(rad, twopi) \ fixp_sin32_rad(rad + twopi / 4, twopi) +/** + * fixp_linear_interpolate() - interpolates a value from two known points + * + * @x0: x value of point 0 + * @y0: y value of point 0 + * @x1: x value of point 1 + * @y1: y value of point 1 + * @x: the linear interpolant + */ +static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x) +{ + if (y0 == y1 || x == x0) + return y0; + if (x1 == x0 || x == x1) + return y1; + + return y0 + ((y1 - y0) * (x - x0) / (x1 - x0)); +} + #endif diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h new file mode 100644 index 000000000000..c1be37437e77 --- /dev/null +++ b/include/linux/fortify-string.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FORTIFY_STRING_H_ +#define _LINUX_FORTIFY_STRING_H_ + + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define 
__underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + +__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 1); + + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_strncpy(p, q, size); +} + +__FORTIFY_INLINE char *strcat(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 1); + + if (p_size == (size_t)-1) + return __underlying_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +} + +__FORTIFY_INLINE __kernel_size_t strlen(const char *p) +{ + __kernel_size_t ret; + size_t p_size = __builtin_object_size(p, 1); + + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) + return __underlying_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); + return ret; +} + +extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); +__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) +{ + size_t p_size = __builtin_object_size(p, 1); + __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size); + + if (p_size <= ret && maxlen != ret) + fortify_panic(__func__); + return ret; +} + +/* defined after fortified strlen to reuse it */ +extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +{ + size_t ret; + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __real_strlcpy(p, q, size); + ret = strlen(q); + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + + if (__builtin_constant_p(len) && len >= p_size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); + __underlying_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +} + +/* defined after fortified strnlen to reuse it */ +extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); +__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) +{ + size_t len; + /* Use string size rather than possible enclosing struct size. */ + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + /* If we cannot get size of p and q default to call strscpy. */ + if (p_size == (size_t) -1 && q_size == (size_t) -1) + return __real_strscpy(p, q, size); + + /* + * If size can be known at compile time and is greater than + * p_size, generate a compile time write overflow error. + */ + if (__builtin_constant_p(size) && size > p_size) + __write_overflow(); + + /* + * This call protects from read overflow, because len will default to q + * length if it smaller than size. + */ + len = strnlen(q, size); + /* + * If len equals size, we will copy only size bytes which leads to + * -E2BIG being returned. + * Otherwise we will copy len + 1 because of the final '\O'. + */ + len = len == size ? size : len + 1; + + /* + * Generate a runtime write overflow error if len is greater than + * p_size. 
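The fortified string helpers in this new header all follow the same pattern: __builtin_object_size() recovers the destination (and sometimes source) size, constant oversizes become compile-time __write_overflow()/__read_overflow() errors, and anything that slips past that ends in fortify_panic() at runtime. A loose sketch of the effect, with an invented struct and function name:

        #include <linux/string.h>

        struct foo_msg {
                char tag[8];
        };

        static void foo_set_tag(struct foo_msg *msg, const char *src)
        {
                /*
                 * p_size is known to be 8 here, so a constant size larger than
                 * the buffer (e.g. strscpy(msg->tag, src, 16)) would trip
                 * __write_overflow() at compile time, and a runtime overflow
                 * would end in fortify_panic() instead of corrupting memory.
                 */
                strscpy(msg->tag, src, sizeof(msg->tag));
        }

This is only a sketch of how a caller sees the checks; the actual enforcement is the inline wrappers shown in the hunk, pulled in through linux/string.h when CONFIG_FORTIFY_SOURCE is enabled.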
+ */ + if (len > p_size) + fortify_panic(__func__); + + /* + * We can now safely call vanilla strscpy because we are protected from: + * 1. Read overflow thanks to call to strnlen(). + * 2. Write overflow thanks to above ifs. + */ + return __real_strscpy(p, q, len); +} + +/* defined after fortified strlen and strnlen to reuse them */ +__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) +{ + size_t p_len, copy_len; + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); + __underlying_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; +} + +__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memset(p, c, size); +} + +__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcpy(p, q, size); +} + +__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memmove(p, q, size); +} + +extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memscan(p, c, size); +} + +__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __read_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcmp(p, q, size); +} + +__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memchr(p, c, size); +} + +void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); +__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memchr_inv(p, c, size); +} + +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); +__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, 
gfp_t gfp) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_kmemdup(p, size, gfp); +} + +/* defined after fortified strlen and memcpy to reuse them */ +__FORTIFY_INLINE char *strcpy(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + size_t size; + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strcpy(p, q); + size = strlen(q) + 1; + /* test here to use the more stringent object size */ + if (p_size < size) + fortify_panic(__func__); + memcpy(p, q, size); + return p; +} + +/* Don't use these outside the FORITFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy + +#endif /* _LINUX_FORTIFY_STRING_H_ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index fd47deea7c17..ec8f3ddf4a6a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -39,6 +39,8 @@ #include <linux/fs_types.h> #include <linux/build_bug.h> #include <linux/stddef.h> +#include <linux/mount.h> +#include <linux/cred.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -1572,6 +1574,52 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } +static inline kuid_t kuid_into_mnt(struct user_namespace *mnt_userns, + kuid_t kuid) +{ + return make_kuid(mnt_userns, __kuid_val(kuid)); +} + +static inline kgid_t kgid_into_mnt(struct user_namespace *mnt_userns, + kgid_t kgid) +{ + return make_kgid(mnt_userns, __kgid_val(kgid)); +} + +static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return kuid_into_mnt(mnt_userns, inode->i_uid); +} + +static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return kgid_into_mnt(mnt_userns, inode->i_gid); +} + +static inline kuid_t kuid_from_mnt(struct user_namespace *mnt_userns, + kuid_t kuid) +{ + return KUIDT_INIT(from_kuid(mnt_userns, kuid)); +} + +static inline kgid_t kgid_from_mnt(struct user_namespace *mnt_userns, + kgid_t kgid) +{ + return KGIDT_INIT(from_kgid(mnt_userns, kgid)); +} + +static inline kuid_t fsuid_into_mnt(struct user_namespace *mnt_userns) +{ + return kuid_from_mnt(mnt_userns, current_fsuid()); +} + +static inline kgid_t fsgid_into_mnt(struct user_namespace *mnt_userns) +{ + return kgid_from_mnt(mnt_userns, current_fsgid()); +} + extern struct timespec64 current_time(struct inode *inode); /* @@ -1714,28 +1762,48 @@ static inline bool sb_start_intwrite_trylock(struct super_block *sb) return __sb_start_write_trylock(sb, SB_FREEZE_FS); } - -extern bool inode_owner_or_capable(const struct inode *inode); +bool inode_owner_or_capable(struct user_namespace *mnt_userns, + const struct inode *inode); /* * VFS helper functions.. 
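The i_uid_into_mnt()/i_gid_into_mnt() and kuid_from_mnt()/kgid_from_mnt() helpers above are what make permission checks work on idmapped mounts: the inode's on-disk owner is translated through the mount's user namespace before being compared with the caller. A simplified sketch of that kind of check (not the actual fs/ implementation; the function name is invented):

        #include <linux/fs.h>
        #include <linux/cred.h>
        #include <linux/uidgid.h>

        /* Simplified ownership test on an idmapped mount (illustrative only). */
        static bool foo_is_owner(struct user_namespace *mnt_userns,
                                 const struct inode *inode)
        {
                /* Map the inode's owner into the mount's idmapping, then compare. */
                return uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode));
        }

The *_from_mnt() and fsuid_into_mnt()/fsgid_into_mnt() helpers cover the opposite direction, for cases where the caller's identity has to be written into a new inode.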
*/ -extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); -extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); -extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); -extern int vfs_symlink(struct inode *, struct dentry *, const char *); -extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **); -extern int vfs_rmdir(struct inode *, struct dentry *); -extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); -extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); +int vfs_create(struct user_namespace *, struct inode *, + struct dentry *, umode_t, bool); +int vfs_mkdir(struct user_namespace *, struct inode *, + struct dentry *, umode_t); +int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *, + umode_t, dev_t); +int vfs_symlink(struct user_namespace *, struct inode *, + struct dentry *, const char *); +int vfs_link(struct dentry *, struct user_namespace *, struct inode *, + struct dentry *, struct inode **); +int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *); +int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *, + struct inode **); + +struct renamedata { + struct user_namespace *old_mnt_userns; + struct inode *old_dir; + struct dentry *old_dentry; + struct user_namespace *new_mnt_userns; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +} __randomize_layout; -static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry) +int vfs_rename(struct renamedata *); + +static inline int vfs_whiteout(struct user_namespace *mnt_userns, + struct inode *dir, struct dentry *dentry) { - return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); + return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE, + WHITEOUT_DEV); } -extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, - int open_flag); +struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns, + struct dentry *dentry, umode_t mode, int open_flag); int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), @@ -1757,8 +1825,8 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, /* * VFS file helper functions. 
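vfs_rename() now takes its arguments bundled in the struct renamedata defined above, so in-kernel callers such as nfsd or overlayfs fill the structure instead of passing a long parameter list. A hedged sketch of such a caller, assuming both dentries were already looked up and locked exactly as before:

        #include <linux/fs.h>

        static int foo_do_rename(struct user_namespace *mnt_userns,
                                 struct inode *old_dir, struct dentry *old_dentry,
                                 struct inode *new_dir, struct dentry *new_dentry)
        {
                struct renamedata rd = {
                        .old_mnt_userns = mnt_userns,
                        .old_dir        = old_dir,
                        .old_dentry     = old_dentry,
                        .new_mnt_userns = mnt_userns,
                        .new_dir        = new_dir,
                        .new_dentry     = new_dentry,
                        /* .delegated_inode and .flags keep their zero defaults here. */
                };

                return vfs_rename(&rd);
        }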
*/ -extern void inode_init_owner(struct inode *inode, const struct inode *dir, - umode_t mode); +void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode, + const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); /* @@ -1862,22 +1930,28 @@ struct file_operations { struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); - int (*permission) (struct inode *, int); + int (*permission) (struct user_namespace *, struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); - int (*create) (struct inode *,struct dentry *, umode_t, bool); + int (*create) (struct user_namespace *, struct inode *,struct dentry *, + umode_t, bool); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); - int (*symlink) (struct inode *,struct dentry *,const char *); - int (*mkdir) (struct inode *,struct dentry *,umode_t); + int (*symlink) (struct user_namespace *, struct inode *,struct dentry *, + const char *); + int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *, + umode_t); int (*rmdir) (struct inode *,struct dentry *); - int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); - int (*rename) (struct inode *, struct dentry *, + int (*mknod) (struct user_namespace *, struct inode *,struct dentry *, + umode_t,dev_t); + int (*rename) (struct user_namespace *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); - int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); + int (*setattr) (struct user_namespace *, struct dentry *, + struct iattr *); + int (*getattr) (struct user_namespace *, const struct path *, + struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); @@ -1885,8 +1959,10 @@ struct inode_operations { int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, umode_t create_mode); - int (*tmpfile) (struct inode *, struct dentry *, umode_t); - int (*set_acl)(struct inode *, struct posix_acl *, int); + int (*tmpfile) (struct user_namespace *, struct inode *, + struct dentry *, umode_t); + int (*set_acl)(struct user_namespace *, struct inode *, + struct posix_acl *, int); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -2035,9 +2111,11 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) -static inline bool HAS_UNMAPPED_ID(struct inode *inode) +static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, + struct inode *inode) { - return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); + return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) || + !gid_valid(i_gid_into_mnt(mnt_userns, inode)); } static inline enum rw_hint file_write_hint(struct file *file) @@ -2084,8 +2162,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, /* * Inode state bits. Protected by inode->i_lock * - * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, - * I_DIRTY_DATASYNC and I_DIRTY_PAGES. 
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC, + * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME. * * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at @@ -2094,12 +2172,20 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * Two bits are used for locking and completion notification, I_NEW and I_SYNC. * * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on - * fdatasync(). i_atime is the usual cause. - * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of + * fdatasync() (unless I_DIRTY_DATASYNC is also set). + * Timestamp updates are the usual cause. + * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of * these changes separately from I_DIRTY_SYNC so that we * don't have to write inode on fdatasync() when only - * mtime has changed in it. + * e.g. the timestamps have changed. * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. + * I_DIRTY_TIME The inode itself only has dirty timestamps, and the + * lazytime mount option is enabled. We keep track of this + * separately from I_DIRTY_SYNC in order to implement + * lazytime. This gets cleared if I_DIRTY_INODE + * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e. + * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in + * i_state, but not both. I_DIRTY_PAGES may still be set. * I_NEW Serves as both a mutex and completion notification. * New inodes set I_NEW. If two processes both create * the same inode, one of them will release its inode and @@ -2186,6 +2272,21 @@ static inline void mark_inode_dirty_sync(struct inode *inode) __mark_inode_dirty(inode, I_DIRTY_SYNC); } +/* + * Returns true if the given inode itself only has dirty timestamps (its pages + * may still be dirty) and isn't currently being allocated or freed. + * Filesystems should call this if when writing an inode when lazytime is + * enabled, they want to opportunistically write the timestamps of other inodes + * located very nearby on-disk, e.g. in the same inode block. This returns true + * if the given inode is in need of such an opportunistic update. Requires + * i_lock, or at least later re-checking under i_lock. + */ +static inline bool inode_is_dirtytime_only(struct inode *inode) +{ + return (inode->i_state & (I_DIRTY_TIME | I_NEW | + I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME; +} + extern void inc_nlink(struct inode *inode); extern void drop_nlink(struct inode *inode); extern void clear_nlink(struct inode *inode); @@ -2231,6 +2332,7 @@ struct file_system_type { #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ +#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ #define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
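The inode_is_dirtytime_only() helper above is documented as requiring i_lock (or a later re-check under it). A small sketch of the opportunistic-writeback pattern it enables; the surrounding function is hypothetical:

        #include <linux/fs.h>
        #include <linux/spinlock.h>

        /* Hypothetical: decide whether to piggy-back a nearby inode's timestamps. */
        static bool foo_want_opportunistic_update(struct inode *inode)
        {
                bool dirty_time_only;

                spin_lock(&inode->i_lock);
                dirty_time_only = inode_is_dirtytime_only(inode);
                spin_unlock(&inode->i_lock);

                return dirty_time_only;
        }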
*/ int (*init_fs_context)(struct fs_context *); @@ -2517,9 +2619,13 @@ struct filename { }; static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); +static inline struct user_namespace *file_mnt_user_ns(struct file *file) +{ + return mnt_user_ns(file->f_path.mnt); +} extern long vfs_truncate(const struct path *, loff_t); -extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, - struct file *filp); +int do_truncate(struct user_namespace *, struct dentry *, loff_t start, + unsigned int time_attrs, struct file *filp); extern int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, @@ -2756,10 +2862,22 @@ static inline int bmap(struct inode *inode, sector_t *block) } #endif -extern int notify_change(struct dentry *, struct iattr *, struct inode **); -extern int inode_permission(struct inode *, int); -extern int generic_permission(struct inode *, int); -extern int __check_sticky(struct inode *dir, struct inode *inode); +int notify_change(struct user_namespace *, struct dentry *, + struct iattr *, struct inode **); +int inode_permission(struct user_namespace *, struct inode *, int); +int generic_permission(struct user_namespace *, struct inode *, int); +static inline int file_permission(struct file *file, int mask) +{ + return inode_permission(file_mnt_user_ns(file), + file_inode(file), mask); +} +static inline int path_permission(const struct path *path, int mask) +{ + return inode_permission(mnt_user_ns(path->mnt), + d_inode(path->dentry), mask); +} +int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir, + struct inode *inode); static inline bool execute_ok(struct inode *inode) { @@ -2962,8 +3080,8 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); -extern ssize_t generic_file_buffered_read(struct kiocb *iocb, - struct iov_iter *to, ssize_t already_read); +ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to, + ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); @@ -3090,7 +3208,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len, extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -extern void generic_fillattr(struct inode *, struct kstat *); +void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); @@ -3140,15 +3258,18 @@ extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); -extern int simple_setattr(struct dentry *, struct iattr *); -extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern int simple_setattr(struct user_namespace *, struct dentry 
*, + struct iattr *); +extern int simple_getattr(struct user_namespace *, const struct path *, + struct kstat *, u32, unsigned int); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); -extern int simple_rename(struct inode *, struct dentry *, - struct inode *, struct dentry *, unsigned int); +extern int simple_rename(struct user_namespace *, struct inode *, + struct dentry *, struct inode *, struct dentry *, + unsigned int); extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); @@ -3192,11 +3313,6 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); -#ifdef CONFIG_UNICODE -extern int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str); -extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, - const char *str, const struct qstr *name); -#endif extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); #ifdef CONFIG_MIGRATION @@ -3211,9 +3327,10 @@ extern int buffer_migrate_page_norefs(struct address_space *, #define buffer_migrate_page_norefs NULL #endif -extern int setattr_prepare(struct dentry *, struct iattr *); +int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); -extern void setattr_copy(struct inode *inode, const struct iattr *attr); +void setattr_copy(struct user_namespace *, struct inode *inode, + const struct iattr *attr); extern int file_update_time(struct file *file); @@ -3377,12 +3494,13 @@ static inline bool is_sxid(umode_t mode) return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } -static inline int check_sticky(struct inode *dir, struct inode *inode) +static inline int check_sticky(struct user_namespace *mnt_userns, + struct inode *dir, struct inode *inode) { if (!(dir->i_mode & S_ISVTX)) return 0; - return __check_sticky(dir, inode); + return __check_sticky(mnt_userns, dir, inode); } static inline void inode_has_no_xattr(struct inode *inode) diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index db244874e834..63b56aba925a 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/interrupt.h> +#include <uapi/linux/fsl_mc.h> #define FSL_MC_VENDOR_FREESCALE 0x1957 @@ -209,8 +210,6 @@ struct fsl_mc_device { #define to_fsl_mc_device(_dev) \ container_of(_dev, struct fsl_mc_device, dev) -#define MC_CMD_NUM_OF_PARAMS 7 - struct mc_cmd_header { u8 src_id; u8 flags_hw; @@ -220,11 +219,6 @@ struct mc_cmd_header { __le16 cmd_id; }; -struct fsl_mc_command { - __le64 header; - __le64 params[MC_CMD_NUM_OF_PARAMS]; -}; - enum mc_cmd_status { MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index a2e42d3cd87c..e5409b83e731 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -470,6 +470,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry) /* create a new group */ extern struct fsnotify_group *fsnotify_alloc_group(const 
struct fsnotify_ops *ops); +extern struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops); /* get reference to a group */ extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index c1144a450392..b568b3c7d095 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -138,6 +138,10 @@ int fsverity_file_open(struct inode *inode, struct file *filp); int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); void fsverity_cleanup_inode(struct inode *inode); +/* read_metadata.c */ + +int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg); + /* verify.c */ bool fsverity_verify_page(struct page *page); @@ -183,6 +187,14 @@ static inline void fsverity_cleanup_inode(struct inode *inode) { } +/* read_metadata.c */ + +static inline int fsverity_ioctl_read_metadata(struct file *filp, + const void __user *uarg) +{ + return -EOPNOTSUPP; +} + /* verify.c */ static inline bool fsverity_verify_page(struct page *page) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9a8ce28e4485..86e5028bfa20 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -485,7 +485,6 @@ struct dyn_ftrace { struct dyn_arch_ftrace arch; }; -int ftrace_force_update(void); int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, @@ -740,7 +739,6 @@ extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); #else /* CONFIG_DYNAMIC_FTRACE */ static inline int skip_trace(unsigned long ip) { return 0; } -static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } static inline void ftrace_module_init(struct module *mod) { } diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index fde4ad97564c..ed4e67a7ff1c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/list.h> +#include <linux/err.h> struct fwnode_operations; struct device; @@ -18,9 +19,13 @@ struct device; /* * fwnode link flags * - * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. + * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. + * NOT_DEVICE: The fwnode will never be populated as a struct device. + * INITIALIZED: The hardware corresponding to fwnode has been initialized. */ #define FWNODE_FLAG_LINKS_ADDED BIT(0) +#define FWNODE_FLAG_NOT_DEVICE BIT(1) +#define FWNODE_FLAG_INITIALIZED BIT(2) struct fwnode_handle { struct fwnode_handle *secondary; @@ -50,6 +55,13 @@ struct fwnode_endpoint { const struct fwnode_handle *local_fwnode; }; +/* + * ports and endpoints defined as software_nodes should all follow a common + * naming scheme; use these macros to ensure commonality. 
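The naming-format macros introduced just below are meant to keep software-node graph ports and endpoints consistently named. A minimal sketch of building such names; the function name, buffers and IDs are invented:

        #include <linux/kernel.h>
        #include <linux/fwnode.h>

        static void foo_fill_graph_names(char *port_name, size_t port_len,
                                         char *ep_name, size_t ep_len,
                                         u32 port, u32 endpoint)
        {
                /* Produces "port@0", "endpoint@1", and so on. */
                snprintf(port_name, port_len, SWNODE_GRAPH_PORT_NAME_FMT, port);
                snprintf(ep_name, ep_len, SWNODE_GRAPH_ENDPOINT_NAME_FMT, endpoint);
        }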
+ */ +#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" +#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u" + #define NR_FWNODE_REFERENCE_ARGS 8 /** @@ -159,7 +171,20 @@ static inline void fwnode_init(struct fwnode_handle *fwnode, INIT_LIST_HEAD(&fwnode->suppliers); } +static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode, + bool initialized) +{ + if (IS_ERR_OR_NULL(fwnode)) + return; + + if (initialized) + fwnode->flags |= FWNODE_FLAG_INITIALIZED; + else + fwnode->flags &= ~FWNODE_FLAG_INITIALIZED; +} + extern u32 fw_devlink_get_flags(void); +extern bool fw_devlink_is_strict(void); int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); void fwnode_links_purge(struct fwnode_handle *fwnode); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 809aaa32d53c..f364619092cc 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -32,6 +32,7 @@ extern struct class block_class; #include <linux/string.h> #include <linux/fs.h> #include <linux/workqueue.h> +#include <linux/xarray.h> #define PARTITION_META_INFO_VOLNAMELTH 64 /* @@ -116,13 +117,6 @@ enum { DISK_EVENT_FLAG_UEVENT = 1 << 1, }; -struct disk_part_tbl { - struct rcu_head rcu_head; - int len; - struct block_device __rcu *last_lookup; - struct block_device __rcu *part[]; -}; - struct disk_events; struct badblocks; @@ -148,12 +142,7 @@ struct gendisk { unsigned short events; /* supported events */ unsigned short event_flags; /* flags related to event processing */ - /* Array of pointers to partitions indexed by partno. - * Protected with matching bdev lock but stat and other - * non-critical accesses use RCU. Always access through - * helpers. - */ - struct disk_part_tbl __rcu *part_tbl; + struct xarray part_tbl; struct block_device *part0; const struct block_device_operations *fops; @@ -163,6 +152,7 @@ struct gendisk { int flags; unsigned long state; #define GD_NEED_PART_SCAN 0 +#define GD_READ_ONLY 1 struct kobject *slave_dir; struct timer_rand_state *random; @@ -212,10 +202,11 @@ static inline dev_t disk_devt(struct gendisk *disk) return MKDEV(disk->major, disk->first_minor); } +void disk_uevent(struct gendisk *disk, enum kobject_action action); + /* * Smarter partition iterator without context limits. 
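With the partition table now stored in an xarray, the iterator no longer offers reverse iteration (DISK_PITER_REVERSE is removed above), but the basic walking pattern is unchanged. A sketch, loosely modelled on existing callers, with the per-partition work left as a stub:

        #include <linux/genhd.h>

        static void foo_walk_partitions(struct gendisk *disk)
        {
                struct disk_part_iter piter;
                struct block_device *part;

                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
                while ((part = disk_part_iter_next(&piter))) {
                        /* Inspect @part here. */
                }
                disk_part_iter_exit(&piter);
        }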
*/ -#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ #define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ @@ -223,7 +214,7 @@ static inline dev_t disk_devt(struct gendisk *disk) struct disk_part_iter { struct gendisk *disk; struct block_device *part; - int idx; + unsigned long idx; unsigned int flags; }; @@ -231,7 +222,6 @@ extern void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, unsigned int flags); struct block_device *disk_part_iter_next(struct disk_part_iter *piter); extern void disk_part_iter_exit(struct disk_part_iter *piter); -extern bool disk_has_partitions(struct gendisk *disk); /* block/genhd.c */ extern void device_add_disk(struct device *parent, struct gendisk *disk, @@ -249,11 +239,12 @@ static inline void add_disk_no_queue_reg(struct gendisk *disk) extern void del_gendisk(struct gendisk *gp); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); -extern void set_disk_ro(struct gendisk *disk, int flag); +void set_disk_ro(struct gendisk *disk, bool read_only); static inline int get_disk_ro(struct gendisk *disk) { - return disk->part0->bd_read_only; + return disk->part0->bd_read_only || + test_bit(GD_READ_ONLY, &disk->state); } extern void disk_block_events(struct gendisk *disk); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 6e479e9c48ce..8572a1474e16 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -8,6 +8,20 @@ #include <linux/linkage.h> #include <linux/topology.h> +/* The typedef is in types.h but we want the documentation here */ +#if 0 +/** + * typedef gfp_t - Memory allocation flags. + * + * GFP flags are commonly used throughout Linux to indicate how memory + * should be allocated. The GFP acronym stands for get_free_pages(), + * the underlying memory allocation function. Not every GFP flag is + * supported by every function which may allocate memory. Most users + * will want to use a plain ``GFP_KERNEL``. + */ +typedef unsigned int __bitwise gfp_t; +#endif + struct vm_area_struct; /* @@ -583,8 +597,16 @@ extern void free_pages(unsigned long addr, unsigned int order); struct page_frag_cache; extern void __page_frag_cache_drain(struct page *page, unsigned int count); -extern void *page_frag_alloc(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask); +extern void *page_frag_alloc_align(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask, + unsigned int align_mask); + +static inline void *page_frag_alloc(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask) +{ + return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u); +} + extern void page_frag_free(void *addr); #define __free_page(page) __free_pages((page), 0) @@ -612,6 +634,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); extern void pm_restrict_gfp_mask(void); extern void pm_restore_gfp_mask(void); +extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); + #ifdef CONFIG_PM_SLEEP extern bool pm_suspended_storage(void); #else diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index ef49307611d2..c73b25bc9213 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -674,6 +674,8 @@ struct acpi_gpio_mapping { * get GpioIo type explicitly, this quirk may be used. 
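page_frag_alloc_align(), introduced above, threads an align_mask down to the fragment allocator, while the existing page_frag_alloc() keeps its old behaviour by passing ~0u. Assuming the mask is applied by AND-ing the fragment offset (so ~(align - 1) with a power-of-two align yields align-byte placement), a caller sketch might look like this; the wrapper name is invented:

        #include <linux/cache.h>
        #include <linux/gfp.h>

        static void *foo_alloc_frag(struct page_frag_cache *nc, unsigned int size)
        {
                /* Request SMP_CACHE_BYTES alignment; assumes a power-of-two value. */
                return page_frag_alloc_align(nc, size, GFP_ATOMIC,
                                             ~(SMP_CACHE_BYTES - 1));
        }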
*/ #define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) +/* Use given pin as an absolute GPIO number in the system */ +#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2) unsigned int quirks; }; diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 781a053abbb9..d755e529c1e3 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -75,7 +75,7 @@ struct gpiod_hog { * gpiod_get_index() */ #define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \ -{ \ +(struct gpiod_lookup) { \ .key = _key, \ .chip_hwnum = _chip_hwnum, \ .con_id = _con_id, \ @@ -87,7 +87,7 @@ struct gpiod_hog { * Simple definition of a single GPIO hog in an array. */ #define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \ -{ \ +(struct gpiod_hog) { \ .chip_label = _chip_label, \ .chip_hwnum = _chip_hwnum, \ .line_name = _line_name, \ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9850d59d6f1c..c8ec982ff498 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -156,7 +156,7 @@ enum hdmi_content_type { }; enum hdmi_metadata_type { - HDMI_STATIC_METADATA_TYPE1 = 1, + HDMI_STATIC_METADATA_TYPE1 = 0, }; enum hdmi_eotf { diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 46bcef380446..763802b2b8f9 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -150,7 +150,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, * @info: return information about attribute after parsing report * * Parses report and returns the attribute information such as report id, -* field index, units and exponet etc. +* field index, units and exponent etc. */ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, u8 type, @@ -167,7 +167,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, * @is_signed: If true then fields < 32 bits will be sign-extended * * Issues a synchronous or asynchronous read request for an input attribute. -* Returns data upto 32 bits. +* Return: data up to 32 bits. */ enum sensor_hub_read_flags { @@ -205,8 +205,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, * @buffer: buffer to copy output * * Used to get a field in feature report. For example this can get polling -* interval, sensitivity, activate/deactivate state. On success it returns -* number of bytes copied to buffer. On failure, it returns value < 0. +* interval, sensitivity, activate/deactivate state. +* Return: On success, it returns the number of bytes copied to buffer. +* On failure, it returns value < 0. 
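/*
 * Illustrative sketch, not part of the diff: the compound-literal form of
 * GPIO_LOOKUP_IDX()/GPIO_HOG() above still works in the usual lookup-table
 * initializers and can now also be assigned outside a brace list.  All names
 * and numbers below are made up.
 */
static struct gpiod_lookup_table example_gpio_lookup = {
	.dev_id = "example-device.0",
	.table = {
		GPIO_LOOKUP_IDX("gpiochip0", 17, "reset", 0, GPIO_ACTIVE_LOW),
		{ },
	},
};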
*/ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, u32 field_index, int buffer_size, void *buffer); diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index d82a97e311d3..3bbdbccc5805 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -128,6 +128,10 @@ #define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15 /* Common selectors */ +#define HID_USAGE_SENSOR_PROP_DESC 0x200300 +#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301 +#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307 +#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305 #define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E #define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F #define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310 @@ -158,4 +162,14 @@ #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840 #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841 +/* Custom Sensor (2000e1) */ +#define HID_USAGE_SENSOR_HINGE 0x20020B +#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400 +#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B +#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541 +#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543 +/* Custom Sensor data 28=>x>=0 */ +#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \ + (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x)) + #endif diff --git a/include/linux/hid.h b/include/linux/hid.h index c39d71eb1fd0..ef702b3f56e3 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -918,7 +918,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, /** * hid_device_io_start - enable HID input during probe, remove * - * @hid - the device + * @hid: the device * * This should only be called during probe or remove and only be * called by the thread calling probe or remove. It will allow @@ -936,7 +936,7 @@ static inline void hid_device_io_start(struct hid_device *hid) { /** * hid_device_io_stop - disable HID input during probe, remove * - * @hid - the device + * @hid: the device * * Should only be called after hid_device_io_start. It will prevent * incoming packets from going to the driver for the duration of @@ -1010,6 +1010,13 @@ static inline void hid_map_usage(struct hid_input *hidinput, /** * hid_map_usage_clear - map usage input bits and clear the input bit * + * @hidinput: hidinput which we are interested in + * @usage: usage to fill in + * @bit: pointer to input->{}bit (out parameter) + * @max: maximal valid usage->code to consider later (out parameter) + * @type: input event type (EV_KEY, EV_REL, ...) + * @c: code which corresponds to this usage and type + * * The same as hid_map_usage, except the @c bit is also cleared in supported * bits (@bit). */ @@ -1084,7 +1091,7 @@ static inline void hid_hw_request(struct hid_device *hdev, * @rtype: HID report type * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT * - * @return: count of data transfered, negative if error + * Return: count of data transferred, negative if error * * Same behavior as hid_hw_request, but with raw buffers instead. 
*/ @@ -1106,7 +1113,7 @@ static inline int hid_hw_raw_request(struct hid_device *hdev, * @buf: raw data to transfer * @len: length of buf * - * @return: count of data transfered, negative if error + * Return: count of data transferred, negative if error */ static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 1bbe96dc8be6..7902c7d8b55f 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -127,11 +127,6 @@ static inline unsigned long totalhigh_pages(void) return (unsigned long)atomic_long_read(&_totalhigh_pages); } -static inline void totalhigh_pages_inc(void) -{ - atomic_long_inc(&_totalhigh_pages); -} - static inline void totalhigh_pages_add(long count) { atomic_long_add(count, &_totalhigh_pages); diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d2c70d3772a3..44170f312ae7 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -276,4 +276,60 @@ static inline void copy_highpage(struct page *to, struct page *from) #endif +static inline void memcpy_page(struct page *dst_page, size_t dst_off, + struct page *src_page, size_t src_off, + size_t len) +{ + char *dst = kmap_local_page(dst_page); + char *src = kmap_local_page(src_page); + + VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); + memcpy(dst + dst_off, src + src_off, len); + kunmap_local(src); + kunmap_local(dst); +} + +static inline void memmove_page(struct page *dst_page, size_t dst_off, + struct page *src_page, size_t src_off, + size_t len) +{ + char *dst = kmap_local_page(dst_page); + char *src = kmap_local_page(src_page); + + VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); + memmove(dst + dst_off, src + src_off, len); + kunmap_local(src); + kunmap_local(dst); +} + +static inline void memset_page(struct page *page, size_t offset, int val, + size_t len) +{ + char *addr = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memset(addr + offset, val, len); + kunmap_local(addr); +} + +static inline void memcpy_from_page(char *to, struct page *page, + size_t offset, size_t len) +{ + char *from = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memcpy(to, from + offset, len); + kunmap_local(from); +} + +static inline void memcpy_to_page(struct page *page, size_t offset, + const char *from, size_t len) +{ + char *to = kmap_local_page(page); + + VM_BUG_ON(offset + len > PAGE_SIZE); + memcpy(to + offset, from, len); + kunmap_local(to); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 6a19f35f836b..ba973efcd369 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -78,6 +78,7 @@ static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, } enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_NEVER_DAX, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, @@ -123,6 +124,13 @@ extern unsigned long transparent_hugepage_flags; */ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { + + /* + * If the hardware/firmware marked hugepage support disabled. 
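/*
 * Illustrative sketch, not part of the diff: using the new kmap_local-based
 * page copy helpers above.  Offsets and lengths must stay within PAGE_SIZE,
 * as the VM_BUG_ON() checks enforce; the function below is hypothetical.
 */
static void example_fill_page(struct page *page, const char *src, size_t len)
{
	memset_page(page, 0, 0, PAGE_SIZE);	/* zero the whole page */
	memcpy_to_page(page, 0, src, len);	/* then copy @len bytes in */
}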
+ */ + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) + return false; + if (vma->vm_flags & VM_NOHUGEPAGE) return false; @@ -134,12 +142,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; - /* - * For dax vmas, try to always use hugepage mappings. If the kernel does - * not support hugepages, fsdax mappings will fallback to PAGE_SIZE - * mappings, and device-dax namespaces, that try to guarantee a given - * mapping size, will fail to enable - */ + if (vma_is_dax(vma)) return true; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ebca2ef02212..cccd1aab69dd 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -37,7 +37,7 @@ struct hugepage_subpool { struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ - /* sasitfy minimum size. */ + /* satisfy minimum size. */ }; struct resv_map { @@ -139,7 +139,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, unsigned long dst_addr, unsigned long src_addr, struct page **pagep); -int hugetlb_reserve_pages(struct inode *inode, long from, long to, +bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, @@ -472,6 +472,84 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ +/* + * huegtlb page specific state flags. These flags are located in page.private + * of the hugetlb head page. Functions created via the below macros should be + * used to manipulate these flags. + * + * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at + * allocation time. Cleared when page is fully instantiated. Free + * routine checks flag to restore a reservation on error paths. + * Synchronization: Examined or modified by code that knows it has + * the only reference to page. i.e. After allocation but before use + * or when the page is being freed. + * HPG_migratable - Set after a newly allocated page is added to the page + * cache and/or page tables. Indicates the page is a candidate for + * migration. + * Synchronization: Initially set after new page allocation with no + * locking. When examined and modified during migration processing + * (isolate, migrate, putback) the hugetlb_lock is held. + * HPG_temporary - - Set on a page that is temporarily allocated from the buddy + * allocator. Typically used for migration target pages when no pages + * are available in the pool. The hugetlb free page path will + * immediately free pages with this flag set to the buddy allocator. + * Synchronization: Can be set after huge page allocation from buddy when + * code knows it has only reference. All other examinations and + * modifications require hugetlb_lock. + * HPG_freed - Set when page is on the free lists. + * Synchronization: hugetlb_lock held for examination and modification. + */ +enum hugetlb_page_flags { + HPG_restore_reserve = 0, + HPG_migratable, + HPG_temporary, + HPG_freed, + __NR_HPAGEFLAGS, +}; + +/* + * Macros to create test, set and clear function definitions for + * hugetlb specific page flags. 
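/*
 * Worked example, illustrative only: the HPAGEFLAG() wrapper defined just
 * below generates a test/set/clear triplet per flag when CONFIG_HUGETLB_PAGE
 * is enabled.  HPAGEFLAG(Freed, freed), for instance, expands to:
 */
static inline int HPageFreed(struct page *page)
	{ return test_bit(HPG_freed, &(page->private)); }
static inline void SetHPageFreed(struct page *page)
	{ set_bit(HPG_freed, &(page->private)); }
static inline void ClearHPageFreed(struct page *page)
	{ clear_bit(HPG_freed, &(page->private)); }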
+ */ +#ifdef CONFIG_HUGETLB_PAGE +#define TESTHPAGEFLAG(uname, flname) \ +static inline int HPage##uname(struct page *page) \ + { return test_bit(HPG_##flname, &(page->private)); } + +#define SETHPAGEFLAG(uname, flname) \ +static inline void SetHPage##uname(struct page *page) \ + { set_bit(HPG_##flname, &(page->private)); } + +#define CLEARHPAGEFLAG(uname, flname) \ +static inline void ClearHPage##uname(struct page *page) \ + { clear_bit(HPG_##flname, &(page->private)); } +#else +#define TESTHPAGEFLAG(uname, flname) \ +static inline int HPage##uname(struct page *page) \ + { return 0; } + +#define SETHPAGEFLAG(uname, flname) \ +static inline void SetHPage##uname(struct page *page) \ + { } + +#define CLEARHPAGEFLAG(uname, flname) \ +static inline void ClearHPage##uname(struct page *page) \ + { } +#endif + +#define HPAGEFLAG(uname, flname) \ + TESTHPAGEFLAG(uname, flname) \ + SETHPAGEFLAG(uname, flname) \ + CLEARHPAGEFLAG(uname, flname) \ + +/* + * Create functions associated with hugetlb page flags + */ +HPAGEFLAG(RestoreReserve, restore_reserve) +HPAGEFLAG(Migratable, migratable) +HPAGEFLAG(Temporary, temporary) +HPAGEFLAG(Freed, freed) + #ifdef CONFIG_HUGETLB_PAGE #define HSTATE_NAME_LEN 32 @@ -531,6 +609,20 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) +/* + * hugetlb page subpool pointer located in hpage[1].private + */ +static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) +{ + return (struct hugepage_subpool *)(hpage+1)->private; +} + +static inline void hugetlb_set_page_subpool(struct page *hpage, + struct hugepage_subpool *subpool) +{ + set_page_private(hpage+1, (unsigned long)subpool); +} + static inline struct hstate *hstate_file(struct file *f) { return hstate_inode(file_inode(f)); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 5ddb479c4d4c..f1d74dcf0353 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -785,6 +785,7 @@ struct vmbus_device { u16 dev_type; guid_t guid; bool perf_device; + bool allowed_in_isolated; }; struct vmbus_channel { @@ -803,6 +804,7 @@ struct vmbus_channel { u8 monitor_bit; bool rescind; /* got rescind msg */ + bool rescind_ref; /* got rescind msg, got channel reference */ struct completion rescind_event; u32 ringbuffer_gpadlhandle; @@ -1471,6 +1473,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); #define ICMSGTYPE_SHUTDOWN 3 #define ICMSGTYPE_TIMESYNC 4 #define ICMSGTYPE_VSS 5 +#define ICMSGTYPE_FCOPY 7 #define ICMSGHDRFLAG_TRANSACTION 1 #define ICMSGHDRFLAG_REQUEST 2 @@ -1514,11 +1517,17 @@ struct icmsg_hdr { u8 reserved[2]; } __packed; +#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100 +#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)) +#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \ + (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \ + (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version))) + struct icmsg_negotiate { u16 icframe_vercnt; u16 icmsg_vercnt; u32 reserved; - struct ic_version icversion_data[1]; /* any size array */ + struct ic_version icversion_data[]; /* any size array */ } __packed; struct shutdown_msg_data { @@ -1569,7 +1578,7 @@ struct hyperv_service_callback { }; #define MAX_SRV_VER 0x7ffffff -extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, +extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen, const int *fw_version, int fw_vercnt, const int *srv_version, int srv_vercnt, int *nego_fw_version, 
int *nego_srv_version); diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index de102e4418ab..8242e13e7b0b 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -176,7 +176,7 @@ struct i3c_device; struct i3c_driver { struct device_driver driver; int (*probe)(struct i3c_device *dev); - int (*remove)(struct i3c_device *dev); + void (*remove)(struct i3c_device *dev); const struct i3c_device_id *id_table; }; diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 1b3371ae8193..9055cb380ee2 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -3,6 +3,7 @@ #define _LINUX_ICMPV6_H #include <linux/skbuff.h> +#include <linux/ipv6.h> #include <uapi/linux/icmpv6.h> static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) @@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_IPV6) typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); -#if IS_BUILTIN(CONFIG_IPV6) + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); -static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +#if IS_BUILTIN(CONFIG_IPV6) +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) { - icmp6_send(skb, type, code, info, NULL); + icmp6_send(skb, type, code, info, NULL, parm); } static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) { @@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) return 0; } #else -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); #endif +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + __icmpv6_send(skb, type, code, info, IP6CB(skb)); +} + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); #if IS_ENABLED(CONFIG_NF_NAT) void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); #else -#define icmpv6_ndo_send icmpv6_send +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + __icmpv6_send(skb_in, type, code, info, &parm); +} #endif #else diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h new file mode 100644 index 000000000000..38bbc537d4e4 --- /dev/null +++ b/include/linux/if_hsr.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_HSR_H_ +#define _LINUX_IF_HSR_H_ + +/* used to differentiate various protocols */ +enum hsr_version { + HSR_V0 = 0, + HSR_V1, + PRP_V1, +}; + +#if IS_ENABLED(CONFIG_HSR) +extern bool is_hsr_master(struct net_device *dev); +extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver); +#else +static inline bool is_hsr_master(struct net_device *dev) +{ + return false; +} +static inline int hsr_get_version(struct net_device *dev, + enum hsr_version *ver) +{ + return -EINVAL; +} +#endif /* CONFIG_HSR */ + +#endif /*_LINUX_IF_HSR_H_*/ diff --git 
a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h new file mode 100644 index 000000000000..33f60f43e1aa --- /dev/null +++ b/include/linux/iio/adc/qcom-vadc-common.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Code shared between the different Qualcomm PMIC voltage ADCs + */ + +#ifndef QCOM_VADC_COMMON_H +#define QCOM_VADC_COMMON_H + +#include <linux/types.h> + +#define VADC_CONV_TIME_MIN_US 2000 +#define VADC_CONV_TIME_MAX_US 2100 + +/* Min ADC code represents 0V */ +#define VADC_MIN_ADC_CODE 0x6000 +/* Max ADC code represents full-scale range of 1.8V */ +#define VADC_MAX_ADC_CODE 0xa800 + +#define VADC_ABSOLUTE_RANGE_UV 625000 +#define VADC_RATIOMETRIC_RANGE 1800 + +#define VADC_DEF_PRESCALING 0 /* 1:1 */ +#define VADC_DEF_DECIMATION 0 /* 512 */ +#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */ +#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */ +#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE + +#define VADC_DECIMATION_MIN 512 +#define VADC_DECIMATION_MAX 4096 +#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */ +#define ADC5_DECIMATION_SHORT 250 +#define ADC5_DECIMATION_MEDIUM 420 +#define ADC5_DECIMATION_LONG 840 +/* Default decimation - 1024 for rev2, 840 for pmic5 */ +#define ADC5_DECIMATION_DEFAULT 2 +#define ADC5_DECIMATION_SAMPLES_MAX 3 + +#define VADC_HW_SETTLE_DELAY_MAX 10000 +#define VADC_HW_SETTLE_SAMPLES_MAX 16 +#define VADC_AVG_SAMPLES_MAX 512 +#define ADC5_AVG_SAMPLES_MAX 16 + +#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500 +#define PMIC5_SMB_TEMP_CONSTANT 419400 +#define PMIC5_SMB_TEMP_SCALE_FACTOR 356 + +#define PMI_CHG_SCALE_1 -138890 +#define PMI_CHG_SCALE_2 391750000000LL + +#define VADC5_MAX_CODE 0x7fff +#define ADC5_FULL_SCALE_CODE 0x70e4 +#define ADC5_USR_DATA_CHECK 0x8000 + +#define R_PU_100K 100000 +#define RATIO_MAX_ADC7 BIT(14) + +/* + * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels. + * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for + * calibration. + */ +enum vadc_calibration { + VADC_CALIB_ABSOLUTE = 0, + VADC_CALIB_RATIOMETRIC +}; + +/** + * struct vadc_linear_graph - Represent ADC characteristics. + * @dy: numerator slope to calculate the gain. + * @dx: denominator slope to calculate the gain. + * @gnd: A/D word of the ground reference used for the channel. + * + * Each ADC device has different offset and gain parameters which are + * computed to calibrate the device. + */ +struct vadc_linear_graph { + s32 dy; + s32 dx; + s32 gnd; +}; + +/** + * struct vadc_prescale_ratio - Represent scaling ratio for ADC input. + * @num: the inverse numerator of the gain applied to the input channel. + * @den: the inverse denominator of the gain applied to the input channel. + */ +struct vadc_prescale_ratio { + u32 num; + u32 den; +}; + +/** + * enum vadc_scale_fn_type - Scaling function to convert ADC code to + * physical scaled units for the channel. + * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV). + * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC. + * Uses a mapping table with 100K pullup. + * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade. + * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC. + * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp + * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to + * voltage (uV) with hardware applied offset/slope values to adc code. + * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using + * lookup table. The hardware applies offset/slope to adc code. 
+ * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using + * 100k pullup. The hardware applies offset/slope to adc code. + * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using + * lookup table for PMIC7. The hardware applies offset/slope to adc code. + * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. + * The hardware applies offset/slope to adc code. + * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade. + * The hardware applies offset/slope to adc code. This is for PMIC7. + * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5 + * charger temperature. + * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5 + * SMB1390 temperature. + */ +enum vadc_scale_fn_type { + SCALE_DEFAULT = 0, + SCALE_THERM_100K_PULLUP, + SCALE_PMIC_THERM, + SCALE_XOTHERM, + SCALE_PMI_CHG_TEMP, + SCALE_HW_CALIB_DEFAULT, + SCALE_HW_CALIB_THERM_100K_PULLUP, + SCALE_HW_CALIB_XOTHERM, + SCALE_HW_CALIB_THERM_100K_PU_PM7, + SCALE_HW_CALIB_PMIC_THERM, + SCALE_HW_CALIB_PMIC_THERM_PM7, + SCALE_HW_CALIB_PM5_CHG_TEMP, + SCALE_HW_CALIB_PM5_SMB_TEMP, + SCALE_HW_CALIB_INVALID, +}; + +struct adc5_data { + const u32 full_scale_code_volt; + const u32 full_scale_code_cur; + const struct adc5_channels *adc_chans; + const struct iio_info *info; + unsigned int *decimation; + unsigned int *hw_settle_1; + unsigned int *hw_settle_2; +}; + +int qcom_vadc_scale(enum vadc_scale_fn_type scaletype, + const struct vadc_linear_graph *calib_graph, + const struct vadc_prescale_ratio *prescale, + bool absolute, + u16 adc_code, int *result_mdec); + +struct qcom_adc5_scale_type { + int (*scale_fn)(const struct vadc_prescale_ratio *prescale, + const struct adc5_data *data, u16 adc_code, int *result); +}; + +int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype, + unsigned int prescale_ratio, + const struct adc5_data *data, + u16 adc_code, int *result_mdec); + +u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio, + u32 full_scale_code_volt, int temp); + +int qcom_adc5_prescaling_from_dt(u32 num, u32 den); + +int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle); + +int qcom_adc5_avg_samples_from_dt(u32 value); + +int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation); + +int qcom_vadc_decimation_from_dt(u32 value); + +#endif /* QCOM_VADC_COMMON_H */ diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index c4118dcb8e05..0a90ba8fa1bb 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -13,6 +13,7 @@ struct iio_dev; struct iio_chan_spec; struct device; +struct device_node; /** * struct iio_channel - everything needed for a consumer to use a channel @@ -97,6 +98,41 @@ void iio_channel_release_all(struct iio_channel *chan); */ struct iio_channel *devm_iio_channel_get_all(struct device *dev); +/** + * of_iio_channel_get_by_name() - get description of all that is needed to access channel. + * @np: Pointer to consumer device tree node + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 
'battery_voltage' + */ +#ifdef CONFIG_OF +struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, const char *name); +#else +static inline struct iio_channel * +of_iio_channel_get_by_name(struct device_node *np, const char *name) +{ + return NULL; +} +#endif + +/** + * devm_of_iio_channel_get_by_name() - Resource managed version of of_iio_channel_get_by_name(). + * @dev: Pointer to consumer device. + * @np: Pointer to consumer device tree node + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 'battery_voltage' + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns valid pointer for iio channel. + * + * The allocated iio channel is automatically released when the device is + * unbound. + */ +struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev, + struct device_node *np, + const char *consumer_channel); + struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/ima.h b/include/linux/ima.h index 7db9cca1af34..61d5723ec303 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -16,7 +16,8 @@ struct linux_binprm; #ifdef CONFIG_IMA extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); -extern void ima_post_create_tmpfile(struct inode *inode); +extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns, + struct inode *inode); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); @@ -27,10 +28,15 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id, bool contents); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); -extern void ima_post_path_mknod(struct dentry *dentry); +extern void ima_post_path_mknod(struct user_namespace *mnt_userns, + struct dentry *dentry); extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size); extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); +extern void ima_measure_critical_data(const char *event_label, + const char *event_name, + const void *buf, size_t buf_len, + bool hash); #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM extern void ima_appraise_parse_cmdline(void); @@ -68,7 +74,8 @@ static inline int ima_file_check(struct file *file, int mask) return 0; } -static inline void ima_post_create_tmpfile(struct inode *inode) +static inline void ima_post_create_tmpfile(struct user_namespace *mnt_userns, + struct inode *inode) { } @@ -112,7 +119,8 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, return 0; } -static inline void ima_post_path_mknod(struct dentry *dentry) +static inline void ima_post_path_mknod(struct user_namespace *mnt_userns, + struct dentry *dentry) { return; } @@ -128,6 +136,12 @@ static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size } static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} + +static inline void ima_measure_critical_data(const char *event_label, + const char *event_name, + const void *buf, size_t buf_len, + bool hash) {} + #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC @@ -153,7 +167,8 @@ static inline void 
ima_post_key_create_or_update(struct key *keyring, #ifdef CONFIG_IMA_APPRAISE extern bool is_ima_appraise_enabled(void); -extern void ima_inode_post_setattr(struct dentry *dentry); +extern void ima_inode_post_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry); extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name); @@ -163,7 +178,8 @@ static inline bool is_ima_appraise_enabled(void) return 0; } -static inline void ima_inode_post_setattr(struct dentry *dentry) +static inline void ima_inode_post_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry) { return; } diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index 54c02c84906a..c1c76a70a6ce 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -36,6 +36,7 @@ #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE +#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f) #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) @@ -44,6 +45,7 @@ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static +#define EXPORT_INDIRECT_CALLABLE(f) #endif /* @@ -60,4 +62,10 @@ #define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__) #endif +#if IS_ENABLED(CONFIG_INET) +#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) +#else +#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__) +#endif + #endif diff --git a/include/linux/init.h b/include/linux/init.h index e668832ef66a..31f54de58429 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -184,19 +184,80 @@ extern bool initcall_debug; * as KEEP() in the linker script. */ +/* Format: <modname>__<counter>_<line>_<fn> */ +#define __initcall_id(fn) \ + __PASTE(__KBUILD_MODNAME, \ + __PASTE(__, \ + __PASTE(__COUNTER__, \ + __PASTE(_, \ + __PASTE(__LINE__, \ + __PASTE(_, fn)))))) + +/* Format: __<prefix>__<iid><id> */ +#define __initcall_name(prefix, __iid, id) \ + __PASTE(__, \ + __PASTE(prefix, \ + __PASTE(__, \ + __PASTE(__iid, id)))) + +#ifdef CONFIG_LTO_CLANG +/* + * With LTO, the compiler doesn't necessarily obey link order for + * initcalls. In order to preserve the correct order, we add each + * variable into its own section and generate a linker script (in + * scripts/link-vmlinux.sh) to specify the order of the sections. + */ +#define __initcall_section(__sec, __iid) \ + #__sec ".init.." #__iid + +/* + * With LTO, the compiler can rename static functions to avoid + * global naming collisions. We use a global stub function for + * initcalls to create a stable symbol name whose address can be + * taken in inline assembly when PREL32 relocations are used. + */ +#define __initcall_stub(fn, __iid, id) \ + __initcall_name(initstub, __iid, id) + +#define __define_initcall_stub(__stub, fn) \ + int __init __stub(void); \ + int __init __stub(void) \ + { \ + return fn(); \ + } \ + __ADDRESSABLE(__stub) +#else +#define __initcall_section(__sec, __iid) \ + #__sec ".init" + +#define __initcall_stub(fn, __iid, id) fn + +#define __define_initcall_stub(__stub, fn) \ + __ADDRESSABLE(fn) +#endif + #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define ___define_initcall(fn, id, __sec) \ - __ADDRESSABLE(fn) \ - asm(".section \"" #__sec ".init\", \"a\" \n" \ - "__initcall_" #fn #id ": \n" \ - ".long " #fn " - . 
\n" \ +#define ____define_initcall(fn, __stub, __name, __sec) \ + __define_initcall_stub(__stub, fn) \ + asm(".section \"" __sec "\", \"a\" \n" \ + __stringify(__name) ": \n" \ + ".long " __stringify(__stub) " - . \n" \ ".previous \n"); #else -#define ___define_initcall(fn, id, __sec) \ - static initcall_t __initcall_##fn##id __used \ - __attribute__((__section__(#__sec ".init"))) = fn; +#define ____define_initcall(fn, __unused, __name, __sec) \ + static initcall_t __name __used \ + __attribute__((__section__(__sec))) = fn; #endif +#define __unique_initcall(fn, id, __sec, __iid) \ + ____define_initcall(fn, \ + __initcall_stub(fn, __iid, id), \ + __initcall_name(initcall, __iid, id), \ + __initcall_section(__sec, __iid)) + +#define ___define_initcall(fn, id, __sec) \ + __unique_initcall(fn, id, __sec, __initcall_id(fn)) + #define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) /* @@ -236,7 +297,7 @@ extern bool initcall_debug; #define __exitcall(fn) \ static exitcall_t __exitcall_##fn __exit_call = fn -#define console_initcall(fn) ___define_initcall(fn,, .con_initcall) +#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall) struct obs_kernel_param { const char *str; @@ -277,14 +338,14 @@ struct obs_kernel_param { var = 1; \ return 0; \ } \ - __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \ + early_param(str_on, parse_##var##_on); \ \ static int __init parse_##var##_off(char *arg) \ { \ var = 0; \ return 0; \ } \ - __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1) + early_param(str_off, parse_##var##_off) /* Relies on boot_command_line being set */ void __init parse_early_param(void); diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 8db6f8c8030b..85c15717af34 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -1,5 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_INITRD_H +#define __LINUX_INITRD_H + #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... 
*/ /* starting block # of image */ @@ -15,6 +18,12 @@ extern int initrd_below_start_ok; extern unsigned long initrd_start, initrd_end; extern void free_initrd_mem(unsigned long, unsigned long); +#ifdef CONFIG_BLK_DEV_INITRD +extern void __init reserve_initrd_mem(void); +#else +static inline void __init reserve_initrd_mem(void) {} +#endif + extern phys_addr_t phys_initrd_start; extern unsigned long phys_initrd_size; @@ -24,3 +33,5 @@ extern char __initramfs_start[]; extern unsigned long __initramfs_size; void console_on_rootfs(void); + +#endif /* __LINUX_INITRD_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 09c6a0bf3892..1bc46b88711a 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -42,6 +42,8 @@ #define DMA_FL_PTE_PRESENT BIT_ULL(0) #define DMA_FL_PTE_US BIT_ULL(2) +#define DMA_FL_PTE_ACCESS BIT_ULL(5) +#define DMA_FL_PTE_DIRTY BIT_ULL(6) #define DMA_FL_PTE_XD BIT_ULL(63) #define ADDR_WIDTH_5LEVEL (57) @@ -168,34 +170,37 @@ * Extended Capability Register */ +#define ecap_rps(e) (((e) >> 49) & 0x1) #define ecap_smpwc(e) (((e) >> 48) & 0x1) #define ecap_flts(e) (((e) >> 47) & 0x1) #define ecap_slts(e) (((e) >> 46) & 0x1) +#define ecap_slads(e) (((e) >> 45) & 0x1) #define ecap_vcs(e) (((e) >> 44) & 0x1) #define ecap_smts(e) (((e) >> 43) & 0x1) -#define ecap_dit(e) ((e >> 41) & 0x1) -#define ecap_pasid(e) ((e >> 40) & 0x1) -#define ecap_pss(e) ((e >> 35) & 0x1f) -#define ecap_eafs(e) ((e >> 34) & 0x1) -#define ecap_nwfs(e) ((e >> 33) & 0x1) -#define ecap_srs(e) ((e >> 31) & 0x1) -#define ecap_ers(e) ((e >> 30) & 0x1) -#define ecap_prs(e) ((e >> 29) & 0x1) -#define ecap_broken_pasid(e) ((e >> 28) & 0x1) -#define ecap_dis(e) ((e >> 27) & 0x1) -#define ecap_nest(e) ((e >> 26) & 0x1) -#define ecap_mts(e) ((e >> 25) & 0x1) -#define ecap_ecs(e) ((e >> 24) & 0x1) +#define ecap_dit(e) (((e) >> 41) & 0x1) +#define ecap_pds(e) (((e) >> 42) & 0x1) +#define ecap_pasid(e) (((e) >> 40) & 0x1) +#define ecap_pss(e) (((e) >> 35) & 0x1f) +#define ecap_eafs(e) (((e) >> 34) & 0x1) +#define ecap_nwfs(e) (((e) >> 33) & 0x1) +#define ecap_srs(e) (((e) >> 31) & 0x1) +#define ecap_ers(e) (((e) >> 30) & 0x1) +#define ecap_prs(e) (((e) >> 29) & 0x1) +#define ecap_broken_pasid(e) (((e) >> 28) & 0x1) +#define ecap_dis(e) (((e) >> 27) & 0x1) +#define ecap_nest(e) (((e) >> 26) & 0x1) +#define ecap_mts(e) (((e) >> 25) & 0x1) +#define ecap_ecs(e) (((e) >> 24) & 0x1) #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) #define ecap_coherent(e) ((e) & 0x1) #define ecap_qis(e) ((e) & 0x2) -#define ecap_pass_through(e) ((e >> 6) & 0x1) -#define ecap_eim_support(e) ((e >> 4) & 0x1) -#define ecap_ir_support(e) ((e >> 3) & 0x1) +#define ecap_pass_through(e) (((e) >> 6) & 0x1) +#define ecap_eim_support(e) (((e) >> 4) & 0x1) +#define ecap_ir_support(e) (((e) >> 3) & 0x1) #define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) -#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) -#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ +#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf) +#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */ /* Virtual command interface capability */ #define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ @@ -662,7 +667,7 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) * 7: super page * 8-10: available * 11: snoop behavior - * 12-63: Host physcial address + * 12-63: Host physical address */ struct dma_pte { u64 
val; diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h deleted file mode 100644 index fcd841a90f2f..000000000000 --- a/include/linux/intel-pti.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) Intel 2011 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The PTI (Parallel Trace Interface) driver directs trace data routed from - * various parts in the system out through the Intel Penwell PTI port and - * out of the mobile device for analysis with a debugging tool - * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, - * compact JTAG, standard. - * - * This header file will allow other parts of the OS to use the - * interface to write out it's contents for debugging a mobile system. - */ - -#ifndef LINUX_INTEL_PTI_H_ -#define LINUX_INTEL_PTI_H_ - -/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ -#define PTI_LASTDWORD_DTS 0x30 - -/* basic structure used as a write address to the PTI HW */ -struct pti_masterchannel { - u8 master; - u8 channel; -}; - -/* the following functions are defined in misc/pti.c */ -void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); -struct pti_masterchannel *pti_request_masterchannel(u8 type, - const char *thread_name); -void pti_release_masterchannel(struct pti_masterchannel *mc); - -#endif /* LINUX_INTEL_PTI_H_ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index bb8ff9083e7d..967e25767153 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -569,15 +569,6 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); -#ifdef __ARCH_HAS_DO_SOFTIRQ -void do_softirq_own_stack(void); -#else -static inline void do_softirq_own_stack(void) -{ - __do_softirq(); -} -#endif - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index ea727eb1a1a9..a4c9ca2c31f1 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -15,6 +15,7 @@ enum io_pgtable_fmt { ARM_64_LPAE_S2, ARM_V7S, ARM_MALI_LPAE, + AMD_IOMMU_V1, IO_PGTABLE_NUM_FMTS, }; @@ -68,13 +69,9 @@ struct io_pgtable_cfg { * hardware which does not implement the permissions of a given * format, and/or requires some format-specific default value. * - * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid - * (unmapped) entries but the hardware might do so anyway, perform - * TLB maintenance when mapping as well as when unmapping. - * * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend - * to support up to 34 bits PA where the bit32 and bit33 are - * encoded in the bit9 and bit4 of the PTE respectively. + * to support up to 35 bits PA where the bit32, bit33 and bit34 are + * encoded in the bit9, bit4 and bit5 of the PTE respectively. 
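/*
 * Worked example, illustrative only, of why the parentheses added around (e)
 * in the ecap_*() macros above matter: with an argument of lower precedence
 * than >>, the old form shifts only part of the expression.
 *
 *     old: ecap_dit(a | b)  ->  ((a | b >> 41) & 0x1)    // only b is shifted
 *     new: ecap_dit(a | b)  ->  (((a | b) >> 41) & 0x1)  // whole value shifted
 */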
* * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs * on unmap, for DMA domains using the flush queue mechanism for @@ -88,7 +85,6 @@ struct io_pgtable_cfg { */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) - #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) @@ -214,14 +210,16 @@ struct io_pgtable_domain_attr { static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { - iop->cfg.tlb->tlb_flush_all(iop->cookie); + if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) + iop->cfg.tlb->tlb_flush_all(iop->cookie); } static inline void io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, size_t size, size_t granule) { - iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); + if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk) + iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); } static inline void @@ -229,7 +227,7 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, struct iommu_iotlb_gather * gather, unsigned long iova, size_t granule) { - if (iop->cfg.tlb->tlb_add_page) + if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page) iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); } @@ -251,5 +249,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns; +extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns; #endif /* __IO_PGTABLE_H */ diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 35b2d845704d..9761a0ec9f95 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -5,33 +5,29 @@ #include <linux/sched.h> #include <linux/xarray.h> -struct io_identity { - struct files_struct *files; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *creds; - struct nsproxy *nsproxy; - struct fs_struct *fs; - unsigned long fsize; -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - refcount_t count; +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; }; struct io_uring_task { /* submission side */ struct xarray xa; struct wait_queue_head wait; - struct file *last; + void *last; + void *io_wq; struct percpu_counter inflight; - struct io_identity __identity; - struct io_identity *identity; atomic_t in_idle; bool sqpoll; + + spinlock_t task_lock; + struct io_wq_work_list task_list; + unsigned long task_state; + struct callback_head task_work; }; #if defined(CONFIG_IO_URING) @@ -42,12 +38,12 @@ void __io_uring_free(struct task_struct *tsk); static inline void io_uring_task_cancel(void) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) + if (current->io_uring) __io_uring_task_cancel(); } static inline void io_uring_files_cancel(struct files_struct *files) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) + if (current->io_uring) __io_uring_files_cancel(files); } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 5bd3cac4df9c..d202fd2d0f91 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -55,6 +55,7 @@ struct vm_fault; #define IOMAP_F_SHARED 
0x04 #define IOMAP_F_MERGED 0x08 #define IOMAP_F_BUFFER_HEAD 0x10 +#define IOMAP_F_ZONE_APPEND 0x20 /* * Flags set by the core iomap code during operations: @@ -122,6 +123,7 @@ struct iomap_page_ops { #define IOMAP_FAULT (1 << 3) /* mapping for page fault */ #define IOMAP_DIRECT (1 << 4) /* direct I/O */ #define IOMAP_NOWAIT (1 << 5) /* do not block */ +#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */ struct iomap_ops { /* @@ -256,12 +258,25 @@ struct iomap_dio_ops { struct bio *bio, loff_t file_offset); }; +/* + * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not + * synchronous. + */ +#define IOMAP_DIO_FORCE_WAIT (1 << 0) + +/* + * Do not allocate blocks or zero partial blocks, but instead fall back to + * the caller by returning -EAGAIN. Used to optimize direct I/O writes that + * are not aligned to the file system block size. + */ +#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1) + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - bool wait_for_completion); + unsigned int dio_flags); struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - bool wait_for_completion); + unsigned int dio_flags); ssize_t iomap_dio_complete(struct iomap_dio *dio); int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b3f0e2018c62..5e7fe519430a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -170,7 +170,7 @@ enum iommu_dev_features { * struct iommu_iotlb_gather - Range information for a pending IOTLB flush * * @start: IOVA representing the start of the range to be flushed - * @end: IOVA representing the end of the range to be flushed (exclusive) + * @end: IOVA representing the end of the range to be flushed (inclusive) * @pgsize: The interval at which to perform the flush * * This structure is intended to be updated by multiple calls to the @@ -246,7 +246,8 @@ struct iommu_ops { size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain); + void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); @@ -376,6 +377,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu, void iommu_device_sysfs_remove(struct iommu_device *iommu); int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); +int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); static inline void __iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops) @@ -514,7 +516,6 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t offset, u64 size, int prot); -extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -538,7 +539,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather 
*gather, unsigned long iova, size_t size) { - unsigned long start = iova, end = start + size; + unsigned long start = iova, end = start + size - 1; /* * If the new page is disjoint from the current range or is mapped at @@ -616,7 +617,10 @@ static inline void dev_iommu_fwspec_set(struct device *dev, static inline void *dev_iommu_priv_get(struct device *dev) { - return dev->iommu->priv; + if (dev->iommu) + return dev->iommu->priv; + else + return NULL; } static inline void dev_iommu_priv_set(struct device *dev, void *priv) @@ -627,7 +631,6 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv) int iommu_probe_device(struct device *dev); void iommu_release_device(struct device *dev); -bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); @@ -746,11 +749,6 @@ static inline int iommu_domain_window_enable(struct iommu_domain *domain, return -ENODEV; } -static inline void iommu_domain_window_disable(struct iommu_domain *domain, - u32 wnd_nr) -{ -} - static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; @@ -982,12 +980,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) } static inline bool -iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) -{ - return false; -} - -static inline bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) { return false; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index fe48b7840665..55de385c839c 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -334,11 +334,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq) res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; } -#ifdef CONFIG_IO_STRICT_DEVMEM -void revoke_devmem(struct resource *res); -#else -static inline void revoke_devmem(struct resource *res) { }; -#endif +extern struct address_space *iomem_get_mapping(void); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 76e16ae20729..c834c01c0a5b 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -150,10 +150,8 @@ unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); -void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); -bool has_iova_flush_queue(struct iova_domain *iovad); int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); @@ -212,22 +210,12 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad, return NULL; } -static inline void copy_reserved_iova(struct iova_domain *from, - struct iova_domain *to) -{ -} - static inline void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn) { } -static inline bool has_iova_flush_queue(struct iova_domain *iovad) -{ - return false; -} - static inline int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, 
iova_entry_dtor entry_dtor) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index dda61d150a13..70b2ad3b9884 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -31,6 +31,7 @@ struct ipv6_devconf { __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; @@ -84,7 +85,6 @@ struct ipv6_params { __s32 autoconf; }; extern struct ipv6_params ipv6_defaults; -#include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> diff --git a/include/linux/irq.h b/include/linux/irq.h index 4aeb1c4c7e07..2efde6a79b7e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -928,7 +928,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ - irq_alloc_descs(-1, 0, 1, node) + irq_alloc_descs(-1, 1, 1, node) #define irq_alloc_desc_at(at, node) \ irq_alloc_descs(at, at, 1, node) @@ -943,7 +943,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) #define devm_irq_alloc_desc(dev, node) \ - devm_irq_alloc_descs(dev, -1, 0, 1, node) + devm_irq_alloc_descs(dev, -1, 1, 1, node) #define devm_irq_alloc_desc_at(dev, at, node) \ devm_irq_alloc_descs(dev, at, at, 1, node) diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 42d196805f58..33cacc8af26d 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -150,7 +150,6 @@ struct irq_domain_chip_generic; * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. * @parent: Pointer to parent irq_domain to support hierarchy irq_domains - * @debugfs_file: dentry for the domain debugfs file * * Revmap data, used internally by irq_domain * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that @@ -174,9 +173,6 @@ struct irq_domain { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_domain *parent; #endif -#ifdef CONFIG_GENERIC_IRQ_DEBUGFS - struct dentry *debugfs_file; -#endif /* reverse map data. The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 8de0e1373de7..600c10da321a 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -149,6 +149,17 @@ do { \ # define start_critical_timings() do { } while (0) #endif +#ifdef CONFIG_DEBUG_IRQFLAGS +extern void warn_bogus_irq_restore(void); +#define raw_check_bogus_irq_restore() \ + do { \ + if (unlikely(!arch_irqs_disabled())) \ + warn_bogus_irq_restore(); \ + } while (0) +#else +#define raw_check_bogus_irq_restore() do { } while (0) +#endif + /* * Wrap the arch provided IRQ routines to provide appropriate checks. 
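/*
 * Illustrative sketch, not part of the diff: the pattern the new
 * raw_check_bogus_irq_restore() above is meant to catch.  With
 * CONFIG_DEBUG_IRQFLAGS, restoring flags while interrupts are already
 * enabled triggers warn_bogus_irq_restore().
 */
static void example_bogus_irq_restore(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... critical section ... */
	local_irq_enable();		/* bug: re-enables IRQs early */
	local_irq_restore(flags);	/* now warns: IRQs are not disabled */
}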
*/ @@ -162,6 +173,7 @@ do { \ #define raw_local_irq_restore(flags) \ do { \ typecheck(unsigned long, flags); \ + raw_check_bogus_irq_restore(); \ arch_local_irq_restore(flags); \ } while (0) #define raw_local_save_flags(flags) \ diff --git a/include/linux/isa.h b/include/linux/isa.h index 41336da0f4e7..e30963190968 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h @@ -13,7 +13,7 @@ struct isa_driver { int (*match)(struct device *, unsigned int); int (*probe)(struct device *, unsigned int); - int (*remove)(struct device *, unsigned int); + void (*remove)(struct device *, unsigned int); void (*shutdown)(struct device *, unsigned int); int (*suspend)(struct device *, unsigned int, pm_message_t); int (*resume)(struct device *, unsigned int); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 32809624d422..d92691262f51 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -261,14 +261,14 @@ static __always_inline void jump_label_init(void) static __always_inline bool static_key_false(struct static_key *key) { - if (unlikely(static_key_count(key) > 0)) + if (unlikely_notrace(static_key_count(key) > 0)) return true; return false; } static __always_inline bool static_key_true(struct static_key *key) { - if (likely(static_key_count(key) > 0)) + if (likely_notrace(static_key_count(key) > 0)) return true; return false; } @@ -460,7 +460,7 @@ extern bool ____wrong_branch_error(void); branch = !arch_static_branch_jump(&(x)->key, true); \ else \ branch = ____wrong_branch_error(); \ - likely(branch); \ + likely_notrace(branch); \ }) #define static_branch_unlikely(x) \ @@ -472,13 +472,13 @@ extern bool ____wrong_branch_error(void); branch = arch_static_branch(&(x)->key, false); \ else \ branch = ____wrong_branch_error(); \ - unlikely(branch); \ + unlikely_notrace(branch); \ }) #else /* !CONFIG_JUMP_LABEL */ -#define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) -#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) +#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key)) +#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key)) #endif /* CONFIG_JUMP_LABEL */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 481273f0c72d..465060acc981 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -71,15 +71,14 @@ static inline void *dereference_symbol_descriptor(void *ptr) return ptr; } -#ifdef CONFIG_KALLSYMS -/* Lookup the address for a symbol. Returns 0 if not found. */ -unsigned long kallsyms_lookup_name(const char *name); - -/* Call a function on each kallsyms symbol in the core kernel */ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data); +#ifdef CONFIG_KALLSYMS +/* Lookup the address for a symbol. Returns 0 if not found. 
*/ +unsigned long kallsyms_lookup_name(const char *name); + extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); @@ -108,14 +107,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name) return 0; } -static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, - unsigned long), - void *data) -{ - return 0; -} - static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index ca5e89fb10d3..3d6d22a25bdc 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -5,6 +5,12 @@ #include <linux/types.h> /* + * The annotations present in this file are only relevant for the software + * KASAN modes that rely on compiler instrumentation, and will be optimized + * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead. + */ + +/* * __kasan_check_*: Always available when KASAN is enabled. This may be used * even in compilation units that selectively disable KASAN, but must use KASAN * to validate access to an address. Never use these in header files! diff --git a/include/linux/kasan.h b/include/linux/kasan.h index fe1ae73ff8b5..b91732bd05d7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {} struct kasan_cache { int alloc_meta_offset; int free_meta_offset; + bool is_kmalloc; }; #ifdef CONFIG_KASAN_HW_TAGS @@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache, __kasan_cache_create(cache, size, flags); } +void __kasan_cache_create_kmalloc(struct kmem_cache *cache); +static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) +{ + if (kasan_enabled()) + __kasan_cache_create_kmalloc(cache); +} + size_t __kasan_metadata_size(struct kmem_cache *cache); static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) { @@ -185,19 +193,25 @@ static __always_inline void * __must_check kasan_init_slab_obj( } bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); -static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) { if (kasan_enabled()) - return __kasan_slab_free(s, object, ip); + return __kasan_slab_free(s, object, _RET_IP_); return false; } +void __kasan_kfree_large(void *ptr, unsigned long ip); +static __always_inline void kasan_kfree_large(void *ptr) +{ + if (kasan_enabled()) + __kasan_kfree_large(ptr, _RET_IP_); +} + void __kasan_slab_free_mempool(void *ptr, unsigned long ip); -static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) +static __always_inline void kasan_slab_free_mempool(void *ptr) { if (kasan_enabled()) - __kasan_slab_free_mempool(ptr, ip); + __kasan_slab_free_mempool(ptr, _RET_IP_); } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, @@ -240,13 +254,19 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } -void __kasan_kfree_large(void *ptr, unsigned long ip); -static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) +/* + * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for + * the hardware tag-based mode that doesn't rely on compiler instrumentation. 
+ */ +bool __kasan_check_byte(const void *addr, unsigned long ip); +static __always_inline bool kasan_check_byte(const void *addr) { if (kasan_enabled()) - __kasan_kfree_large(ptr, ip); + return __kasan_check_byte(addr, _RET_IP_); + return true; } + bool kasan_save_enable_multi_shot(void); void kasan_restore_multi_shot(bool enabled); @@ -266,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} +static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {} static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, @@ -277,12 +298,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) { return false; } -static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {} +static inline void kasan_kfree_large(void *ptr) {} +static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) { @@ -302,7 +323,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} +static inline bool kasan_check_byte(const void *address) +{ + return true; +} #endif /* CONFIG_KASAN */ @@ -333,6 +357,13 @@ static inline void *kasan_reset_tag(const void *addr) return (void *)arch_kasan_reset_tag(addr); } +/** + * kasan_report - print a report about a bad memory access detected by KASAN + * @addr: address of the bad access + * @size: size of the bad access + * @is_write: whether the bad access is a write or a read + * @ip: instruction pointer for the accessibility check or the bad access itself + */ bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index 82f29aa35062..c40811d79769 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h @@ -6,8 +6,6 @@ #include <linux/interrupt.h> #include <linux/keyboard.h> -extern struct tasklet_struct keyboard_tasklet; - extern char *func_table[MAX_NR_FUNC]; /* @@ -71,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led); extern int set_console(int nr); extern void schedule_console_callback(void); -/* FIXME: review locking for vt.c callers */ -static inline void set_leds(void) -{ - tasklet_schedule(&keyboard_tasklet); -} - static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) { return ((kbd->modeflags >> flag) & 1); @@ -135,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) struct console; -void compute_shiftstate(void); +void vt_set_leds_compute_shiftstate(void); /* defkeymap.c */ diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 9d12c970f18f..24a59cb06963 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -2,8 +2,6 @@ #ifndef __LINUX_KCONFIG_H #define __LINUX_KCONFIG_H -/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. 
See help in Kconfig) */ - #include <generated/autoconf.h> #ifdef CONFIG_CPU_BIG_ENDIAN @@ -72,4 +70,10 @@ */ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) +/* + * IF_ENABLED(CONFIG_FOO, ptr) evaluates to (ptr) if CONFIG_FOO is set to 'y' + * or 'm', NULL otherwise. + */ +#define IF_ENABLED(option, ptr) (IS_ENABLED(option) ? (ptr) : NULL) + #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kd.h b/include/linux/kd.h deleted file mode 100644 index b130a18f860f..000000000000 --- a/include/linux/kd.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_KD_H -#define _LINUX_KD_H - -#include <uapi/linux/kd.h> - -#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */ -#endif /* _LINUX_KD_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f7902d8c1048..5b7ed6dc99ac 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -15,7 +15,7 @@ #include <linux/typecheck.h> #include <linux/printk.h> #include <linux/build_bug.h> - +#include <linux/static_call_types.h> #include <asm/byteorder.h> #include <uapi/linux/kernel.h> @@ -81,11 +81,26 @@ struct pt_regs; struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() + +extern int __cond_resched(void); +# define might_resched() __cond_resched() + +#elif defined(CONFIG_PREEMPT_DYNAMIC) + +extern int __cond_resched(void); + +DECLARE_STATIC_CALL(might_resched, __cond_resched); + +static __always_inline void might_resched(void) +{ + static_call_mod(might_resched)(); +} + #else + # define might_resched() do { } while (0) -#endif + +#endif /* CONFIG_PREEMPT_* */ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP extern void ___might_sleep(const char *file, int line, int preempt_offset); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 9e93bef52968..8a7aa1d7e0e3 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -300,6 +300,11 @@ struct kimage { /* Information for loading purgatory */ struct purgatory_info purgatory_info; #endif + +#ifdef CONFIG_IMA_KEXEC + /* Virtual address of IMA measurement buffer for kexec syscall */ + void *ima_buffer; +#endif }; /* kexec interface functions */ @@ -309,6 +314,8 @@ extern void machine_kexec_cleanup(struct kimage *image); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); +int machine_kexec_post_load(struct kimage *image); + extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); diff --git a/include/linux/key.h b/include/linux/key.h index 0f2e24f13c2b..7febc4881363 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ +#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); @@ -360,7 +361,7 @@ static inline struct key *request_key(struct key_type *type, * completion of keys undergoing construction with a non-interruptible wait. 
*/ #define request_key_net(type, description, net, callout_info) \ - request_key_tag(type, description, net->key_domain, callout_info); + request_key_tag(type, description, net->key_domain, callout_info) /** * request_key_net_rcu - Request a key for a net namespace under RCU conditions @@ -372,7 +373,7 @@ static inline struct key *request_key(struct key_type *type, * network namespace are used. */ #define request_key_net_rcu(type, description, net) \ - request_key_rcu(type, description, net->key_domain); + request_key_rcu(type, description, net->key_domain) #endif /* CONFIG_NET */ extern int wait_for_key_construction(struct key *key, bool intr); diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index 18f3f5346843..a27605e2f826 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -85,6 +85,9 @@ struct blk_keyslot_manager { int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots); +int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm, + unsigned int num_slots); + blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, const struct blk_crypto_key *key, struct blk_ksm_keyslot **slot_ptr); @@ -103,4 +106,15 @@ void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm); void blk_ksm_destroy(struct blk_keyslot_manager *ksm); +void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent, + const struct blk_keyslot_manager *child); + +void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm); + +bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset, + struct blk_keyslot_manager *ksm_subset); + +void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm, + struct blk_keyslot_manager *reference_ksm); + #endif /* __LINUX_KEYSLOT_MANAGER_H */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h new file mode 100644 index 000000000000..a70d1ea03532 --- /dev/null +++ b/include/linux/kfence.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault + * handler integration. For more info see Documentation/dev-tools/kfence.rst. + * + * Copyright (C) 2020, Google LLC. + */ + +#ifndef _LINUX_KFENCE_H +#define _LINUX_KFENCE_H + +#include <linux/mm.h> +#include <linux/types.h> + +#ifdef CONFIG_KFENCE + +/* + * We allocate an even number of pages, as it simplifies calculations to map + * address to metadata indices; effectively, the very first page serves as an + * extended guard page, but otherwise has no special purpose. + */ +#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) +extern char *__kfence_pool; + +#ifdef CONFIG_KFENCE_STATIC_KEYS +#include <linux/static_key.h> +DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); +#else +#include <linux/atomic.h> +extern atomic_t kfence_allocation_gate; +#endif + +/** + * is_kfence_address() - check if an address belongs to KFENCE pool + * @addr: address to check + * + * Return: true or false depending on whether the address is within the KFENCE + * object range. + * + * KFENCE objects live in a separate page range and are not to be intermixed + * with regular heap objects (e.g. KFENCE objects must never be added to the + * allocator freelists). Failing to do so may and will result in heap + * corruptions, therefore is_kfence_address() must be used to check whether + * an object requires specific handling. + * + * Note: This function may be used in fast-paths, and is performance critical. 
+ * Future changes should take this into account; for instance, we want to avoid + * introducing another load and therefore need to keep KFENCE_POOL_SIZE a + * constant (until immediate patching support is added to the kernel). + */ +static __always_inline bool is_kfence_address(const void *addr) +{ + /* + * The non-NULL check is required in case the __kfence_pool pointer was + * never initialized; keep it in the slow-path after the range-check. + */ + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); +} + +/** + * kfence_alloc_pool() - allocate the KFENCE pool via memblock + */ +void __init kfence_alloc_pool(void); + +/** + * kfence_init() - perform KFENCE initialization at boot time + * + * Requires that kfence_alloc_pool() was called before. This sets up the + * allocation gate timer, and requires that workqueues are available. + */ +void __init kfence_init(void); + +/** + * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects + * @s: cache being shut down + * + * Before shutting down a cache, one must ensure there are no remaining objects + * allocated from it. Because KFENCE objects are not referenced from the cache + * directly, we need to check them here. + * + * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does + * not return if allocated objects still exist: it prints an error message and + * simply aborts destruction of a cache, leaking memory. + * + * If the only such objects are KFENCE objects, we will not leak the entire + * cache, but instead try to provide more useful debug info by making allocated + * objects "zombie allocations". Objects may then still be used or freed (which + * is handled gracefully), but usage will result in showing KFENCE error reports + * which include stack traces to the user of the object, the original allocation + * site, and caller to shutdown_cache(). + */ +void kfence_shutdown_cache(struct kmem_cache *s); + +/* + * Allocate a KFENCE object. Allocators must not call this function directly, + * use kfence_alloc() instead. + */ +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); + +/** + * kfence_alloc() - allocate a KFENCE object with a low probability + * @s: struct kmem_cache with object requirements + * @size: exact size of the object to allocate (can be less than @s->size + * e.g. for kmalloc caches) + * @flags: GFP flags + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE object. + * + * kfence_alloc() should be inserted into the heap allocation fast path, + * allowing it to transparently return KFENCE-allocated objects with a low + * probability using a static branch (the probability is controlled by the + * kfence.sample_interval boot parameter). + */ +static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +{ +#ifdef CONFIG_KFENCE_STATIC_KEYS + if (static_branch_unlikely(&kfence_allocation_key)) +#else + if (unlikely(!atomic_read(&kfence_allocation_gate))) +#endif + return __kfence_alloc(s, size, flags); + return NULL; +} + +/** + * kfence_ksize() - get actual amount of memory allocated for a KFENCE object + * @addr: pointer to a heap object + * + * Return: + * * 0 - not a KFENCE object, must call __ksize() instead, + * * non-0 - this many bytes can be accessed without causing a memory error. + * + * kfence_ksize() returns the number of bytes requested for a KFENCE object at + * allocation time. 
This number may be less than the object size of the + * corresponding struct kmem_cache. + */ +size_t kfence_ksize(const void *addr); + +/** + * kfence_object_start() - find the beginning of a KFENCE object + * @addr: address within a KFENCE-allocated object + * + * Return: address of the beginning of the object. + * + * SL[AU]B-allocated objects are laid out within a page one by one, so it is + * easy to calculate the beginning of an object given a pointer inside it and + * the object size. The same is not true for KFENCE, which places a single + * object at either end of the page. This helper function is used to find the + * beginning of a KFENCE-allocated object. + */ +void *kfence_object_start(const void *addr); + +/** + * __kfence_free() - release a KFENCE heap object to KFENCE pool + * @addr: object to be freed + * + * Requires: is_kfence_address(addr) + * + * Release a KFENCE object and mark it as freed. + */ +void __kfence_free(void *addr); + +/** + * kfence_free() - try to release an arbitrary heap object to KFENCE pool + * @addr: object to be freed + * + * Return: + * * false - object doesn't belong to KFENCE pool and was ignored, + * * true - object was released to KFENCE pool. + * + * Release a KFENCE object and mark it as freed. May be called on any object, + * even non-KFENCE objects, to simplify integration of the hooks into the + * allocator's free codepath. The allocator must check the return value to + * determine if it was a KFENCE object or not. + */ +static __always_inline __must_check bool kfence_free(void *addr) +{ + if (!is_kfence_address(addr)) + return false; + __kfence_free(addr); + return true; +} + +/** + * kfence_handle_page_fault() - perform page fault handling for KFENCE pages + * @addr: faulting address + * @is_write: is access a write + * @regs: current struct pt_regs (can be NULL, but shows full stack trace) + * + * Return: + * * false - address outside KFENCE pool, + * * true - page fault handled by KFENCE, no additional handling required. + * + * A page fault inside KFENCE pool indicates a memory error, such as an + * out-of-bounds access, a use-after-free or an invalid memory access. In these + * cases KFENCE prints an error message and marks the offending page as + * present, so that the kernel can proceed. 
+ */ +bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs); + +#else /* CONFIG_KFENCE */ + +static inline bool is_kfence_address(const void *addr) { return false; } +static inline void kfence_alloc_pool(void) { } +static inline void kfence_init(void) { } +static inline void kfence_shutdown_cache(struct kmem_cache *s) { } +static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } +static inline size_t kfence_ksize(const void *addr) { return 0; } +static inline void *kfence_object_start(const void *addr) { return NULL; } +static inline void __kfence_free(void *addr) { } +static inline bool __must_check kfence_free(void *addr) { return false; } +static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, + struct pt_regs *regs) +{ + return false; +} + +#endif + +#endif /* _LINUX_KFENCE_H */ diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 0d6cf64c8bb1..392a3670944c 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -325,7 +325,6 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count); extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); -extern void kgdb_schedule_breakpoint(void); extern int kgdb_has_hit_break(unsigned long addr); extern int @@ -360,9 +359,11 @@ extern atomic_t kgdb_active; extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); +extern void kgdb_free_init_mem(void); #else /* ! CONFIG_KGDB */ #define in_dbg_master() (0) #define dbg_late_init() static inline void kgdb_panic(const char *msg) {} +static inline void kgdb_free_init_mem(void) { } #endif /* ! CONFIG_KGDB */ #endif /* _KGDB_H_ */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index c941b7377321..2fcc01891b47 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -3,6 +3,7 @@ #define _LINUX_KHUGEPAGED_H #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ +#include <linux/shmem_fs.h> #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) if ((khugepaged_always() || + (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) || (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && !(vm_flags & VM_NOHUGEPAGE) && !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index b3a36b0cfc81..1883a4a9f16a 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -266,7 +266,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); extern bool arch_kprobe_on_func_entry(unsigned long offset); -extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); +extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); extern int kprobe_add_ksym_blacklist(unsigned long entry); diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h index 0738389b42b6..1a37a664f915 100644 --- a/include/linux/ks0108.h +++ b/include/linux/ks0108.h @@ -4,7 +4,7 @@ * Version: 0.1.0 * Description: ks0108 LCD Controller driver header * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda 
<ojeda@kernel.org> * Date: 2006-10-31 */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f3b1013fb22c..1b65e7204344 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -11,6 +11,7 @@ #include <linux/signal.h> #include <linux/sched.h> #include <linux/bug.h> +#include <linux/minmax.h> #include <linux/mm.h> #include <linux/mmu_notifier.h> #include <linux/preempt.h> @@ -425,9 +426,8 @@ struct kvm_irq_routing_table { #define KVM_PRIVATE_MEM_SLOTS 0 #endif -#ifndef KVM_MEM_SLOTS_NUM -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) -#endif +#define KVM_MEM_SLOTS_NUM SHRT_MAX +#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS) #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) @@ -451,7 +451,12 @@ struct kvm_memslots { }; struct kvm { +#ifdef KVM_HAVE_MMU_RWLOCK + rwlock_t mmu_lock; +#else spinlock_t mmu_lock; +#endif /* KVM_HAVE_MMU_RWLOCK */ + struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; @@ -502,6 +507,8 @@ struct kvm { struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; + unsigned long mmu_notifier_range_start; + unsigned long mmu_notifier_range_end; #endif long tlbs_dirty; struct list_head devices; @@ -729,7 +736,7 @@ kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, - bool *writable); + bool *writable, hva_t *hva); void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_release_pfn_dirty(kvm_pfn_t pfn); @@ -1203,6 +1210,26 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) return 1; return 0; } + +static inline int mmu_notifier_retry_hva(struct kvm *kvm, + unsigned long mmu_seq, + unsigned long hva) +{ + lockdep_assert_held(&kvm->mmu_lock); + /* + * If mmu_notifier_count is non-zero, then the range maintained by + * kvm_mmu_notifier_invalidate_range_start contains all addresses that + * might be being invalidated. Note that it may include some false + * positives, due to shortcuts when handing concurrent invalidations. 
+ */ + if (unlikely(kvm->mmu_notifier_count) && + hva >= kvm->mmu_notifier_range_start && + hva < kvm->mmu_notifier_range_end) + return 1; + if (kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index 21a3358a1731..612b4cab3819 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -85,6 +85,7 @@ static inline struct led_classdev_flash *lcdev_to_flcdev( return container_of(lcdev, struct led_classdev_flash, led_cdev); } +#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) /** * led_classdev_flash_register_ext - register a new object of LED class with * init data and with support for flash LEDs @@ -98,12 +99,6 @@ int led_classdev_flash_register_ext(struct device *parent, struct led_classdev_flash *fled_cdev, struct led_init_data *init_data); -static inline int led_classdev_flash_register(struct device *parent, - struct led_classdev_flash *fled_cdev) -{ - return led_classdev_flash_register_ext(parent, fled_cdev, NULL); -} - /** * led_classdev_flash_unregister - unregisters an object of led_classdev class * with support for flash LEDs @@ -118,15 +113,44 @@ int devm_led_classdev_flash_register_ext(struct device *parent, struct led_init_data *init_data); +void devm_led_classdev_flash_unregister(struct device *parent, + struct led_classdev_flash *fled_cdev); + +#else + +static inline int led_classdev_flash_register_ext(struct device *parent, + struct led_classdev_flash *fled_cdev, + struct led_init_data *init_data) +{ + return 0; +} + +static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {}; +static inline int devm_led_classdev_flash_register_ext(struct device *parent, + struct led_classdev_flash *fled_cdev, + struct led_init_data *init_data) +{ + return 0; +} + +static inline void devm_led_classdev_flash_unregister(struct device *parent, + struct led_classdev_flash *fled_cdev) +{}; + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */ + +static inline int led_classdev_flash_register(struct device *parent, + struct led_classdev_flash *fled_cdev) +{ + return led_classdev_flash_register_ext(parent, fled_cdev, NULL); +} + static inline int devm_led_classdev_flash_register(struct device *parent, struct led_classdev_flash *fled_cdev) { return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL); } -void devm_led_classdev_flash_unregister(struct device *parent, - struct led_classdev_flash *fled_cdev); - /** * led_set_flash_strobe - setup flash strobe * @fled_cdev: the flash LED to set strobe on diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h index 5116f9a866cc..210d57bcd767 100644 --- a/include/linux/led-class-multicolor.h +++ b/include/linux/led-class-multicolor.h @@ -44,12 +44,6 @@ int led_classdev_multicolor_register_ext(struct device *parent, struct led_classdev_mc *mcled_cdev, struct led_init_data *init_data); -static inline int led_classdev_multicolor_register(struct device *parent, - struct led_classdev_mc *mcled_cdev) -{ - return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); -} - /** * led_classdev_multicolor_unregister - unregisters an object of led_classdev * class with support for multicolor LEDs @@ -68,13 +62,6 @@ int devm_led_classdev_multicolor_register_ext(struct device *parent, struct led_classdev_mc *mcled_cdev, struct led_init_data *init_data); -static inline int devm_led_classdev_multicolor_register(struct device *parent, - struct 
led_classdev_mc *mcled_cdev) -{ - return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev, - NULL); -} - void devm_led_classdev_multicolor_unregister(struct device *parent, struct led_classdev_mc *mcled_cdev); #else @@ -83,27 +70,33 @@ static inline int led_classdev_multicolor_register_ext(struct device *parent, struct led_classdev_mc *mcled_cdev, struct led_init_data *init_data) { - return -EINVAL; -} - -static inline int led_classdev_multicolor_register(struct device *parent, - struct led_classdev_mc *mcled_cdev) -{ - return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); + return 0; } static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {}; static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, enum led_brightness brightness) { - return -EINVAL; + return 0; } static inline int devm_led_classdev_multicolor_register_ext(struct device *parent, struct led_classdev_mc *mcled_cdev, struct led_init_data *init_data) { - return -EINVAL; + return 0; +} + +static inline void devm_led_classdev_multicolor_unregister(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{}; + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */ + +static inline int led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); } static inline int devm_led_classdev_multicolor_register(struct device *parent, @@ -113,9 +106,4 @@ static inline int devm_led_classdev_multicolor_register(struct device *parent, NULL); } -static inline void devm_led_classdev_multicolor_unregister(struct device *parent, - struct led_classdev_mc *mcled_cdev) -{}; - -#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */ #endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 6a8d6409c993..329fd914cf24 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -63,8 +63,8 @@ struct led_hw_trigger_type { struct led_classdev { const char *name; - enum led_brightness brightness; - enum led_brightness max_brightness; + unsigned int brightness; + unsigned int max_brightness; int flags; /* Lower 16 bits reflect status */ @@ -253,8 +253,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev, * software blink timer that implements blinking when the * hardware doesn't. This function is guaranteed not to sleep. 
*/ -void led_set_brightness(struct led_classdev *led_cdev, - enum led_brightness brightness); +void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness); /** * led_set_brightness_sync - set LED brightness synchronously @@ -267,8 +266,7 @@ void led_set_brightness(struct led_classdev *led_cdev, * * Returns: 0 on success or negative error value on failure */ -int led_set_brightness_sync(struct led_classdev *led_cdev, - enum led_brightness value); +int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value); /** * led_update_brightness - update LED brightness @@ -565,7 +563,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt) #ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED void led_classdev_notify_brightness_hw_changed( - struct led_classdev *led_cdev, enum led_brightness brightness); + struct led_classdev *led_cdev, unsigned int brightness); #else static inline void led_classdev_notify_brightness_hw_changed( struct led_classdev *led_cdev, enum led_brightness brightness) { } diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 5bcfbd972e97..dbf8506decca 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -178,6 +178,11 @@ * Objtool generates debug info for both FUNC & CODE, but needs special * annotations for each CODE's start (to describe the actual stack frame). * + * Objtool requires that all code must be contained in an ELF symbol. Symbol + * names that have a .L prefix do not emit symbol table entries. .L + * prefixed symbols can be used within a code region, but should be avoided for + * denoting a range of code via ``SYM_*_START/END`` annotations. + * * ALIAS -- does not generate debug info -- the aliased function will */ diff --git a/include/linux/list.h b/include/linux/list.h index 89bdc92e75c3..f2af4b4aa4e9 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -901,7 +901,7 @@ static inline void hlist_add_before(struct hlist_node *n, } /** - * hlist_add_behing - add a new entry after the one specified + * hlist_add_behind - add a new entry after the one specified * @n: new entry to be added * @prev: hlist node to add it after, which must be non-NULL */ diff --git a/include/linux/litex.h b/include/linux/litex.h index 40f5be503593..5ea9ccf5cce4 100644 --- a/include/linux/litex.h +++ b/include/linux/litex.h @@ -3,9 +3,6 @@ * Common LiteX header providing * helper functions for accessing CSRs. * - * Implementation of the functions is provided by - * the LiteX SoC Controller driver. - * * Copyright (C) 2019-2020 Antmicro <www.antmicro.com> */ @@ -13,90 +10,147 @@ #define _LINUX_LITEX_H #include <linux/io.h> -#include <linux/types.h> -#include <linux/compiler_types.h> + +/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */ +#if defined(CONFIG_LITEX_SUBREG_SIZE) && \ + (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4) +#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE +#else +#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1! +#endif +#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8) + +/* LiteX subregisters of any width are always aligned on a 4-byte boundary */ +#define LITEX_SUBREG_ALIGN 0x4 + +static inline void _write_litex_subregister(u32 val, void __iomem *addr) +{ + writel((u32 __force)cpu_to_le32(val), addr); +} + +static inline u32 _read_litex_subregister(void __iomem *addr) +{ + return le32_to_cpu((__le32 __force)readl(addr)); +} /* - * The parameters below are true for LiteX SoCs configured for 8-bit CSR Bus, - * 32-bit aligned. 
+ * LiteX SoC Generator, depending on the configuration, can split a single + * logical CSR (Control&Status Register) into a series of consecutive physical + * registers. + * + * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned, + * 32-bit wide logical CSR will be laid out as four 32-bit physical + * subregisters, each one containing one byte of meaningful data. * - * Supporting other configurations will require extending the logic in this - * header and in the LiteX SoC controller driver. + * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus */ -#define LITEX_REG_SIZE 0x4 -#define LITEX_SUBREG_SIZE 0x1 -#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8) -#define WRITE_LITEX_SUBREGISTER(val, base_offset, subreg_id) \ - writel((u32 __force)cpu_to_le32(val), base_offset + (LITEX_REG_SIZE * subreg_id)) +/* number of LiteX subregisters needed to store a register of given reg_size */ +#define _litex_num_subregs(reg_size) \ + (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1) -#define READ_LITEX_SUBREGISTER(base_offset, subreg_id) \ - le32_to_cpu((__le32 __force)readl(base_offset + (LITEX_REG_SIZE * subreg_id))) +/* + * since the number of 4-byte aligned subregisters required to store a single + * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the + * next adjacent LiteX CSR register w.r.t. the offset of the current one also + * depends on how many subregisters the latter is spread across + */ +#define _next_reg_off(off, size) \ + ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN) -void litex_set_reg(void __iomem *reg, unsigned long reg_sz, unsigned long val); +/* + * The purpose of `_litex_[set|get]_reg()` is to implement the logic of + * writing to/reading from the LiteX CSR in a single place that can be then + * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()` + * accessors for the appropriate data width. + * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly + * discouraged, as they perform no error checking on the requested data width! + */ -unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_sz); +/** + * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register) + * @reg: Address of the CSR + * @reg_size: The width of the CSR expressed in the number of bytes + * @val: Value to be written to the CSR + * + * This function splits a single (possibly multi-byte) LiteX CSR write into + * a series of subregister writes with a proper offset. + * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). + */ +static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val) +{ + u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT; + + while (shift > 0) { + shift -= LITEX_SUBREG_SIZE_BIT; + _write_litex_subregister(val >> shift, reg); + reg += LITEX_SUBREG_ALIGN; + } +} + +/** + * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register) + * @reg: Address of the CSR + * @reg_size: The width of the CSR expressed in the number of bytes + * + * Return: Value read from the CSR + * + * This function generates a series of subregister reads with a proper offset + * and joins their results into a single (possibly multi-byte) LiteX CSR value. + * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). 
+ */ +static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size) +{ + u64 r; + u8 i; + + r = _read_litex_subregister(reg); + for (i = 1; i < _litex_num_subregs(reg_size); i++) { + r <<= LITEX_SUBREG_SIZE_BIT; + reg += LITEX_SUBREG_ALIGN; + r |= _read_litex_subregister(reg); + } + return r; +} static inline void litex_write8(void __iomem *reg, u8 val) { - WRITE_LITEX_SUBREGISTER(val, reg, 0); + _litex_set_reg(reg, sizeof(u8), val); } static inline void litex_write16(void __iomem *reg, u16 val) { - WRITE_LITEX_SUBREGISTER(val >> 8, reg, 0); - WRITE_LITEX_SUBREGISTER(val, reg, 1); + _litex_set_reg(reg, sizeof(u16), val); } static inline void litex_write32(void __iomem *reg, u32 val) { - WRITE_LITEX_SUBREGISTER(val >> 24, reg, 0); - WRITE_LITEX_SUBREGISTER(val >> 16, reg, 1); - WRITE_LITEX_SUBREGISTER(val >> 8, reg, 2); - WRITE_LITEX_SUBREGISTER(val, reg, 3); + _litex_set_reg(reg, sizeof(u32), val); } static inline void litex_write64(void __iomem *reg, u64 val) { - WRITE_LITEX_SUBREGISTER(val >> 56, reg, 0); - WRITE_LITEX_SUBREGISTER(val >> 48, reg, 1); - WRITE_LITEX_SUBREGISTER(val >> 40, reg, 2); - WRITE_LITEX_SUBREGISTER(val >> 32, reg, 3); - WRITE_LITEX_SUBREGISTER(val >> 24, reg, 4); - WRITE_LITEX_SUBREGISTER(val >> 16, reg, 5); - WRITE_LITEX_SUBREGISTER(val >> 8, reg, 6); - WRITE_LITEX_SUBREGISTER(val, reg, 7); + _litex_set_reg(reg, sizeof(u64), val); } static inline u8 litex_read8(void __iomem *reg) { - return READ_LITEX_SUBREGISTER(reg, 0); + return _litex_get_reg(reg, sizeof(u8)); } static inline u16 litex_read16(void __iomem *reg) { - return (READ_LITEX_SUBREGISTER(reg, 0) << 8) - | (READ_LITEX_SUBREGISTER(reg, 1)); + return _litex_get_reg(reg, sizeof(u16)); } static inline u32 litex_read32(void __iomem *reg) { - return (READ_LITEX_SUBREGISTER(reg, 0) << 24) - | (READ_LITEX_SUBREGISTER(reg, 1) << 16) - | (READ_LITEX_SUBREGISTER(reg, 2) << 8) - | (READ_LITEX_SUBREGISTER(reg, 3)); + return _litex_get_reg(reg, sizeof(u32)); } static inline u64 litex_read64(void __iomem *reg) { - return ((u64)READ_LITEX_SUBREGISTER(reg, 0) << 56) - | ((u64)READ_LITEX_SUBREGISTER(reg, 1) << 48) - | ((u64)READ_LITEX_SUBREGISTER(reg, 2) << 40) - | ((u64)READ_LITEX_SUBREGISTER(reg, 3) << 32) - | ((u64)READ_LITEX_SUBREGISTER(reg, 4) << 24) - | ((u64)READ_LITEX_SUBREGISTER(reg, 5) << 16) - | ((u64)READ_LITEX_SUBREGISTER(reg, 6) << 8) - | ((u64)READ_LITEX_SUBREGISTER(reg, 7)); + return _litex_get_reg(reg, sizeof(u64)); } #endif /* _LINUX_LITEX_H */ diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 4a8795b21d77..ded90b097e6e 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -18,6 +18,7 @@ typedef struct { .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ + .lock_type = LD_LOCK_PERCPU, \ } #else # define LL_DEP_MAP_INIT(lockname) @@ -30,7 +31,9 @@ do { \ static struct lock_class_key __key; \ \ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ - lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \ + LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_PERCPU); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b9e9adec73e8..1c746139d5e3 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -185,12 +185,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key); * to lockdep: */ -extern void lockdep_init_map_waits(struct 
lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass, short inner, short outer); +extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type); + +static inline void +lockdep_init_map_waits(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass, u8 inner, u8 outer) +{ + lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL); +} static inline void lockdep_init_map_wait(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass, short inner) + struct lock_class_key *key, int subclass, u8 inner) { lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV); } @@ -310,6 +317,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ } while (0) +#define lockdep_assert_none_held_once() do { \ + WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ + } while (0) + #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) @@ -340,6 +351,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) # define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) +# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \ + do { (void)(name); (void)(key); } while (0) # define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \ do { (void)(name); (void)(key); } while (0) # define lockdep_init_map_wait(lock, name, key, sub, inner) \ @@ -387,6 +400,7 @@ extern int lockdep_is_held(const void *); #define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) +#define lockdep_assert_none_held_once() do { } while (0) #define lockdep_recursing(tsk) (0) diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 9a1fd49df17f..2ec9ff5a7fff 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -30,6 +30,12 @@ enum lockdep_wait_type { LD_WAIT_MAX, /* must be last */ }; +enum lockdep_lock_type { + LD_LOCK_NORMAL = 0, /* normal, catch all */ + LD_LOCK_PERCPU, /* percpu */ + LD_LOCK_MAX, +}; + #ifdef CONFIG_LOCKDEP /* @@ -119,8 +125,10 @@ struct lock_class { int name_version; const char *name; - short wait_type_inner; - short wait_type_outer; + u8 wait_type_inner; + u8 wait_type_outer; + u8 lock_type; + /* u8 hole; */ #ifdef CONFIG_LOCK_STAT unsigned long contention_point[LOCKSTAT_POINTS]; @@ -169,8 +177,10 @@ struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; const char *name; - short wait_type_outer; /* can be taken in this context */ - short wait_type_inner; /* presents this context */ + u8 wait_type_outer; /* can be taken in this context */ + u8 wait_type_inner; /* presents this context */ + u8 lock_type; + /* u8 hole; */ #ifdef CONFIG_LOCK_STAT int cpu; unsigned long ip; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 7aaa753b8608..477a597db013 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -113,6 +113,8 @@ LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) LSM_HOOK(int, 0, inode_init_security, struct inode *inode, struct 
inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len) +LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode, + const struct qstr *name, const struct inode *context_inode) LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir, @@ -133,17 +135,20 @@ LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode, LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask) LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr) LSM_HOOK(int, 0, inode_getattr, const struct path *path) -LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) +LSM_HOOK(int, 0, inode_setxattr, struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry) -LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_removexattr, struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry) -LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry) -LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode, - const char *name, void **buffer, bool alloc) +LSM_HOOK(int, 0, inode_killpriv, struct user_namespace *mnt_userns, + struct dentry *dentry) +LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct user_namespace *mnt_userns, + struct inode *inode, const char *name, void **buffer, bool alloc) LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index a19adef1f088..fb7f3193753d 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -233,6 +233,15 @@ * Returns 0 if @name and @value have been successfully set, * -EOPNOTSUPP if no security attribute is needed, or * -ENOMEM on memory allocation failure. + * @inode_init_security_anon: + * Set up the incore security field for the new anonymous inode + * and return whether the inode creation is permitted by the security + * module or not. + * @inode contains the inode structure + * @name name of the anonymous inode class + * @context_inode optional related inode + * Returns 0 on success, -EACCES if the security module denies the + * creation of this inode, or another -errno upon other errors. * @inode_create: * Check permission to create a regular file. * @dir contains inode structure of the parent of the new file. @@ -444,6 +453,7 @@ * @inode_killpriv: * The setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. + * @mnt_userns: user namespace of the mount * @dentry is the dentry being changed. * Return 0 on success. If error is returned, then the operation * causing setuid bit removal is failed. 
diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 9004375c462e..27eb383cb95d 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -42,7 +42,7 @@ struct device *mdev_get_iommu_device(struct device *dev); * @mdev: mdev_device structure on of mediated device * that is being created * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for a + * @remove: Called to free resources in parent device's driver for * a mediated device. It is mandatory to provide 'remove' * ops. * @mdev: mdev_device device structure which is being diff --git a/include/linux/mdio.h b/include/linux/mdio.h index dbd69b3d170b..ffb787d5ebde 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -49,7 +49,11 @@ struct mdio_device { unsigned int reset_assert_delay; unsigned int reset_deassert_delay; }; -#define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +static inline struct mdio_device *to_mdio_device(const struct device *dev) +{ + return container_of(dev, struct mdio_device, dev); +} /* struct mdio_driver_common: Common to all MDIO drivers */ struct mdio_driver_common { @@ -57,8 +61,12 @@ struct mdio_driver_common { int flags; }; #define MDIO_DEVICE_FLAG_PHY 1 -#define to_mdio_common_driver(d) \ - container_of(d, struct mdio_driver_common, driver) + +static inline struct mdio_driver_common * +to_mdio_common_driver(const struct device_driver *driver) +{ + return container_of(driver, struct mdio_driver_common, driver); +} /* struct mdio_driver: Generic MDIO driver */ struct mdio_driver { @@ -73,8 +81,13 @@ struct mdio_driver { /* Clears up any memory if needed */ void (*remove)(struct mdio_device *mdiodev); }; -#define to_mdio_driver(d) \ - container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) + +static inline struct mdio_driver * +to_mdio_driver(const struct device_driver *driver) +{ + return container_of(to_mdio_common_driver(driver), struct mdio_driver, + mdiodrv); +} /* device driver data */ static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data) diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 959ad7d850b4..07f5ef8fc456 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -68,7 +68,7 @@ struct mei_cl_driver { int (*probe)(struct mei_cl_device *cldev, const struct mei_cl_device_id *id); - int (*remove)(struct mei_cl_device *cldev); + void (*remove)(struct mei_cl_device *cldev); }; int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index b93c44b9121e..d13e3cd938b4 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -117,7 +117,7 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); -unsigned long memblock_free_all(void); +void memblock_free_all(void); void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); @@ -272,7 +272,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn); /** - * for_each_free_mem_range_in_zone - iterate through zone specific free + * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free * memblock areas * @i: u64 used as loop variable * @zone: zone in which all of the memory blocks reside @@ -292,7 +292,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, 
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) /** - * for_each_free_mem_range_in_zone_from - iterate through zone specific + * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific * free memblock areas from a given point * @i: u64 used as loop variable * @zone: zone in which all of the memory blocks reside @@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size) /* * Set the allocation direction to bottom-up or top-down. */ -static inline void memblock_set_bottom_up(bool enable) +static inline __init void memblock_set_bottom_up(bool enable) { memblock.bottom_up = enable; } @@ -470,7 +470,7 @@ static inline void memblock_set_bottom_up(bool enable) * if this is true, that said, memblock will allocate memory * in bottom-up direction. */ -static inline bool memblock_bottom_up(void) +static inline __init bool memblock_bottom_up(void) { return memblock.bottom_up; } diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index eeb0b52203e9..0c04d39a7967 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -92,6 +92,10 @@ struct lruvec_stat { long count[NR_VM_NODE_STAT_ITEMS]; }; +struct batched_lruvec_stat { + s32 count[NR_VM_NODE_STAT_ITEMS]; +}; + /* * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, * which have elements charged to this memcg. @@ -107,11 +111,17 @@ struct memcg_shrinker_map { struct mem_cgroup_per_node { struct lruvec lruvec; - /* Legacy local VM stats */ + /* + * Legacy local VM stats. This should be struct lruvec_stat and + * cannot be optimized to struct batched_lruvec_stat. Because + * the threshold of the lruvec_stat_cpu can be as big as + * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this + * filed has no upper limit. + */ struct lruvec_stat __percpu *lruvec_stat_local; /* Subtree VM stats (batched updates) */ - struct lruvec_stat __percpu *lruvec_stat_cpu; + struct batched_lruvec_stat __percpu *lruvec_stat_cpu; atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; @@ -475,19 +485,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } -/* - * set_page_objcgs - associate a page with a object cgroups vector - * @page: a pointer to the page struct - * @objcgs: a pointer to the object cgroups vector - * - * Atomically associates a page with a vector of object cgroups. 
- */ -static inline bool set_page_objcgs(struct page *page, - struct obj_cgroup **objcgs) -{ - return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs | - MEMCG_DATA_OBJCGS); -} #else static inline struct obj_cgroup **page_objcgs(struct page *page) { @@ -498,12 +495,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) { return NULL; } - -static inline bool set_page_objcgs(struct page *page, - struct obj_cgroup **objcgs) -{ - return true; -} #endif static __always_inline bool memcg_stat_item_in_bytes(int idx) @@ -689,8 +680,6 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); -struct mem_cgroup *get_mem_cgroup_from_page(struct page *page); - struct lruvec *lock_page_lruvec(struct page *page); struct lruvec *lock_page_lruvec_irq(struct page *page); struct lruvec *lock_page_lruvec_irqsave(struct page *page, @@ -1072,9 +1061,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, rcu_read_unlock(); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -void mem_cgroup_split_huge_fixup(struct page *head); -#endif +void split_page_memcg(struct page *head, unsigned int nr); #else /* CONFIG_MEMCG */ @@ -1200,11 +1187,6 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } -static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) -{ - return NULL; -} - static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } @@ -1416,7 +1398,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, return 0; } -static inline void mem_cgroup_split_huge_fixup(struct page *head) +static inline void split_page_memcg(struct page *head, unsigned int nr) { } @@ -1601,9 +1583,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, #endif #ifdef CONFIG_MEMCG_KMEM -int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, - unsigned int nr_pages); -void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); diff --git a/include/linux/memory.h b/include/linux/memory.h index 439a89e758d8..4da95e684e20 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -27,9 +27,8 @@ struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ int online_type; /* for passing data to online routine */ - int phys_device; /* to which fru does this belong? */ - struct device dev; int nid; /* NID for this memory block */ + struct device dev; }; int arch_get_memory_phys_device(unsigned long start_pfn); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 15acce5ab106..7288aa5ef73b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -16,22 +16,7 @@ struct resource; struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG -/* - * Return page for the valid pfn only if the page is online. 
All pfn - * walkers which rely on the fully initialized page->flags and others - * should use this rather than pfn_valid && pfn_to_page - */ -#define pfn_to_online_page(pfn) \ -({ \ - struct page *___page = NULL; \ - unsigned long ___pfn = pfn; \ - unsigned long ___nr = pfn_to_section_nr(___pfn); \ - \ - if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ - pfn_valid_within(___pfn)) \ - ___page = pfn_to_page(___pfn); \ - ___page; \ -}) +struct page *pfn_to_online_page(unsigned long pfn); /* * Types for free bootmem stored in page->lru.next. These have to be in @@ -68,7 +53,7 @@ typedef int __bitwise mhp_t; * with this flag set, the resource pointer must no longer be used as it * might be stale, or the resource might have changed. */ -#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) +#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* * Extended parameters for memory hotplug: @@ -81,6 +66,9 @@ struct mhp_params { pgprot_t pgprot; }; +bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); +struct range mhp_get_pluggable_range(bool need_mapping); + /* * Zone resizing functions * @@ -131,10 +119,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params); extern u64 max_mem_size; -extern int memhp_online_type_from_str(const char *str); +extern int mhp_online_type_from_str(const char *str); /* Default online_type (MMOP_*) when new memory blocks are added. */ -extern int memhp_default_online_type; +extern int mhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) @@ -281,6 +269,13 @@ static inline bool movable_node_is_enabled(void) } #endif /* ! CONFIG_MEMORY_HOTPLUG */ +/* + * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some + * platforms might override and use arch_get_mappable_range() + * for internal non memory hotplug purposes. 
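For illustration of the pfn_to_online_page() change above: pfn walkers are expected to call the new out-of-line function rather than open-code the pfn_valid()/online-section checks the removed macro performed. A minimal sketch of such a walker, with the function name and loop structure being hypothetical, might look like this:

    /* Hypothetical walker: pfn_to_online_page() is assumed to return NULL
     * for offline or invalid pfns, so no explicit pfn_valid() check is needed. */
    static void example_walk_pfn_range(unsigned long start_pfn, unsigned long nr_pages)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                    struct page *page = pfn_to_online_page(pfn);

                    if (!page)
                            continue;
                    /* page->flags is fully initialized at this point */
            }
    }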
+ */ +struct range arch_get_mappable_range(void); + #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * pgdat resizing functions diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 79c49e7f5c30..f5b464daeeca 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); +bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); @@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, return NULL; } +static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) +{ + return false; +} + static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) { return 0; diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 524a7e4702c2..302a330c5c84 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -368,7 +368,6 @@ struct ab8500 { int it_latchhier_num; }; -struct ab8500_regulator_platform_data; struct ab8500_codec_platform_data; struct ab8500_sysctrl_platform_data; @@ -376,11 +375,9 @@ struct ab8500_sysctrl_platform_data; * struct ab8500_platform_data - AB8500 platform data * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used * @init: board-specific initialization after detection of ab8500 - * @regulator: machine-specific constraints for regulators */ struct ab8500_platform_data { void (*init) (struct ab8500 *); - struct ab8500_regulator_platform_data *regulator; struct ab8500_codec_platform_data *codec; struct ab8500_sysctrl_platform_data *sysctrl; }; diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index fd5957c042da..9ab0e2fca7ea 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -696,6 +696,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x); * * This tells the axp20x core to remove the associated mfd devices */ -int axp20x_device_remove(struct axp20x_dev *axp20x); +void axp20x_device_remove(struct axp20x_dev *axp20x); #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h index eb05569f752b..8efd99d07c9e 100644 --- a/include/linux/mfd/bd9571mwv.h +++ b/include/linux/mfd/bd9571mwv.h @@ -1,16 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* - * ROHM BD9571MWV-M driver + * ROHM BD9571MWV-M and BD9574MWF-M driver * * Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether expressed or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. 
+ * Copyright (C) 2020 Renesas Electronics Corporation * * Based on the TPS65086 driver */ @@ -21,11 +14,12 @@ #include <linux/device.h> #include <linux/regmap.h> -/* List of registers for BD9571MWV */ +/* List of registers for BD9571MWV and BD9574MWF */ #define BD9571MWV_VENDOR_CODE 0x00 #define BD9571MWV_VENDOR_CODE_VAL 0xdb #define BD9571MWV_PRODUCT_CODE 0x01 -#define BD9571MWV_PRODUCT_CODE_VAL 0x60 +#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60 +#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74 #define BD9571MWV_PRODUCT_REVISION 0x02 #define BD9571MWV_I2C_FUSA_MODE 0x10 @@ -55,6 +49,7 @@ #define BD9571MWV_VD33_VID 0x44 #define BD9571MWV_DVFS_VINIT 0x50 +#define BD9574MWF_VD09_VINIT 0x51 #define BD9571MWV_DVFS_SETVMAX 0x52 #define BD9571MWV_DVFS_BOOSTVID 0x53 #define BD9571MWV_DVFS_SETVID 0x54 @@ -68,6 +63,7 @@ #define BD9571MWV_GPIO_INT_SET 0x64 #define BD9571MWV_GPIO_INT 0x65 #define BD9571MWV_GPIO_INTMASK 0x66 +#define BD9574MWF_GPIO_MUX 0x67 #define BD9571MWV_REG_KEEP(n) (0x70 + (n)) @@ -77,6 +73,8 @@ #define BD9571MWV_PROT_ERROR_STATUS2 0x83 #define BD9571MWV_PROT_ERROR_STATUS3 0x84 #define BD9571MWV_PROT_ERROR_STATUS4 0x85 +#define BD9574MWF_PROT_ERROR_STATUS5 0x86 +#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87 #define BD9571MWV_INT_INTREQ 0x90 #define BD9571MWV_INT_INTREQ_MD1_INT BIT(0) @@ -89,6 +87,12 @@ #define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7) #define BD9571MWV_INT_INTMASK 0x91 +#define BD9574MWF_SSCG_CNT 0xA0 +#define BD9574MWF_POFFB_MRB 0xA1 +#define BD9574MWF_SMRB_WR_PROT 0xA2 +#define BD9574MWF_SMRB_ASSERT 0xA3 +#define BD9574MWF_SMRB_STATUS 0xA4 + #define BD9571MWV_ACCESS_KEY 0xff /* Define the BD9571MWV IRQ numbers */ @@ -98,23 +102,8 @@ enum bd9571mwv_irqs { BD9571MWV_IRQ_MD2_E2, BD9571MWV_IRQ_PROT_ERR, BD9571MWV_IRQ_GP, - BD9571MWV_IRQ_128H_OF, + BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */ BD9571MWV_IRQ_WDT_OF, BD9571MWV_IRQ_BKUP_TRG, }; - -/** - * struct bd9571mwv - state holder for the bd9571mwv driver - * - * Device data may be used to access the BD9571MWV chip - */ -struct bd9571mwv { - struct device *dev; - struct regmap *regmap; - - /* IRQ Data */ - int irq; - struct regmap_irq_chip_data *irq_data; -}; - #endif /* __LINUX_MFD_BD9571MWV_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 4b35baa14d30..2009c4b936d9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -28,13 +28,13 @@ .id = (_id), \ } -#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ +#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL) -#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ +#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL) -#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ +#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match) #define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h index 2c8896fd852e..2660226138b8 100644 --- a/include/linux/mfd/hi6421-spmi-pmic.h +++ b/include/linux/mfd/hi6421-spmi-pmic.h @@ -4,6 +4,7 @@ * * Copyright (c) 2013 Linaro Ltd. * Copyright (C) 2011 Hisilicon. 
+ * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd * * Guodong Xu <guodong.xu@linaro.org> */ @@ -12,10 +13,7 @@ #define __HISI_PMIC_H #include <linux/irqdomain.h> - -#define HISI_REGS_ENA_PROTECT_TIME (0) /* in microseconds */ -#define HISI_ECO_MODE_ENABLE (1) -#define HISI_ECO_MODE_DISABLE (0) +#include <linux/regmap.h> struct hi6421_spmi_pmic { struct resource *res; @@ -26,28 +24,7 @@ struct hi6421_spmi_pmic { int irq; int gpio; unsigned int *irqs; + struct regmap *regmap; }; -int hi6421_spmi_pmic_read(struct hi6421_spmi_pmic *pmic, int reg); -int hi6421_spmi_pmic_write(struct hi6421_spmi_pmic *pmic, int reg, u32 val); -int hi6421_spmi_pmic_rmw(struct hi6421_spmi_pmic *pmic, int reg, - u32 mask, u32 bits); - -enum hi6421_spmi_pmic_irq_list { - OTMP = 0, - VBUS_CONNECT, - VBUS_DISCONNECT, - ALARMON_R, - HOLD_6S, - HOLD_1S, - POWERKEY_UP, - POWERKEY_DOWN, - OCP_SCP_R, - COUL_R, - SIM0_HPD_R, - SIM0_HPD_F, - SIM1_HPD_R, - SIM1_HPD_F, - PMIC_IRQ_LIST_MAX, -}; #endif /* __HISI_PMIC_H */ diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h index c8ef2f1654a4..74d4e193966a 100644 --- a/include/linux/mfd/intel-m10-bmc.h +++ b/include/linux/mfd/intel-m10-bmc.h @@ -15,6 +15,15 @@ /* Register offset of system registers */ #define NIOS2_FW_VERSION 0x0 +#define M10BMC_MAC_LOW 0x10 +#define M10BMC_MAC_BYTE4 GENMASK(7, 0) +#define M10BMC_MAC_BYTE3 GENMASK(15, 8) +#define M10BMC_MAC_BYTE2 GENMASK(23, 16) +#define M10BMC_MAC_BYTE1 GENMASK(31, 24) +#define M10BMC_MAC_HIGH 0x14 +#define M10BMC_MAC_BYTE6 GENMASK(7, 0) +#define M10BMC_MAC_BYTE5 GENMASK(15, 8) +#define M10BMC_MAC_COUNT GENMASK(23, 16) #define M10BMC_TEST_REG 0x3c #define M10BMC_BUILD_VER 0x68 #define M10BMC_VER_MAJOR_MSK GENMASK(23, 16) diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h deleted file mode 100644 index 317e8608cf41..000000000000 --- a/include/linux/mfd/intel_msic.h +++ /dev/null @@ -1,453 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Core interface for Intel MSIC - * - * Copyright (C) 2011, Intel Corporation - * Author: Mika Westerberg <mika.westerberg@linux.intel.com> - */ - -#ifndef __LINUX_MFD_INTEL_MSIC_H__ -#define __LINUX_MFD_INTEL_MSIC_H__ - -/* ID */ -#define INTEL_MSIC_ID0 0x000 /* RO */ -#define INTEL_MSIC_ID1 0x001 /* RO */ - -/* IRQ */ -#define INTEL_MSIC_IRQLVL1 0x002 -#define INTEL_MSIC_ADC1INT 0x003 -#define INTEL_MSIC_CCINT 0x004 -#define INTEL_MSIC_PWRSRCINT 0x005 -#define INTEL_MSIC_PWRSRCINT1 0x006 -#define INTEL_MSIC_CHRINT 0x007 -#define INTEL_MSIC_CHRINT1 0x008 -#define INTEL_MSIC_RTCIRQ 0x009 -#define INTEL_MSIC_GPIO0LVIRQ 0x00a -#define INTEL_MSIC_GPIO1LVIRQ 0x00b -#define INTEL_MSIC_GPIOHVIRQ 0x00c -#define INTEL_MSIC_VRINT 0x00d -#define INTEL_MSIC_OCAUDIO 0x00e -#define INTEL_MSIC_ACCDET 0x00f -#define INTEL_MSIC_RESETIRQ1 0x010 -#define INTEL_MSIC_RESETIRQ2 0x011 -#define INTEL_MSIC_MADC1INT 0x012 -#define INTEL_MSIC_MCCINT 0x013 -#define INTEL_MSIC_MPWRSRCINT 0x014 -#define INTEL_MSIC_MPWRSRCINT1 0x015 -#define INTEL_MSIC_MCHRINT 0x016 -#define INTEL_MSIC_MCHRINT1 0x017 -#define INTEL_MSIC_RTCIRQMASK 0x018 -#define INTEL_MSIC_GPIO0LVIRQMASK 0x019 -#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a -#define INTEL_MSIC_GPIOHVIRQMASK 0x01b -#define INTEL_MSIC_VRINTMASK 0x01c -#define INTEL_MSIC_OCAUDIOMASK 0x01d -#define INTEL_MSIC_ACCDETMASK 0x01e -#define INTEL_MSIC_RESETIRQ1MASK 0x01f -#define INTEL_MSIC_RESETIRQ2MASK 0x020 -#define INTEL_MSIC_IRQLVL1MSK 0x021 -#define INTEL_MSIC_PBCONFIG 0x03e -#define INTEL_MSIC_PBSTATUS 0x03f /* RO */ - 
-/* GPIO */ -#define INTEL_MSIC_GPIO0LV7CTLO 0x040 -#define INTEL_MSIC_GPIO0LV6CTLO 0x041 -#define INTEL_MSIC_GPIO0LV5CTLO 0x042 -#define INTEL_MSIC_GPIO0LV4CTLO 0x043 -#define INTEL_MSIC_GPIO0LV3CTLO 0x044 -#define INTEL_MSIC_GPIO0LV2CTLO 0x045 -#define INTEL_MSIC_GPIO0LV1CTLO 0x046 -#define INTEL_MSIC_GPIO0LV0CTLO 0x047 -#define INTEL_MSIC_GPIO1LV7CTLOS 0x048 -#define INTEL_MSIC_GPIO1LV6CTLO 0x049 -#define INTEL_MSIC_GPIO1LV5CTLO 0x04a -#define INTEL_MSIC_GPIO1LV4CTLO 0x04b -#define INTEL_MSIC_GPIO1LV3CTLO 0x04c -#define INTEL_MSIC_GPIO1LV2CTLO 0x04d -#define INTEL_MSIC_GPIO1LV1CTLO 0x04e -#define INTEL_MSIC_GPIO1LV0CTLO 0x04f -#define INTEL_MSIC_GPIO0LV7CTLI 0x050 -#define INTEL_MSIC_GPIO0LV6CTLI 0x051 -#define INTEL_MSIC_GPIO0LV5CTLI 0x052 -#define INTEL_MSIC_GPIO0LV4CTLI 0x053 -#define INTEL_MSIC_GPIO0LV3CTLI 0x054 -#define INTEL_MSIC_GPIO0LV2CTLI 0x055 -#define INTEL_MSIC_GPIO0LV1CTLI 0x056 -#define INTEL_MSIC_GPIO0LV0CTLI 0x057 -#define INTEL_MSIC_GPIO1LV7CTLIS 0x058 -#define INTEL_MSIC_GPIO1LV6CTLI 0x059 -#define INTEL_MSIC_GPIO1LV5CTLI 0x05a -#define INTEL_MSIC_GPIO1LV4CTLI 0x05b -#define INTEL_MSIC_GPIO1LV3CTLI 0x05c -#define INTEL_MSIC_GPIO1LV2CTLI 0x05d -#define INTEL_MSIC_GPIO1LV1CTLI 0x05e -#define INTEL_MSIC_GPIO1LV0CTLI 0x05f -#define INTEL_MSIC_PWM0CLKDIV1 0x061 -#define INTEL_MSIC_PWM0CLKDIV0 0x062 -#define INTEL_MSIC_PWM1CLKDIV1 0x063 -#define INTEL_MSIC_PWM1CLKDIV0 0x064 -#define INTEL_MSIC_PWM2CLKDIV1 0x065 -#define INTEL_MSIC_PWM2CLKDIV0 0x066 -#define INTEL_MSIC_PWM0DUTYCYCLE 0x067 -#define INTEL_MSIC_PWM1DUTYCYCLE 0x068 -#define INTEL_MSIC_PWM2DUTYCYCLE 0x069 -#define INTEL_MSIC_GPIO0HV3CTLO 0x06d -#define INTEL_MSIC_GPIO0HV2CTLO 0x06e -#define INTEL_MSIC_GPIO0HV1CTLO 0x06f -#define INTEL_MSIC_GPIO0HV0CTLO 0x070 -#define INTEL_MSIC_GPIO1HV3CTLO 0x071 -#define INTEL_MSIC_GPIO1HV2CTLO 0x072 -#define INTEL_MSIC_GPIO1HV1CTLO 0x073 -#define INTEL_MSIC_GPIO1HV0CTLO 0x074 -#define INTEL_MSIC_GPIO0HV3CTLI 0x075 -#define INTEL_MSIC_GPIO0HV2CTLI 0x076 -#define INTEL_MSIC_GPIO0HV1CTLI 0x077 -#define INTEL_MSIC_GPIO0HV0CTLI 0x078 -#define INTEL_MSIC_GPIO1HV3CTLI 0x079 -#define INTEL_MSIC_GPIO1HV2CTLI 0x07a -#define INTEL_MSIC_GPIO1HV1CTLI 0x07b -#define INTEL_MSIC_GPIO1HV0CTLI 0x07c - -/* SVID */ -#define INTEL_MSIC_SVIDCTRL0 0x080 -#define INTEL_MSIC_SVIDCTRL1 0x081 -#define INTEL_MSIC_SVIDCTRL2 0x082 -#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */ -#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */ -#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */ -#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */ -#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087 -#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088 -#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089 -#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a -#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b -#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c -#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */ -#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */ -#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */ -#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */ -#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */ -#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */ -#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */ -#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */ - -/* VREG */ -#define INTEL_MSIC_VCCLATCH 0x0c0 -#define INTEL_MSIC_VNNLATCH 0x0c1 -#define INTEL_MSIC_VCCCNT 0x0c2 -#define INTEL_MSIC_SMPSRAMP 0x0c3 -#define INTEL_MSIC_VNNCNT 0x0c4 -#define INTEL_MSIC_VNNAONCNT 0x0c5 -#define INTEL_MSIC_VCC122AONCNT 0x0c6 -#define INTEL_MSIC_V180AONCNT 0x0c7 -#define INTEL_MSIC_V500CNT 0x0c8 -#define 
INTEL_MSIC_VIHFCNT 0x0c9 -#define INTEL_MSIC_LDORAMP1 0x0ca -#define INTEL_MSIC_LDORAMP2 0x0cb -#define INTEL_MSIC_VCC108AONCNT 0x0cc -#define INTEL_MSIC_VCC108ASCNT 0x0cd -#define INTEL_MSIC_VCC108CNT 0x0ce -#define INTEL_MSIC_VCCA100ASCNT 0x0cf -#define INTEL_MSIC_VCCA100CNT 0x0d0 -#define INTEL_MSIC_VCC180AONCNT 0x0d1 -#define INTEL_MSIC_VCC180CNT 0x0d2 -#define INTEL_MSIC_VCC330CNT 0x0d3 -#define INTEL_MSIC_VUSB330CNT 0x0d4 -#define INTEL_MSIC_VCCSDIOCNT 0x0d5 -#define INTEL_MSIC_VPROG1CNT 0x0d6 -#define INTEL_MSIC_VPROG2CNT 0x0d7 -#define INTEL_MSIC_VEMMCSCNT 0x0d8 -#define INTEL_MSIC_VEMMC1CNT 0x0d9 -#define INTEL_MSIC_VEMMC2CNT 0x0da -#define INTEL_MSIC_VAUDACNT 0x0db -#define INTEL_MSIC_VHSPCNT 0x0dc -#define INTEL_MSIC_VHSNCNT 0x0dd -#define INTEL_MSIC_VHDMICNT 0x0de -#define INTEL_MSIC_VOTGCNT 0x0df -#define INTEL_MSIC_V1P35CNT 0x0e0 -#define INTEL_MSIC_V330AONCNT 0x0e1 - -/* RESET */ -#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */ -#define INTEL_MSIC_ERCONFIG 0x101 - -/* BURST */ -#define INTEL_MSIC_BATCURRENTLIMIT12 0x102 -#define INTEL_MSIC_BATTIMELIMIT12 0x103 -#define INTEL_MSIC_BATTIMELIMIT3 0x104 -#define INTEL_MSIC_BATTIMEDB 0x105 -#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106 -#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107 -#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108 - -/* RTC */ -#define INTEL_MSIC_RTCB1 0x140 /* RO */ -#define INTEL_MSIC_RTCB2 0x141 /* RO */ -#define INTEL_MSIC_RTCB3 0x142 /* RO */ -#define INTEL_MSIC_RTCB4 0x143 /* RO */ -#define INTEL_MSIC_RTCOB1 0x144 -#define INTEL_MSIC_RTCOB2 0x145 -#define INTEL_MSIC_RTCOB3 0x146 -#define INTEL_MSIC_RTCOB4 0x147 -#define INTEL_MSIC_RTCAB1 0x148 -#define INTEL_MSIC_RTCAB2 0x149 -#define INTEL_MSIC_RTCAB3 0x14a -#define INTEL_MSIC_RTCAB4 0x14b -#define INTEL_MSIC_RTCWAB1 0x14c -#define INTEL_MSIC_RTCWAB2 0x14d -#define INTEL_MSIC_RTCWAB3 0x14e -#define INTEL_MSIC_RTCWAB4 0x14f -#define INTEL_MSIC_RTCSC1 0x150 -#define INTEL_MSIC_RTCSC2 0x151 -#define INTEL_MSIC_RTCSC3 0x152 -#define INTEL_MSIC_RTCSC4 0x153 -#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */ -#define INTEL_MSIC_RTCCONFIG1 0x155 -#define INTEL_MSIC_RTCCONFIG2 0x156 - -/* CHARGER */ -#define INTEL_MSIC_BDTIMER 0x180 -#define INTEL_MSIC_BATTRMV 0x181 -#define INTEL_MSIC_VBUSDET 0x182 -#define INTEL_MSIC_VBUSDET1 0x183 -#define INTEL_MSIC_ADPHVDET 0x184 -#define INTEL_MSIC_ADPLVDET 0x185 -#define INTEL_MSIC_ADPDETDBDM 0x186 -#define INTEL_MSIC_LOWBATTDET 0x187 -#define INTEL_MSIC_CHRCTRL 0x188 -#define INTEL_MSIC_CHRCVOLTAGE 0x189 -#define INTEL_MSIC_CHRCCURRENT 0x18a -#define INTEL_MSIC_SPCHARGER 0x18b -#define INTEL_MSIC_CHRTTIME 0x18c -#define INTEL_MSIC_CHRCTRL1 0x18d -#define INTEL_MSIC_PWRSRCLMT 0x18e -#define INTEL_MSIC_CHRSTWDT 0x18f -#define INTEL_MSIC_WDTWRITE 0x190 /* WO */ -#define INTEL_MSIC_CHRSAFELMT 0x191 -#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */ -#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */ -#define INTEL_MSIC_CHRLEDPWM 0x194 -#define INTEL_MSIC_CHRLEDCTRL 0x195 - -/* ADC */ -#define INTEL_MSIC_ADC1CNTL1 0x1c0 -#define INTEL_MSIC_ADC1CNTL2 0x1c1 -#define INTEL_MSIC_ADC1CNTL3 0x1c2 -#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */ -#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */ -#define INTEL_MSIC_ADC1ADDR0 0x1c5 -#define INTEL_MSIC_ADC1ADDR1 0x1c6 -#define INTEL_MSIC_ADC1ADDR2 0x1c7 -#define INTEL_MSIC_ADC1ADDR3 0x1c8 -#define INTEL_MSIC_ADC1ADDR4 0x1c9 -#define INTEL_MSIC_ADC1ADDR5 0x1ca -#define INTEL_MSIC_ADC1ADDR6 0x1cb -#define INTEL_MSIC_ADC1ADDR7 0x1cc -#define INTEL_MSIC_ADC1ADDR8 0x1cd -#define INTEL_MSIC_ADC1ADDR9 0x1ce -#define 
INTEL_MSIC_ADC1ADDR10 0x1cf -#define INTEL_MSIC_ADC1ADDR11 0x1d0 -#define INTEL_MSIC_ADC1ADDR12 0x1d1 -#define INTEL_MSIC_ADC1ADDR13 0x1d2 -#define INTEL_MSIC_ADC1ADDR14 0x1d3 -#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */ -#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */ -#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */ -#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */ -#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */ -#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */ -#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */ -#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */ -#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */ -#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */ -#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */ -#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */ -#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */ -#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */ -#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */ -#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */ -#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */ -#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */ -#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */ -#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */ -#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */ -#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */ -#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */ -#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */ -#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */ -#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */ -#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */ -#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */ -#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */ -#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */ -#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */ -#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */ -#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */ -#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */ -#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */ -#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */ -#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */ -#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */ -#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */ -#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */ -#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */ -#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */ -#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */ -#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */ -#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */ -#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */ -#define INTEL_MSIC_CCCNTL 0x202 -#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */ -#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */ -#define INTEL_MSIC_CCADCHA 0x205 /* RO */ -#define INTEL_MSIC_CCADCLA 0x206 /* RO */ - -/* AUDIO */ -#define INTEL_MSIC_AUDPLLCTRL 0x240 -#define INTEL_MSIC_DMICBUF0123 0x241 -#define INTEL_MSIC_DMICBUF45 0x242 -#define INTEL_MSIC_DMICGPO 0x244 -#define INTEL_MSIC_DMICMUX 0x245 -#define INTEL_MSIC_DMICCLK 0x246 -#define INTEL_MSIC_MICBIAS 0x247 -#define INTEL_MSIC_ADCCONFIG 0x248 -#define INTEL_MSIC_MICAMP1 0x249 -#define INTEL_MSIC_MICAMP2 0x24a -#define INTEL_MSIC_NOISEMUX 0x24b -#define INTEL_MSIC_AUDIOMUX12 0x24c -#define INTEL_MSIC_AUDIOMUX34 0x24d -#define INTEL_MSIC_AUDIOSINC 0x24e -#define INTEL_MSIC_AUDIOTXEN 0x24f -#define INTEL_MSIC_HSEPRXCTRL 0x250 -#define INTEL_MSIC_IHFRXCTRL 0x251 -#define INTEL_MSIC_VOICETXVOL 0x252 -#define INTEL_MSIC_SIDETONEVOL 0x253 -#define INTEL_MSIC_MUSICSHARVOL 0x254 -#define INTEL_MSIC_VOICETXCTRL 0x255 -#define INTEL_MSIC_HSMIXER 0x256 -#define INTEL_MSIC_DACCONFIG 0x257 -#define INTEL_MSIC_SOFTMUTE 0x258 -#define INTEL_MSIC_HSLVOLCTRL 0x259 -#define INTEL_MSIC_HSRVOLCTRL 0x25a -#define INTEL_MSIC_IHFLVOLCTRL 0x25b -#define INTEL_MSIC_IHFRVOLCTRL 0x25c -#define INTEL_MSIC_DRIVEREN 0x25d -#define INTEL_MSIC_LINEOUTCTRL 0x25e 
-#define INTEL_MSIC_VIB1CTRL1 0x25f -#define INTEL_MSIC_VIB1CTRL2 0x260 -#define INTEL_MSIC_VIB1CTRL3 0x261 -#define INTEL_MSIC_VIB1SPIPCM_1 0x262 -#define INTEL_MSIC_VIB1SPIPCM_2 0x263 -#define INTEL_MSIC_VIB1CTRL5 0x264 -#define INTEL_MSIC_VIB2CTRL1 0x265 -#define INTEL_MSIC_VIB2CTRL2 0x266 -#define INTEL_MSIC_VIB2CTRL3 0x267 -#define INTEL_MSIC_VIB2SPIPCM_1 0x268 -#define INTEL_MSIC_VIB2SPIPCM_2 0x269 -#define INTEL_MSIC_VIB2CTRL5 0x26a -#define INTEL_MSIC_BTNCTRL1 0x26b -#define INTEL_MSIC_BTNCTRL2 0x26c -#define INTEL_MSIC_PCM1TXSLOT01 0x26d -#define INTEL_MSIC_PCM1TXSLOT23 0x26e -#define INTEL_MSIC_PCM1TXSLOT45 0x26f -#define INTEL_MSIC_PCM1RXSLOT0123 0x270 -#define INTEL_MSIC_PCM1RXSLOT045 0x271 -#define INTEL_MSIC_PCM2TXSLOT01 0x272 -#define INTEL_MSIC_PCM2TXSLOT23 0x273 -#define INTEL_MSIC_PCM2TXSLOT45 0x274 -#define INTEL_MSIC_PCM2RXSLOT01 0x275 -#define INTEL_MSIC_PCM2RXSLOT23 0x276 -#define INTEL_MSIC_PCM2RXSLOT45 0x277 -#define INTEL_MSIC_PCM1CTRL1 0x278 -#define INTEL_MSIC_PCM1CTRL2 0x279 -#define INTEL_MSIC_PCM1CTRL3 0x27a -#define INTEL_MSIC_PCM2CTRL1 0x27b -#define INTEL_MSIC_PCM2CTRL2 0x27c - -/* HDMI */ -#define INTEL_MSIC_HDMIPUEN 0x280 -#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */ - -/* Physical address of the start of the MSIC interrupt tree in SRAM */ -#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0 - -/** - * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver - * @gpio_base: base number for the GPIOs - */ -struct intel_msic_gpio_pdata { - unsigned gpio_base; -}; - -/** - * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver - * @gpio: GPIO number used for OCD interrupts - * - * The MSIC MFD driver converts @gpio into an IRQ number and passes it to - * the OCD driver as %IORESOURCE_IRQ. - */ -struct intel_msic_ocd_pdata { - unsigned gpio; -}; - -/* MSIC embedded blocks (subdevices) */ -enum intel_msic_block { - INTEL_MSIC_BLOCK_TOUCH, - INTEL_MSIC_BLOCK_ADC, - INTEL_MSIC_BLOCK_BATTERY, - INTEL_MSIC_BLOCK_GPIO, - INTEL_MSIC_BLOCK_AUDIO, - INTEL_MSIC_BLOCK_HDMI, - INTEL_MSIC_BLOCK_THERMAL, - INTEL_MSIC_BLOCK_POWER_BTN, - INTEL_MSIC_BLOCK_OCD, - - INTEL_MSIC_BLOCK_LAST, -}; - -/** - * struct intel_msic_platform_data - platform data for the MSIC driver - * @irq: array of interrupt numbers, one per device. If @irq is set to %0 - * for a given block, the corresponding platform device is not - * created. For devices which don't have an interrupt, use %0xff - * (this is same as in SFI spec). - * @gpio: platform data for the MSIC GPIO driver - * @ocd: platform data for the MSIC OCD driver - * - * Once the MSIC driver is initialized, the register interface is ready to - * use. All the platform devices for subdevices are created after the - * register interface is ready so that we can guarantee its availability to - * the subdevice drivers. - * - * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ - * resources of the created platform device. 
- */ -struct intel_msic_platform_data { - int irq[INTEL_MSIC_BLOCK_LAST]; - struct intel_msic_gpio_pdata *gpio; - struct intel_msic_ocd_pdata *ocd; -}; - -struct intel_msic; - -extern int intel_msic_reg_read(unsigned short reg, u8 *val); -extern int intel_msic_reg_write(unsigned short reg, u8 val); -extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask); -extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count); -extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count); - -/* - * pdev_to_intel_msic - gets an MSIC instance from the platform device - * @pdev: platform device pointer - * - * The client drivers need to have pointer to the MSIC instance if they - * want to call intel_msic_irq_read(). This macro can be used for - * convenience to get the MSIC pointer from @pdev where needed. This is - * _only_ valid for devices which are managed by the MSIC. - */ -#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent)) - -extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, - u8 *val); - -#endif /* __LINUX_MFD_INTEL_MSIC_H__ */ diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h index 043d3b6de9ec..5ced55eae11b 100644 --- a/include/linux/mfd/iqs62x.h +++ b/include/linux/mfd/iqs62x.h @@ -28,7 +28,7 @@ #define IQS620_GLBL_EVENT_MASK_PMU BIT(6) #define IQS62X_NUM_KEYS 16 -#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 5) +#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6) #define IQS62X_EVENT_SIZE 10 @@ -78,6 +78,7 @@ enum iqs62x_event_flag { /* everything else */ IQS62X_EVENT_SYS_RESET, + IQS62X_EVENT_SYS_ATI, }; struct iqs62x_event_data { @@ -97,12 +98,10 @@ struct iqs62x_dev_desc { const char *dev_name; const struct mfd_cell *sub_devs; int num_sub_devs; - u8 prod_num; u8 sw_num; const u8 *cal_regs; int num_cal_regs; - u8 prox_mask; u8 sar_mask; u8 hall_mask; @@ -110,16 +109,12 @@ struct iqs62x_dev_desc { u8 temp_mask; u8 als_mask; u8 ir_mask; - u8 prox_settings; u8 als_flags; u8 hall_flags; u8 hyst_shift; - u8 interval; u8 interval_div; - - u8 clk_div; const char *fw_name; const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE]; }; @@ -130,8 +125,10 @@ struct iqs62x_core { struct regmap *regmap; struct blocking_notifier_head nh; struct list_head fw_blk_head; + struct completion ati_done; struct completion fw_done; enum iqs62x_ui_sel ui_sel; + unsigned long event_cache; }; extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS]; diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h index 4283b5b33e04..66f673c35303 100644 --- a/include/linux/mfd/rohm-generic.h +++ b/include/linux/mfd/rohm-generic.h @@ -12,6 +12,8 @@ enum rohm_chip_type { ROHM_CHIP_TYPE_BD71847, ROHM_CHIP_TYPE_BD70528, ROHM_CHIP_TYPE_BD71828, + ROHM_CHIP_TYPE_BD9571, + ROHM_CHIP_TYPE_BD9574, ROHM_CHIP_TYPE_AMOUNT }; @@ -20,14 +22,12 @@ struct rohm_regmap_dev { struct regmap *regmap; }; -enum { - ROHM_DVS_LEVEL_UNKNOWN, - ROHM_DVS_LEVEL_RUN, - ROHM_DVS_LEVEL_IDLE, - ROHM_DVS_LEVEL_SUSPEND, - ROHM_DVS_LEVEL_LPSR, - ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR, -}; +#define ROHM_DVS_LEVEL_RUN BIT(0) +#define ROHM_DVS_LEVEL_IDLE BIT(1) +#define ROHM_DVS_LEVEL_SUSPEND BIT(2) +#define ROHM_DVS_LEVEL_LPSR BIT(3) +#define ROHM_DVS_LEVEL_VALID_AMOUNT 4 +#define ROHM_DVS_LEVEL_UNKNOWN 0 /** * struct rohm_dvs_config - dynamic voltage scaling register descriptions diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 562862ff819c..d26acc8b21cd 100644 --- a/include/linux/mhi.h +++ 
b/include/linux/mhi.h @@ -279,7 +279,7 @@ struct mhi_controller_config { u32 num_channels; const struct mhi_channel_config *ch_cfg; u32 num_events; - const struct mhi_event_config *event_cfg; + struct mhi_event_config *event_cfg; bool use_bounce_buf; bool m2_no_db; }; @@ -347,12 +347,14 @@ struct mhi_controller_config { * @unmap_single: CB function to destroy TRE buffer * @read_reg: Read a MHI register via the physical link (required) * @write_reg: Write a MHI register via the physical link (required) + * @reset: Controller specific reset function (optional) * @buffer_len: Bounce buffer length * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) * @pre_init: MHI host needs to do pre-initialization before power up * @wake_set: Device wakeup set flag + * @irq_flags: irq flags passed to request_irq (optional) * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). For the fields marked as (optional) @@ -437,6 +439,7 @@ struct mhi_controller { u32 *out); void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val); + void (*reset)(struct mhi_controller *mhi_cntrl); size_t buffer_len; int index; @@ -444,6 +447,7 @@ struct mhi_controller { bool fbc_download; bool pre_init; bool wake_set; + unsigned long irq_flags; }; /** @@ -599,6 +603,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); /** + * mhi_get_free_desc_count - Get transfer ring length + * Get # of TD available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_free_desc_count(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** * mhi_prepare_for_power_up - Do pre-initialization before power up. * This is optional, call this before power up if * the controller does not want bus framework to @@ -673,6 +686,13 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); /** + * mhi_soc_reset - Trigger a device reset. This can be used as a last resort + * to reset and recover a device. 
+ * @mhi_cntrl: MHI controller + */ +void mhi_soc_reset(struct mhi_controller *mhi_cntrl); + +/** * mhi_device_get - Disable device low power mode * @mhi_dev: Device associated with the channel */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 4594838a0f7c..3a389633b68f 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -89,7 +89,7 @@ extern int PageMovable(struct page *page); extern void __SetPageMovable(struct page *page, struct address_space *mapping); extern void __ClearPageMovable(struct page *page); #else -static inline int PageMovable(struct page *page) { return 0; }; +static inline int PageMovable(struct page *page) { return 0; } static inline void __SetPageMovable(struct page *page, struct address_space *mapping) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f1de49d64a98..dc3d2508f5c6 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -359,6 +359,10 @@ enum mlx5_event { MLX5_EVENT_TYPE_MAX = 0x100, }; +enum mlx5_driver_event { + MLX5_DRIVER_EVENT_TYPE_TRAP = 0, +}; + enum { MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, @@ -578,7 +582,10 @@ struct mlx5_init_seg { __be32 internal_timer_l; __be32 rsvd3[2]; __be32 health_counter; - __be32 rsvd4[1019]; + __be32 rsvd4[11]; + __be32 real_time_h; + __be32 real_time_l; + __be32 rsvd5[1006]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; @@ -899,6 +906,11 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe) +{ + return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF; +} + #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) #define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f93bfe7473aa..53b89631a1d9 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -143,6 +143,7 @@ enum { MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MTUTC = 0x9055, MLX5_REG_MPEGC = 0x9056, MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, @@ -193,7 +194,8 @@ enum port_state_policy { enum mlx5_coredev_type { MLX5_COREDEV_PF, - MLX5_COREDEV_VF + MLX5_COREDEV_VF, + MLX5_COREDEV_SF, }; struct mlx5_field_desc { @@ -305,13 +307,6 @@ struct mlx5_cmd { struct mlx5_cmd_stats *stats; }; -struct mlx5_port_caps { - int gid_table_len; - int pkey_table_len; - u8 ext_port_cap; - bool has_smi; -}; - struct mlx5_cmd_mailbox { void *buf; dma_addr_t dma; @@ -373,6 +368,8 @@ struct mlx5_core_mkey { u32 key; u32 pd; u32 type; + struct wait_queue_head wait; + refcount_t usecount; }; #define MLX5_24BIT_MASK ((1 << 24) - 1) @@ -507,6 +504,10 @@ struct mlx5_devcom; struct mlx5_fw_reset; struct mlx5_eq_table; struct mlx5_irq_table; +struct mlx5_vhca_state_notifier; +struct mlx5_sf_dev_table; +struct mlx5_sf_hw_table; +struct mlx5_sf_table; struct mlx5_rate_limit { u32 rate; @@ -564,6 +565,7 @@ struct mlx5_priv { int host_pf_pages; struct mlx5_core_health health; + struct list_head traps; /* start: qp staff */ struct dentry *qp_debugfs; @@ -582,7 +584,6 @@ struct mlx5_priv { /* end: alloc staff */ struct dentry *dbg_root; - struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; struct mlx5_adev **adev; @@ -603,6 +604,15 @@ struct mlx5_priv { struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; +#ifdef CONFIG_MLX5_SF + struct mlx5_vhca_state_notifier *vhca_state_notifier; + struct mlx5_sf_dev_table 
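As a usage sketch for the two MHI helpers added above, mhi_get_free_desc_count() and mhi_soc_reset(): a client driver might poll the free descriptor count before queueing more buffers, and keep the SoC reset strictly as a last resort. The surrounding function names are hypothetical and error handling is omitted:

    /* Sketch only: check transfer ring space before queueing on the TX channel. */
    static int example_can_queue(struct mhi_device *mhi_dev)
    {
            if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) <= 0)
                    return -EBUSY;  /* no TDs available, retry later */
            return 0;
    }

    /* Sketch only: last-resort recovery path. */
    static void example_recover(struct mhi_controller *mhi_cntrl)
    {
            mhi_soc_reset(mhi_cntrl);
    }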
*sf_dev_table; + struct mlx5_core_dev *parent_mdev; +#endif +#ifdef CONFIG_MLX5_SF_MANAGER + struct mlx5_sf_hw_table *sf_hw_table; + struct mlx5_sf_table *sf_table; +#endif }; enum mlx5_device_state { @@ -661,18 +671,22 @@ struct mlx5_pps { u8 enabled; }; -struct mlx5_clock { - struct mlx5_nb pps_nb; - seqlock_t lock; +struct mlx5_timer { struct cyclecounter cycles; struct timecounter tc; - struct hwtstamp_config hwtstamp_config; u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; +}; + +struct mlx5_clock { + struct mlx5_nb pps_nb; + seqlock_t lock; + struct hwtstamp_config hwtstamp_config; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; + struct mlx5_timer timer; }; struct mlx5_dm; @@ -694,7 +708,6 @@ struct mlx5_core_dev { u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; struct { u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; @@ -1072,11 +1085,26 @@ enum { MAX_MR_CACHE_ENTRIES }; +/* Async-atomic event notifier used by mlx5 core to forward FW + * events received from event queue to mlx5 consumers. + * Optimise event queue dispatching. + */ int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); + +/* Async-atomic event notifier used for forwarding + * events from the event queue into the mlx5 events dispatcher, + * eswitch, clock and others. + */ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +/* Blocking event notifier used to forward SW events, used for slow path */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data); + int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); @@ -1209,22 +1237,4 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) return val.vbool; } -/** - * mlx5_core_net - Provide net namespace of the mlx5_core_dev - * @dev: mlx5 core device - * - * mlx5_core_net() returns the net namespace of mlx5 core device. - * This can be called only in below described limited context. - * (a) When a devlink instance for mlx5_core is registered and - * when devlink reload operation is disabled. - * or - * (b) during devlink reload reload_down() and reload_up callbacks - * where it is ensured that devlink instance's net namespace is - * stable.
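To illustrate the notifier additions above, a consumer could register an ordinary notifier_block on the new blocking chain and react to software events such as MLX5_DRIVER_EVENT_TYPE_TRAP. This is a sketch under that assumption; the callback body and names are hypothetical:

    static int example_sw_event_cb(struct notifier_block *nb, unsigned long event, void *data)
    {
            if (event == MLX5_DRIVER_EVENT_TYPE_TRAP) {
                    /* handle the trap context passed in 'data' */
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_sw_event_nb = {
            .notifier_call = example_sw_event_cb,
    };

    /* with a valid struct mlx5_core_dev *dev: */
    /* mlx5_blocking_notifier_register(dev, &example_sw_event_nb); */
    /* ... */
    /* mlx5_blocking_notifier_unregister(dev, &example_sw_event_nb); */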
- */ -static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) -{ - return devlink_net(priv_to_devlink(dev)); -} - #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 29fd832950e0..994c2c8cb4fd 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -96,6 +96,35 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num); +u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, + u16 vport_num); + +/* Reg C1 usage: + * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) > + * + * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is + * encapsulation tunnel options, and the lowest 8 bits are used for zone id. + * + * Zone id is used to restore CT flow when packet misses on chain. + * + * Tunnel id and options are used together to restore the tunnel info metadata + * on miss and to support inner header rewrite by means of implicit chain 0 + * flows. + */ +#define ESW_ZONE_ID_BITS 8 +#define ESW_TUN_OPTS_BITS 12 +#define ESW_TUN_ID_BITS 12 +#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS +#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET +#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0) +#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET) +#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET) +#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */ +#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */ +#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \ + ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT) +#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK + u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 442c0160caab..df5d91c8b2d4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -842,11 +842,16 @@ struct mlx5_ifc_qos_cap_bits { u8 reserved_at_4[0x1]; u8 packet_pacing_burst_bound[0x1]; u8 packet_pacing_typical_size[0x1]; - u8 reserved_at_7[0x4]; + u8 reserved_at_7[0x1]; + u8 nic_sq_scheduling[0x1]; + u8 nic_bw_share[0x1]; + u8 nic_rate_limit[0x1]; u8 packet_pacing_uid[0x1]; u8 reserved_at_c[0x14]; - u8 reserved_at_20[0x20]; + u8 reserved_at_20[0xb]; + u8 log_max_qos_nic_queue_group[0x5]; + u8 reserved_at_30[0x10]; u8 packet_pacing_max_rate[0x20]; @@ -932,11 +937,18 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 reserved_at_200[0x600]; }; +enum { + MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, + MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, + MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, +}; + struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; u8 reserved_at_1[0x3]; u8 sw_r_roce_src_udp_port[0x1]; - u8 reserved_at_5[0x1b]; + u8 reserved_at_5[0x19]; + u8 qp_ts_format[0x2]; u8 reserved_at_20[0x60]; @@ -1253,6 +1265,18 @@ enum { MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, }; +enum { + MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, + MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, + MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, +}; + +enum { + MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, + MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, + MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x1f]; u8 
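Given the reg C1 layout documented above (tunnel id in the top 12 bits, tunnel options in the middle 12 bits, zone id in the low 8 bits), the three fields can be unpacked with the masks defined in that hunk. A small sketch, with the helper names being illustrative only:

    static inline u32 example_reg_c1_zone_id(u32 reg_c1)
    {
            return reg_c1 & ESW_ZONE_ID_MASK;                           /* bits 7..0 */
    }

    static inline u32 example_reg_c1_tun_opts(u32 reg_c1)
    {
            return (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET; /* bits 19..8 */
    }

    static inline u32 example_reg_c1_tun_id(u32 reg_c1)
    {
            return reg_c1 >> (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS); /* bits 31..20 */
    }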
vhca_resource_manager[0x1]; @@ -1278,7 +1302,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_a0[0x3]; u8 ece_support[0x1]; - u8 reserved_at_a4[0x7]; + u8 reserved_at_a4[0x5]; + u8 reg_c_preserve[0x1]; + u8 reserved_at_aa[0x1]; u8 log_max_srq[0x5]; u8 reserved_at_b0[0x1]; u8 uplink_follow[0x1]; @@ -1564,7 +1590,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 general_obj_types[0x40]; - u8 reserved_at_440[0x4]; + u8 sq_ts_format[0x2]; + u8 rq_ts_format[0x2]; u8 steering_format_version[0x4]; u8 create_qp_start_hint[0x18]; @@ -1634,7 +1661,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 sf_set_partition[0x1]; u8 reserved_at_682[0x1]; u8 log_max_sf[0x5]; - u8 reserved_at_688[0x8]; + u8 apu[0x1]; + u8 reserved_at_689[0x7]; u8 log_min_sf_size[0x8]; u8 max_num_sf_partitions[0x8]; @@ -2868,6 +2896,12 @@ enum { MLX5_QPC_CS_RES_UP_TO_64B = 0x2, }; +enum { + MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1, + MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, +}; + struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; @@ -2896,7 +2930,9 @@ struct mlx5_ifc_qpc_bits { u8 log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; - u8 reserved_at_55[0x6]; + u8 reserved_at_55[0x3]; + u8 ts_format[0x2]; + u8 reserved_at_5a[0x1]; u8 rlky[0x1]; u8 ulp_stateless_offload_mode[0x4]; @@ -3312,6 +3348,12 @@ enum { MLX5_SQC_STATE_ERR = 0x3, }; +enum { + MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, + MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, + MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, +}; + struct mlx5_ifc_sqc_bits { u8 rlky[0x1]; u8 cd_master[0x1]; @@ -3323,7 +3365,9 @@ struct mlx5_ifc_sqc_bits { u8 reg_umr[0x1]; u8 allow_swp[0x1]; u8 hairpin[0x1]; - u8 reserved_at_f[0x11]; + u8 reserved_at_f[0xb]; + u8 ts_format[0x2]; + u8 reserved_at_1c[0x4]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -3345,7 +3389,7 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_e0[0x10]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; - u8 reserved_at_110[0x10]; + u8 qos_queue_group_id[0x10]; u8 reserved_at_120[0x40]; @@ -3360,6 +3404,7 @@ enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, + SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4, }; enum { @@ -3414,6 +3459,12 @@ enum { MLX5_RQC_STATE_ERR = 0x3, }; +enum { + MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, + MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, + MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, +}; + struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; @@ -3424,7 +3475,9 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; u8 hairpin[0x1]; - u8 reserved_at_f[0x11]; + u8 reserved_at_f[0xb]; + u8 ts_format[0x2]; + u8 reserved_at_1c[0x4]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -3816,7 +3869,7 @@ struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x2]; u8 dbr_umem_valid[0x1]; - u8 reserved_at_7[0x1]; + u8 apu_thread_cq[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -4803,6 +4856,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits { enum { SCHEDULING_HIERARCHY_E_SWITCH = 0x2, + SCHEDULING_HIERARCHY_NIC = 0x3, }; struct mlx5_ifc_query_scheduling_element_in_bits { @@ -5904,6 +5958,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_query_modify_header_context_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 modify_header_id[0x20]; + + u8 reserved_at_60[0xa0]; 
+}; + struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -9094,6 +9160,28 @@ struct mlx5_ifc_mpegc_reg_bits { u8 reserved_at_60[0x100]; }; +enum { + MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1, + MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2, + MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3, +}; + +struct mlx5_ifc_mtutc_reg_bits { + u8 reserved_at_0[0x1c]; + u8 operation[0x4]; + + u8 freq_adjustment[0x20]; + + u8 reserved_at_40[0x40]; + + u8 utc_sec[0x20]; + + u8 reserved_at_a0[0x2]; + u8 utc_nsec[0x1e]; + + u8 time_adjustment[0x20]; +}; + struct mlx5_ifc_pcam_enhanced_features_bits { u8 reserved_at_0[0x68]; u8 fec_50G_per_lane_in_pplm[0x1]; @@ -9152,7 +9240,9 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x6e]; + u8 reserved_at_0[0x6b]; + u8 ptpcyc2realtime_modify[0x1]; + u8 reserved_at_6c[0x2]; u8 pci_status_and_power[0x1]; u8 reserved_at_6f[0x5]; u8 mark_tx_action_cnp[0x1]; @@ -9175,7 +9265,8 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 regs_95_to_87[0x9]; u8 mpegc[0x1]; - u8 regs_85_to_68[0x12]; + u8 mtutc[0x1]; + u8 regs_84_to_68[0x11]; u8 tracer_registers[0x4]; u8 regs_63_to_32[0x20]; @@ -9908,6 +9999,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mcda_reg_bits mcda_reg; struct mlx5_ifc_mirc_reg_bits mirc_reg; struct mlx5_ifc_mfrl_reg_bits mfrl_reg; + struct mlx5_ifc_mtutc_reg_bits mtutc_reg; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h deleted file mode 100644 index 9c4bedc95504..000000000000 --- a/include/linux/mm-arch-hooks.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Generic mm no-op hooks. - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - */ -#ifndef _LINUX_MM_ARCH_HOOKS_H -#define _LINUX_MM_ARCH_HOOKS_H - -#include <asm/mm-arch-hooks.h> - -#ifndef arch_remap -static inline void arch_remap(struct mm_struct *mm, - unsigned long old_start, unsigned long old_end, - unsigned long new_start, unsigned long new_end) -{ -} -#define arch_remap arch_remap -#endif - -#endif /* _LINUX_MM_ARCH_HOOKS_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index ecdf8a8cd6ae..64a71bf20536 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -514,11 +514,14 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags) * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { - struct vm_area_struct *vma; /* Target VMA */ - unsigned int flags; /* FAULT_FLAG_xxx flags */ - gfp_t gfp_mask; /* gfp mask to be used for allocations */ - pgoff_t pgoff; /* Logical page offset based on vma */ - unsigned long address; /* Faulting virtual address */ + const struct { + struct vm_area_struct *vma; /* Target VMA */ + gfp_t gfp_mask; /* gfp mask to be used for allocations */ + pgoff_t pgoff; /* Logical page offset based on vma */ + unsigned long address; /* Faulting virtual address */ + }; + unsigned int flags; /* FAULT_FLAG_xxx flags + * XXX: should really be 'const' */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ pud_t *pud; /* Pointer to pud entry matching @@ -542,8 +545,8 @@ struct vm_fault { * is not NULL, otherwise pmd. */ pgtable_t prealloc_pte; /* Pre-allocated pte page table. - * vm_ops->map_pages() calls - * alloc_set_pte() from atomic context. + * vm_ops->map_pages() sets up a page + * table from atomic context. 
* do_fault_around() pre-allocates * page table to avoid allocation from * atomic context. @@ -578,7 +581,7 @@ struct vm_operations_struct { vm_fault_t (*fault)(struct vm_fault *vmf); vm_fault_t (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); - void (*map_pages)(struct vm_fault *vmf, + vm_fault_t (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); unsigned long (*pagesize)(struct vm_area_struct * area); @@ -590,7 +593,8 @@ struct vm_operations_struct { vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically - * for use by special VMAs that can switch between memory and hardware + * for use by special VMAs. See also generic_access_phys() for a generic + * implementation useful for any iomem mapping. */ int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); @@ -988,7 +992,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page); +vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); +void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr); + vm_fault_t finish_fault(struct vm_fault *vmf); vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif @@ -1181,6 +1187,9 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); +__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs, + unsigned int flags); + static inline __must_check bool try_get_page(struct page *page) { @@ -1291,6 +1300,27 @@ static inline bool page_maybe_dma_pinned(struct page *page) GUP_PIN_COUNTING_BIAS; } +static inline bool is_cow_mapping(vm_flags_t flags) +{ + return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; +} + +/* + * This should most likely only be called during fork() to see whether we + * should break the cow immediately for a page on the src mm. + */ +static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, + struct page *page) +{ + if (!is_cow_mapping(vma->vm_flags)) + return false; + + if (!atomic_read(&vma->vm_mm->has_pinned)) + return false; + + return page_maybe_dma_pinned(page); +} + #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTION_IN_PAGE_FLAGS #endif @@ -1584,7 +1614,7 @@ struct address_space *page_mapping_file(struct page *page); * ALLOC_NO_WATERMARKS and the low watermark was not * met implying that the system is under some pressure. 
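For context on page_needs_cow_for_dma() above: it is meant to be asked on the source VMA during fork(), so the copy path can decide between normal copy-on-write sharing and an immediate copy when the parent may have pinned the page for DMA. A hedged sketch of that decision (the function name is hypothetical and the actual copy step is elided):

    /* Sketch: during fork(), decide whether the child can share the page. */
    static bool example_must_copy_at_fork(struct vm_area_struct *src_vma, struct page *page)
    {
            if (page_needs_cow_for_dma(src_vma, page))
                    return true;    /* parent may have pinned it, copy now */

            return false;           /* plain CoW sharing is fine */
    }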
*/ -static inline bool page_is_pfmemalloc(struct page *page) +static inline bool page_is_pfmemalloc(const struct page *page) { /* * Page index cannot be this large so this must be @@ -1658,9 +1688,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp); int follow_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, - spinlock_t **ptlp); + pte_t **ptepp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, @@ -1754,48 +1786,6 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim); -/* Container for pinned pfns / pages */ -struct frame_vector { - unsigned int nr_allocated; /* Number of frames we have space for */ - unsigned int nr_frames; /* Number of frames stored in ptrs array */ - bool got_ref; /* Did we pin pages by getting page ref? */ - bool is_pfns; /* Does array contain pages or pfns? */ - void *ptrs[]; /* Array of pinned pfns / pages. Use - * pfns_vector_pages() or pfns_vector_pfns() - * for access */ -}; - -struct frame_vector *frame_vector_create(unsigned int nr_frames); -void frame_vector_destroy(struct frame_vector *vec); -int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, - unsigned int gup_flags, struct frame_vector *vec); -void put_vaddr_frames(struct frame_vector *vec); -int frame_vector_to_pages(struct frame_vector *vec); -void frame_vector_to_pfns(struct frame_vector *vec); - -static inline unsigned int frame_vector_count(struct frame_vector *vec) -{ - return vec->nr_frames; -} - -static inline struct page **frame_vector_pages(struct frame_vector *vec) -{ - if (vec->is_pfns) { - int err = frame_vector_to_pages(vec); - - if (err) - return ERR_PTR(err); - } - return (struct page **)(vec->ptrs); -} - -static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) -{ - if (!vec->is_pfns) - frame_vector_to_pfns(vec); - return (unsigned long *)(vec->ptrs); -} - struct kvec; int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); @@ -2344,32 +2334,20 @@ extern void free_initmem(void); extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); -#ifdef CONFIG_HIGHMEM -/* - * Free a highmem page into the buddy system, adjusting totalhigh_pages - * and totalram_pages. - */ -extern void free_highmem_page(struct page *page); -#endif - extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. 
*/ -static inline void __free_reserved_page(struct page *page) +static inline void free_reserved_page(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); -} - -static inline void free_reserved_page(struct page *page) -{ - __free_reserved_page(page); adjust_managed_page_count(page, 1); } +#define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { @@ -2439,9 +2417,10 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, +extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); +extern void memmap_init_zone(struct zone *zone); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2622,7 +2601,7 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern vm_fault_t filemap_fault(struct vm_fault *vmf); -extern void filemap_map_pages(struct vm_fault *vmf, +extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); @@ -3177,5 +3156,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; +void mem_dump_obj(void *object); + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8fc71e9d7bb0..355ea1ee32bd 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -24,7 +24,7 @@ static inline int page_is_file_lru(struct page *page) return !PageSwapBacked(page); } -static __always_inline void __update_lru_size(struct lruvec *lruvec, +static __always_inline void update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, int nr_pages) { @@ -33,76 +33,27 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec, __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); -} - -static __always_inline void update_lru_size(struct lruvec *lruvec, - enum lru_list lru, enum zone_type zid, - int nr_pages) -{ - __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif } -static __always_inline void add_page_to_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void add_page_to_lru_list_tail(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add_tail(&page->lru, &lruvec->lists[lru]); -} - -static __always_inline void del_page_from_lru_list(struct page *page, - struct lruvec *lruvec, enum lru_list lru) -{ - list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); -} - /** - * page_lru_base_type - which LRU list type should a page be on? - * @page: the page to test - * - * Used for LRU list index arithmetic. - * - * Returns the base LRU type - file or anon - @page should be on. 
+ * __clear_page_lru_flags - clear page lru flags before releasing a page + * @page: the page that was on lru and now has a zero reference */ -static inline enum lru_list page_lru_base_type(struct page *page) +static __always_inline void __clear_page_lru_flags(struct page *page) { - if (page_is_file_lru(page)) - return LRU_INACTIVE_FILE; - return LRU_INACTIVE_ANON; -} + VM_BUG_ON_PAGE(!PageLRU(page), page); -/** - * page_off_lru - which LRU list was page on? clearing its lru flags. - * @page: the page to test - * - * Returns the LRU list a page was on, as an index into the array of LRU - * lists; and clears its Unevictable or Active flags, ready for freeing. - */ -static __always_inline enum lru_list page_off_lru(struct page *page) -{ - enum lru_list lru; + __ClearPageLRU(page); - if (PageUnevictable(page)) { - __ClearPageUnevictable(page); - lru = LRU_UNEVICTABLE; - } else { - lru = page_lru_base_type(page); - if (PageActive(page)) { - __ClearPageActive(page); - lru += LRU_ACTIVE; - } - } - return lru; + /* this shouldn't happen, so leave the flags to bad_page() */ + if (PageActive(page) && PageUnevictable(page)) + return; + + __ClearPageActive(page); + __ClearPageUnevictable(page); } /** @@ -116,13 +67,41 @@ static __always_inline enum lru_list page_lru(struct page *page) { enum lru_list lru; + VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); + if (PageUnevictable(page)) - lru = LRU_UNEVICTABLE; - else { - lru = page_lru_base_type(page); - if (PageActive(page)) - lru += LRU_ACTIVE; - } + return LRU_UNEVICTABLE; + + lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; + if (PageActive(page)) + lru += LRU_ACTIVE; + return lru; } + +static __always_inline void add_page_to_lru_list(struct page *page, + struct lruvec *lruvec) +{ + enum lru_list lru = page_lru(page); + + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); + list_add(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec) +{ + enum lru_list lru = page_lru(page); + + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void del_page_from_lru_list(struct page *page, + struct lruvec *lruvec) +{ + list_del(&page->lru); + update_lru_size(lruvec, page_lru(page), page_zonenum(page), + -thp_nr_pages(page)); +} #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 07d9acb5b19c..6613b26a8894 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -23,6 +23,7 @@ #endif #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) +#define INIT_PASID 0 struct address_space; struct mem_cgroup; @@ -588,10 +589,9 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) } struct mmu_gather; -extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end); -extern void tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end); +extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm); +extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm); +extern void tlb_finish_mmu(struct mmu_gather *tlb); static inline void init_tlb_flush_pending(struct mm_struct *mm) { diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 42df06c6b19c..f9ad35dd6012 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -311,7 +311,6 @@ struct 
mmc_card { struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ unsigned int nr_parts; - unsigned int bouncesz; /* Bounce buffer size */ struct workqueue_struct *complete_wq; /* Private workqueue */ }; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 29aa50711626..ab19245e9945 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -162,6 +162,12 @@ struct mmc_request { bool cap_cmd_during_tfr; int tag; + +#ifdef CONFIG_MMC_CRYPTO + bool crypto_enabled; + int crypto_key_slot; + u32 data_unit_num; +#endif }; struct mmc_card; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 01bba36545c5..26a3c7bc29ae 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -15,6 +15,7 @@ #include <linux/mmc/card.h> #include <linux/mmc/pm.h> #include <linux/dma-direction.h> +#include <linux/keyslot-manager.h> struct mmc_ios { unsigned int clock; /* clock rate */ @@ -79,6 +80,17 @@ struct mmc_ios { bool enhanced_strobe; /* hs400es selection */ }; +struct mmc_clk_phase { + bool valid; + u16 in_deg; + u16 out_deg; +}; + +#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1) +struct mmc_clk_phase_map { + struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES]; +}; + struct mmc_host; struct mmc_host_ops { @@ -384,6 +396,11 @@ struct mmc_host { #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ #define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ +#ifdef CONFIG_MMC_CRYPTO +#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */ +#else +#define MMC_CAP2_CRYPTO 0 +#endif int fixed_drv_type; /* fixed driver type for non-removable media */ @@ -412,7 +429,6 @@ struct mmc_host { unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ - unsigned int use_blk_mq:1; /* use blk-mq */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ unsigned int can_dma_map_merge:1; /* merging can be used */ @@ -478,6 +494,11 @@ struct mmc_host { bool cqe_enabled; bool cqe_on; + /* Inline encryption support */ +#ifdef CONFIG_MMC_CRYPTO + struct blk_keyslot_manager ksm; +#endif + /* Host Software Queue support */ bool hsq_enabled; @@ -490,6 +511,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); +void mmc_of_parse_clk_phase(struct mmc_host *host, + struct mmc_clk_phase_map *map); int mmc_of_parse(struct mmc_host *host); int mmc_of_parse_voltage(struct device_node *np, u32 *mask); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b593316bff3d..47946cec7584 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -206,10 +206,30 @@ enum node_stat_item { NR_KERNEL_SCS_KB, /* measured in KiB */ #endif NR_PAGETABLE, /* used for pagetables */ +#ifdef CONFIG_SWAP + NR_SWAPCACHE, +#endif NR_VM_NODE_STAT_ITEMS }; /* + * Returns true if the item should be printed in THPs (/proc/vmstat + * currently prints number of anon, file and shmem THPs. But the item + * is charged in pages). 
+ */ +static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) +{ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return false; + + return item == NR_ANON_THPS || + item == NR_FILE_THPS || + item == NR_SHMEM_THPS || + item == NR_SHMEM_PMDMAPPED || + item == NR_FILE_PMDMAPPED; +} + +/* * Returns true if the value is measured in bytes (most vmstat values are * measured in pages). This defines the API part, the internal representation * might be different. @@ -483,6 +503,9 @@ struct zone { * bootmem allocator): * managed_pages = present_pages - reserved_pages; * + * cma pages is present pages that are assigned for CMA use + * (MIGRATE_CMA). + * * So present_pages may be used by memory hotplug or memory power * management logic to figure out unmanaged pages by checking * (present_pages - managed_pages). And managed_pages should be used @@ -507,6 +530,9 @@ struct zone { atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; +#ifdef CONFIG_CMA + unsigned long cma_pages; +#endif const char *name; @@ -604,6 +630,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone) return (unsigned long)atomic_long_read(&zone->managed_pages); } +static inline unsigned long zone_cma_pages(struct zone *zone) +{ +#ifdef CONFIG_CMA + return zone->cma_pages; +#else + return 0; +#endif +} + static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; @@ -872,8 +907,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) #endif } -extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); - #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else @@ -885,6 +918,18 @@ static inline int local_memory_node(int node_id) { return node_id; }; */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) +#ifdef CONFIG_ZONE_DEVICE +static inline bool zone_is_zone_device(struct zone *zone) +{ + return zone_idx(zone) == ZONE_DEVICE; +} +#else +static inline bool zone_is_zone_device(struct zone *zone) +{ + return false; +} +#endif + /* * Returns true if a zone has pages managed by the buddy allocator. * All the reclaim decisions have to use this function rather than @@ -1273,13 +1318,14 @@ extern size_t mem_section_usage_size(void); * which results in PFN_SECTION_SHIFT equal 6. * To sum it up, at least 6 bits are available. 
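A sketch of the intended consumer of vmstat_item_print_in_thp() from the mmzone.h hunk above. The printer function is hypothetical and assumes the usual global_node_page_state() and HPAGE_PMD_NR kernel helpers.

static unsigned long example_vmstat_value_for_print(enum node_stat_item item)
{
	unsigned long pages = global_node_page_state(item);

	/* These items are charged in base pages but reported in THP units. */
	if (vmstat_item_print_in_thp(item))
		pages /= HPAGE_PMD_NR;

	return pages;
}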
*/ -#define SECTION_MARKED_PRESENT (1UL<<0) -#define SECTION_HAS_MEM_MAP (1UL<<1) -#define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_IS_EARLY (1UL<<3) -#define SECTION_MAP_LAST_BIT (1UL<<4) -#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 3 +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_IS_ONLINE (1UL<<2) +#define SECTION_IS_EARLY (1UL<<3) +#define SECTION_TAINT_ZONE_DEVICE (1UL<<4) +#define SECTION_MAP_LAST_BIT (1UL<<5) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) +#define SECTION_NID_SHIFT 3 static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1318,6 +1364,13 @@ static inline int online_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_IS_ONLINE)); } +static inline int online_device_section(struct mem_section *section) +{ + unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; + + return section && ((section->section_mem_map & flags) == flags); +} + static inline int online_section_nr(unsigned long nr) { return online_section(__nr_to_section(nr)); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index c425290b21e2..7d45b5f989b0 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -846,4 +846,46 @@ struct auxiliary_device_id { kernel_ulong_t driver_data; }; +/* Surface System Aggregator Module */ + +#define SSAM_MATCH_TARGET 0x1 +#define SSAM_MATCH_INSTANCE 0x2 +#define SSAM_MATCH_FUNCTION 0x4 + +struct ssam_device_id { + __u8 match_flags; + + __u8 domain; + __u8 category; + __u8 target; + __u8 instance; + __u8 function; + + kernel_ulong_t driver_data; +}; + +/* + * DFL (Device Feature List) + * + * DFL defines a linked list of feature headers within the device MMIO space to + * provide an extensible way of adding features. Software can walk through these + * predefined data structures to enumerate features. It is now used in the FPGA. + * See Documentation/fpga/dfl.rst for more information. + * + * The dfl bus type is introduced to match the individual feature devices (dfl + * devices) for specific dfl drivers. + */ + +/** + * struct dfl_device_id - dfl device identifier + * @type: DFL FIU type of the device. See enum dfl_id_type. + * @feature_id: feature identifier local to its DFL FIU type. + * @driver_data: driver specific data. + */ +struct dfl_device_id { + __u16 type; + __u16 feature_id; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 7a0bcb5b1ffc..59f094fa6f74 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -392,18 +392,6 @@ struct module { const s32 *gpl_crcs; bool using_gplonly_symbols; -#ifdef CONFIG_UNUSED_SYMBOLS - /* unused exported symbols. */ - const struct kernel_symbol *unused_syms; - const s32 *unused_crcs; - unsigned int num_unused_syms; - - /* GPL-only, unused exported symbols. */ - unsigned int num_unused_gpl_syms; - const struct kernel_symbol *unused_gpl_syms; - const s32 *unused_gpl_crcs; -#endif - #ifdef CONFIG_MODULE_SIG /* Signature was verified. */ bool sig_ok; @@ -411,11 +399,6 @@ struct module { bool async_probe_requested; - /* symbols that will be GPL-only in the near future. 
*/ - const struct kernel_symbol *gpl_future_syms; - const s32 *gpl_future_crcs; - unsigned int num_gpl_future_syms; - /* Exception table */ unsigned int num_exentries; struct exception_table_entry *extable; @@ -550,8 +533,6 @@ static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) } #endif -extern struct mutex module_mutex; - /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. But presently too much code (IDE & SCSI) require entry into the module during init.*/ @@ -586,20 +567,9 @@ static inline bool within_module(unsigned long addr, const struct module *mod) return within_module_init(addr, mod) || within_module_core(addr, mod); } -/* Search for module by name: must hold module_mutex. */ +/* Search for module by name: must be in a RCU-sched critical section. */ struct module *find_module(const char *name); -struct symsearch { - const struct kernel_symbol *start, *stop; - const s32 *crcs; - enum mod_license { - NOT_GPL_ONLY, - GPL_ONLY, - WILL_BE_GPL_ONLY, - } license; - bool unused; -}; - /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, @@ -608,10 +578,6 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); -int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, unsigned long), - void *data); - extern void __noreturn __module_put_and_exit(struct module *mod, long code); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) @@ -795,14 +761,6 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) return 0; } -static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, - struct module *, - unsigned long), - void *data) -{ - return 0; -} - static inline int register_module_notifier(struct notifier_block *nb) { /* no events will happen anyway, so this can always succeed */ @@ -891,4 +849,8 @@ static inline bool module_sig_ok(struct module *module) } #endif /* CONFIG_MODULE_SIG */ +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + #endif /* _LINUX_MODULE_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h index aaf343b38671..5d92a7e1a742 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -72,14 +72,20 @@ struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; + struct user_namespace *mnt_userns; } __randomize_layout; +static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt) +{ + /* Pairs with smp_store_release() in do_idmap_mount(). 
*/ + return smp_load_acquire(&mnt->mnt_userns); +} + struct file; /* forward dec */ struct path; extern int mnt_want_write(struct vfsmount *mnt); extern int mnt_want_write_file(struct file *file); -extern int mnt_clone_write(struct vfsmount *mnt); extern void mnt_drop_write(struct vfsmount *mnt); extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); diff --git a/include/linux/msi.h b/include/linux/msi.h index 360a0a7e7341..aef35fd1cf11 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -178,6 +178,12 @@ struct msi_desc { list_for_each_entry((desc), dev_to_msi_list((dev)), list) #define for_each_msi_entry_safe(desc, tmp, dev) \ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) +#define for_each_msi_vector(desc, __irq, dev) \ + for_each_msi_entry((desc), (dev)) \ + if ((desc)->irq) \ + for (__irq = (desc)->irq; \ + __irq < ((desc)->irq + (desc)->nvec_used); \ + __irq++) #ifdef CONFIG_IRQ_MSI_IOMMU static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index d13958de6d8a..a0d572855444 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -53,6 +53,7 @@ #define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ #define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */ #define SPINOR_OP_SRST 0x99 /* Software Reset */ +#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */ /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index dcd185cbfe79..0cd631a19727 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -199,29 +199,4 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); -/* - * These values are chosen such that FAIL and SUCCESS match the - * values of the regular mutex_trylock(). - */ -enum mutex_trylock_recursive_enum { - MUTEX_TRYLOCK_FAILED = 0, - MUTEX_TRYLOCK_SUCCESS = 1, - MUTEX_TRYLOCK_RECURSIVE, -}; - -/** - * mutex_trylock_recursive - trylock variant that allows recursive locking - * @lock: mutex to be locked - * - * This function should not be used, _ever_. It is purely for hysterical GEM - * raisins, and once those are gone this will be removed. - * - * Returns: - * - MUTEX_TRYLOCK_FAILED - trylock failed, - * - MUTEX_TRYLOCK_SUCCESS - lock acquired, - * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. - */ -extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum -mutex_trylock_recursive(struct mutex *lock); - #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/namei.h b/include/linux/namei.h index a4bb992623c4..b9605b2b46e7 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -46,6 +46,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; #define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */ #define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ #define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ +#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ /* LOOKUP_* flags which do scope-related checks based on the dirfd. 
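An illustrative, hypothetical walker built on the new for_each_msi_vector() iterator added in the msi.h hunk above; it simply visits every Linux IRQ number backing a device's MSI descriptors.

static void example_log_msi_vectors(struct device *dev)
{
	struct msi_desc *desc;
	int irq;

	/* Visits each allocated IRQ of each MSI descriptor on the device. */
	for_each_msi_vector(desc, irq, dev)
		dev_dbg(dev, "msi desc %p -> irq %d\n", desc, irq);
}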
*/ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) diff --git a/include/linux/nd.h b/include/linux/nd.h index 55c735997805..cec526c8043d 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -26,7 +26,7 @@ struct nd_device_driver { struct device_driver drv; unsigned long type; int (*probe)(struct device *dev); - int (*remove)(struct device *dev); + void (*remove)(struct device *dev); void (*shutdown)(struct device *dev); void (*notify)(struct device *dev, enum nvdimm_event event); }; diff --git a/include/linux/net.h b/include/linux/net.h index 9e2324efc26a..ba736b457a06 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -42,8 +42,6 @@ struct net; #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 -#define PROTO_CMSG_DATA_ONLY 0x0001 - #ifndef ARCH_HAS_SOCKET_TYPES /** * enum sock_type - Socket types @@ -138,7 +136,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, struct proto_ops { int family; - unsigned int flags; struct module *owner; int (*release) (struct socket *sock); int (*bind) (struct socket *sock, diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 934de56644e7..3de38d6a0aea 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -84,6 +84,12 @@ enum { NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */ NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */ + NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */ + + NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */ + NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */ + NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */ + NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */ /* * Add your fresh new feature above and remember to update @@ -157,6 +163,11 @@ enum { #define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST) #define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST) #define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC) +#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD) +#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS) +#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM) +#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD) +#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP) /* Finds the next feature with the highest number of the range of start till 0. */ @@ -234,7 +245,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) /* Changeable features with no special hardware requirements that defaults to off. 
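A hedged sketch of how a hypothetical HSR-offload-capable NIC driver might advertise the new feature bits from the netdev_features.h hunk above during probe.

static void example_advertise_hsr_offloads(struct net_device *dev)
{
	netdev_features_t hsr = NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM |
				NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP;

	/* Expose offloaded HSR tag insertion/removal, forwarding and
	 * duplication as both supported and enabled by default. */
	dev->hw_features |= hsr;
	dev->features |= hsr;
}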
*/ -#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST +#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD) #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_CTAG_RX | \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 259be67644e3..5b67ea89d5f2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -347,6 +347,7 @@ struct napi_struct { struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; + struct task_struct *thread; }; enum { @@ -358,6 +359,7 @@ enum { NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ + NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ }; enum { @@ -369,6 +371,7 @@ enum { NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), + NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), }; enum gro_result { @@ -376,7 +379,6 @@ enum gro_result { GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL, - GRO_DROP, GRO_CONSUMED, }; typedef enum gro_result gro_result_t; @@ -495,6 +497,8 @@ static inline bool napi_complete(struct napi_struct *n) return napi_complete_done(n, 0); } +int dev_set_threaded(struct net_device *dev, bool threaded); + /** * napi_disable - prevent NAPI from scheduling * @n: NAPI context @@ -504,20 +508,7 @@ static inline bool napi_complete(struct napi_struct *n) */ void napi_disable(struct napi_struct *n); -/** - * napi_enable - enable NAPI scheduling - * @n: NAPI context - * - * Resume NAPI from being scheduled on this context. - * Must be paired with napi_disable. - */ -static inline void napi_enable(struct napi_struct *n) -{ - BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - smp_mb__before_atomic(); - clear_bit(NAPI_STATE_SCHED, &n->state); - clear_bit(NAPI_STATE_NPSVC, &n->state); -} +void napi_enable(struct napi_struct *n); /** * napi_synchronize - wait until NAPI is not running @@ -859,6 +850,7 @@ enum tc_setup_type { TC_SETUP_QDISC_ETS, TC_SETUP_QDISC_TBF, TC_SETUP_QDISC_FIFO, + TC_SETUP_QDISC_HTB, }; /* These structures hold the attributes of bpf state that are being passed @@ -1213,19 +1205,6 @@ struct netdev_net_notifier { * struct netdev_phys_item_id *ppid) * Called to get the parent ID of the physical port of this device. * - * void (*ndo_udp_tunnel_add)(struct net_device *dev, - * struct udp_tunnel_info *ti); - * Called by UDP tunnel to notify a driver about the UDP port and socket - * address family that a UDP tunnel is listnening to. It is called only - * when a new port starts listening. The operation is protected by the - * RTNL. - * - * void (*ndo_udp_tunnel_del)(struct net_device *dev, - * struct udp_tunnel_info *ti); - * Called by UDP tunnel to notify the driver about a UDP port and socket - * address family that the UDP tunnel is not listening to anymore. The - * operation is protected by the RTNL. 
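A minimal sketch of opting a device into the threaded NAPI mode added in the netdevice.h hunk above; calling it from a driver's open path is an assumption, not something the patch mandates.

static int example_enable_threaded_napi(struct net_device *dev)
{
	/* Ask the core to service this device's NAPI instances from
	 * dedicated kernel threads instead of softirq context. */
	return dev_set_threaded(dev, true);
}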
- * * void* (*ndo_dfwd_add_station)(struct net_device *pdev, * struct net_device *dev) * Called by upper layer devices to accelerate switching or other @@ -1412,6 +1391,8 @@ struct net_device_ops { struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, struct sk_buff *skb, bool all_slaves); + struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, + struct sock *sk); netdev_features_t (*ndo_fix_features)(struct net_device *dev, netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, @@ -1464,10 +1445,6 @@ struct net_device_ops { struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); - void (*ndo_udp_tunnel_add)(struct net_device *dev, - struct udp_tunnel_info *ti); - void (*ndo_udp_tunnel_del)(struct net_device *dev, - struct udp_tunnel_info *ti); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@ -1607,6 +1584,12 @@ enum netdev_priv_flags { #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK +/* Specifies the type of the struct net_device::ml_priv pointer */ +enum netdev_ml_priv_type { + ML_PRIV_NONE, + ML_PRIV_CAN, +}; + /** * struct net_device - The DEVICE structure. * @@ -1802,6 +1785,7 @@ enum netdev_priv_flags { * @nd_net: Network namespace this network device is inside * * @ml_priv: Mid-layer private + * @ml_priv_type: Mid-layer private type * @lstats: Loopback statistics * @tstats: Tunnel statistics * @dstats: Dummy statistics @@ -1842,6 +1826,8 @@ enum netdev_priv_flags { * * @wol_enabled: Wake-on-LAN is enabled * + * @threaded: napi threaded mode is enabled + * * @net_notifier_list: List of per-net netdev notifier block * that follow this device when it is moved * to another network namespace. @@ -1873,7 +1859,6 @@ struct net_device { unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; - int irq; /* * Some hardware also needs these fields (state,dev_list, @@ -1895,6 +1880,23 @@ struct net_device { struct list_head lower; } adj_list; + /* Read-mostly cache-line for fast-path access */ + unsigned int flags; + unsigned int priv_flags; + const struct net_device_ops *netdev_ops; + int ifindex; + unsigned short gflags; + unsigned short hard_header_len; + + /* Note : dev->mtu is often read without holding a lock. + * Writers usually hold RTNL. + * It is recommended to use READ_ONCE() to annotate the reads, + * and to use WRITE_ONCE() to annotate the writes. 
+ */ + unsigned int mtu; + unsigned short needed_headroom; + unsigned short needed_tailroom; + netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; @@ -1903,10 +1905,15 @@ struct net_device { netdev_features_t mpls_features; netdev_features_t gso_partial_features; - int ifindex; + unsigned int min_mtu; + unsigned int max_mtu; + unsigned short type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; - struct net_device_stats stats; + struct net_device_stats stats; /* not used by modern drivers */ atomic_long_t rx_dropped; atomic_long_t tx_dropped; @@ -1920,7 +1927,6 @@ struct net_device { const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; #endif - const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; @@ -1939,34 +1945,12 @@ struct net_device { const struct header_ops *header_ops; - unsigned int flags; - unsigned int priv_flags; - - unsigned short gflags; - unsigned short padded; - unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; - /* Note : dev->mtu is often read without holding a lock. - * Writers usually hold RTNL. - * It is recommended to use READ_ONCE() to annotate the reads, - * and to use WRITE_ONCE() to annotate the writes. - */ - unsigned int mtu; - unsigned int min_mtu; - unsigned int max_mtu; - unsigned short type; - unsigned short hard_header_len; - unsigned char min_header_len; - unsigned char name_assign_type; - - unsigned short needed_headroom; - unsigned short needed_tailroom; - /* Interface address info. */ unsigned char perm_addr[MAX_ADDR_LEN]; unsigned char addr_assign_type; @@ -1977,7 +1961,10 @@ struct net_device { unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; + unsigned short padded; + spinlock_t addr_list_lock; + int irq; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; @@ -2114,8 +2101,10 @@ struct net_device { possible_net_t nd_net; /* mid-layer private */ + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + union { - void *ml_priv; struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; @@ -2159,6 +2148,7 @@ struct net_device { struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; + unsigned threaded:1; struct list_head net_notifier_list; @@ -2305,6 +2295,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev) netdev_set_rx_headroom(dev, -1); } +static inline void *netdev_get_ml_priv(struct net_device *dev, + enum netdev_ml_priv_type type) +{ + if (dev->ml_priv_type != type) + return NULL; + + return dev->ml_priv; +} + +static inline void netdev_set_ml_priv(struct net_device *dev, + void *ml_priv, + enum netdev_ml_priv_type type) +{ + WARN(dev->ml_priv_type && dev->ml_priv_type != type, + "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", + dev->ml_priv_type, type); + WARN(!dev->ml_priv_type && dev->ml_priv, + "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); + + dev->ml_priv = ml_priv; + dev->ml_priv_type = type; +} + /* * Net namespace inlines */ @@ -2633,6 +2646,7 @@ enum netdev_lag_hash { NETDEV_LAG_HASH_L23, NETDEV_LAG_HASH_E23, NETDEV_LAG_HASH_E34, + NETDEV_LAG_HASH_VLAN_SRCMAC, NETDEV_LAG_HASH_UNKNOWN, }; @@ -2876,6 +2890,8 @@ int init_dummy_netdev(struct net_device *dev); struct net_device 
*netdev_get_xmit_slave(struct net_device *dev, struct sk_buff *skb, bool all_slaves); +struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, + struct sock *sk); struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); @@ -3918,6 +3934,9 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, struct netlink_ext_ack *extack); int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack); +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); @@ -3940,18 +3959,44 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); -int xdp_umem_query(struct net_device *dev, u16 queue_id); - int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); +int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); +static __always_inline bool __is_skb_forwardable(const struct net_device *dev, + const struct sk_buff *skb, + const bool check_mtu) +{ + const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ + unsigned int len; + + if (!(dev->flags & IFF_UP)) + return false; + + if (!check_mtu) + return true; + + len = dev->mtu + dev->hard_header_len + vlan_hdr_len; + if (skb->len <= len) + return true; + + /* if TSO is enabled, we don't care about the length as the packet + * could be forwarded without being segmented before + */ + if (skb_is_gso(skb)) + return true; + + return false; +} + static __always_inline int ____dev_forward_skb(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + const bool check_mtu) { if (skb_orphan_frags(skb, GFP_ATOMIC) || - unlikely(!is_skb_forwardable(dev, skb))) { + unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; @@ -4352,6 +4397,7 @@ static inline void netif_tx_disable(struct net_device *dev) local_bh_disable(); cpu = smp_processor_id(); + spin_lock(&dev->tx_global_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -4359,6 +4405,7 @@ static inline void netif_tx_disable(struct net_device *dev) netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } + spin_unlock(&dev->tx_global_lock); local_bh_enable(); } diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0101747de549..f0f3a8354c3c 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -463,8 +463,6 @@ extern struct nf_ct_hook __rcu *nf_ct_hook; struct nlattr; struct nfnl_ct_hook { - struct nf_conn *(*get_ct)(const struct sk_buff *skb, - enum ip_conntrack_info *ctinfo); size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 9f118771e248..0bcf98098c5a 100644 --- 
a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -11,6 +11,8 @@ struct net; +void do_trace_netlink_extack(const char *msg); + static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) { return (struct nlmsghdr *)skb->data; @@ -90,6 +92,8 @@ struct netlink_ext_ack { static const char __msg[] = msg; \ struct netlink_ext_ack *__extack = (extack); \ \ + do_trace_netlink_extack(__msg); \ + \ if (__extack) \ __extack->_msg = __msg; \ } while (0) @@ -110,6 +114,8 @@ struct netlink_ext_ack { static const char __msg[] = msg; \ struct netlink_ext_ack *__extack = (extack); \ \ + do_trace_netlink_extack(__msg); \ + \ if (__extack) { \ __extack->_msg = __msg; \ __extack->bad_attr = (attr); \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 681ed98e4ba8..eadaabd18dc7 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -379,18 +379,20 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern int nfs_getattr(struct user_namespace *, const struct path *, + struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); -extern int nfs_permission(struct inode *, int); +extern int nfs_permission(struct user_namespace *, struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern int nfs_clear_invalid_mapping(struct address_space *mapping); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); -extern int nfs_setattr(struct dentry *, struct iattr *); +extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); @@ -570,8 +572,6 @@ nfs_have_writebacks(struct inode *inode) extern int nfs_readpage(struct file *, struct page *); extern int nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); -extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, - struct page *); /* * inline functions diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 38e60ec742df..6f76b32a0238 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -142,7 +142,7 @@ struct nfs_server { struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ atomic_long_t writeback; /* number of writeback pages */ - int flags; /* various flags */ + unsigned int flags; /* various flags */ /* The following are for internal use only. 
Also see uapi/linux/nfs_mount.h */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 @@ -153,6 +153,8 @@ struct nfs_server { #define NFS_MOUNT_LOCAL_FCNTL 0x200000 #define NFS_MOUNT_SOFTERR 0x400000 #define NFS_MOUNT_SOFTREVAL 0x800000 +#define NFS_MOUNT_WRITE_EAGER 0x01000000 +#define NFS_MOUNT_WRITE_WAIT 0x02000000 unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 103d44695323..0ba99c513649 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -38,5 +38,8 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, extern int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, struct posix_acl **pacl); +extern bool +nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt, + struct posix_acl **pacl); #endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index bfed36e342cc..b08787cd0881 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -697,7 +697,11 @@ enum nvme_opcode { nvme_opcode_name(nvme_cmd_resv_register), \ nvme_opcode_name(nvme_cmd_resv_report), \ nvme_opcode_name(nvme_cmd_resv_acquire), \ - nvme_opcode_name(nvme_cmd_resv_release)) + nvme_opcode_name(nvme_cmd_resv_release), \ + nvme_opcode_name(nvme_cmd_zone_mgmt_send), \ + nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \ + nvme_opcode_name(nvme_cmd_zone_append)) + /* @@ -1473,20 +1477,29 @@ enum { NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, - + NVME_SC_CMB_INVALID_USE = 0x12, + NVME_SC_PRP_INVALID_OFFSET = 0x13, + NVME_SC_ATOMIC_WU_EXCEEDED = 0x14, + NVME_SC_OP_DENIED = 0x15, NVME_SC_SGL_INVALID_OFFSET = 0x16, - NVME_SC_SGL_INVALID_SUBTYPE = 0x17, - + NVME_SC_RESERVED = 0x17, + NVME_SC_HOST_ID_INCONSIST = 0x18, + NVME_SC_KA_TIMEOUT_EXPIRED = 0x19, + NVME_SC_KA_TIMEOUT_INVALID = 0x1A, + NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B, NVME_SC_SANITIZE_FAILED = 0x1C, NVME_SC_SANITIZE_IN_PROGRESS = 0x1D, - + NVME_SC_SGL_INVALID_GRANULARITY = 0x1E, + NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F, NVME_SC_NS_WRITE_PROTECTED = 0x20, NVME_SC_CMD_INTERRUPTED = 0x21, + NVME_SC_TRANSIENT_TR_ERR = 0x22, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, + NVME_SC_FORMAT_IN_PROGRESS = 0x84, /* * Command Specific Status: @@ -1519,8 +1532,15 @@ enum { NVME_SC_NS_NOT_ATTACHED = 0x11a, NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, NVME_SC_CTRL_LIST_INVALID = 0x11c, + NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d, NVME_SC_BP_WRITE_PROHIBITED = 0x11e, + NVME_SC_CTRL_ID_INVALID = 0x11f, + NVME_SC_SEC_CTRL_STATE_INVALID = 0x120, + NVME_SC_CTRL_RES_NUM_INVALID = 0x121, + NVME_SC_RES_ID_INVALID = 0x122, NVME_SC_PMR_SAN_PROHIBITED = 0x123, + NVME_SC_ANA_GROUP_ID_INVALID = 0x124, + NVME_SC_ANA_ATTACH_FAILED = 0x125, /* * I/O Command Set Specific - NVM commands: diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 577f51436cf9..7e72d975cb76 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -29,11 +29,14 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that * sp_reg+sp_offset points to the iret return frame. + * + * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. + * Useful for code which doesn't have an ELF function annotation. 
*/ #define UNWIND_HINT_TYPE_CALL 0 #define UNWIND_HINT_TYPE_REGS 1 #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 -#define UNWIND_HINT_TYPE_RET_OFFSET 3 +#define UNWIND_HINT_TYPE_FUNC 3 #ifdef CONFIG_STACK_VALIDATION @@ -109,6 +112,12 @@ struct unwind_hint { .popsection .endm +.macro STACK_FRAME_NON_STANDARD func:req + .pushsection .discard.func_stack_frame_non_standard, "aw" + .long \func - . + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -122,6 +131,8 @@ struct unwind_hint { #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 .endm +.macro STACK_FRAME_NON_STANDARD func:req +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 07ca187fc5e4..1d7992a02e36 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -26,9 +26,6 @@ static inline int of_driver_match_device(struct device *dev, return of_match_device(drv->of_match_table, dev) != NULL; } -extern struct platform_device *of_dev_get(struct platform_device *dev); -extern void of_dev_put(struct platform_device *dev); - extern int of_device_add(struct platform_device *pdev); extern int of_device_register(struct platform_device *ofdev); extern void of_device_unregister(struct platform_device *ofdev); @@ -41,11 +38,6 @@ extern int of_device_request_module(struct device *dev); extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); -static inline void of_device_node_put(struct device *dev) -{ - of_node_put(dev->of_node); -} - static inline struct device_node *of_cpu_device_node_get(int cpu) { struct device *cpu_dev; @@ -97,15 +89,11 @@ static inline int of_device_uevent_modalias(struct device *dev, return -ENODEV; } -static inline void of_device_node_put(struct device *dev) { } - -static inline const struct of_device_id *__of_match_device( +static inline const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev) { return NULL; } -#define of_match_device(matches, dev) \ - __of_match_device(of_match_ptr(matches), (dev)) static inline struct device_node *of_cpu_device_node_get(int cpu) { diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index e8b78139f78c..aaf219bd0354 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -33,8 +33,6 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index, #endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq); -extern int of_irq_parse_one(struct device_node *device, int index, - struct of_phandle_args *out_irq); extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data); extern int of_irq_to_resource(struct device_node *dev, int index, struct resource *r); @@ -42,6 +40,8 @@ extern int of_irq_to_resource(struct device_node *dev, int index, extern void of_irq_init(const struct of_device_id *matches); #ifdef CONFIG_OF_IRQ +extern int of_irq_parse_one(struct device_node *device, int index, + struct of_phandle_args *out_irq); extern int of_irq_count(struct device_node *dev); extern int of_irq_get(struct device_node *dev, int index); extern int of_irq_get_byname(struct device_node *dev, const char *name); @@ -57,6 +57,11 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, extern void of_msi_configure(struct device *dev, struct device_node 
*np); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else +static inline int of_irq_parse_one(struct device_node *device, int index, + struct of_phandle_args *out_irq) +{ + return -EINVAL; +} static inline int of_irq_count(struct device_node *dev) { return 0; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index cfe8c607a628..2b05e7f7c238 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -26,9 +26,6 @@ of_phy_connect(struct net_device *dev, struct device_node *phy_np, struct phy_device * of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)); -struct phy_device * -of_phy_attach(struct net_device *dev, struct device_node *phy_np, - u32 flags, phy_interface_t iface); struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); int of_phy_register_fixed_link(struct device_node *np); @@ -100,13 +97,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np, return NULL; } -static inline struct phy_device *of_phy_attach(struct net_device *dev, - struct device_node *phy_np, - u32 flags, phy_interface_t iface) -{ - return NULL; -} - static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) { return NULL; diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h deleted file mode 100644 index b2a0f15f11fe..000000000000 --- a/include/linux/oprofile.h +++ /dev/null @@ -1,209 +0,0 @@ -/** - * @file oprofile.h - * - * API for machine-specific interrupts to interface - * to oprofile. - * - * @remark Copyright 2002 OProfile authors - * @remark Read the file COPYING - * - * @author John Levon <levon@movementarian.org> - */ - -#ifndef OPROFILE_H -#define OPROFILE_H - -#include <linux/types.h> -#include <linux/spinlock.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/printk.h> -#include <linux/atomic.h> - -/* Each escaped entry is prefixed by ESCAPE_CODE - * then one of the following codes, then the - * relevant data. - * These #defines live in this file so that arch-specific - * buffer sync'ing code can access them. - */ -#define ESCAPE_CODE ~0UL -#define CTX_SWITCH_CODE 1 -#define CPU_SWITCH_CODE 2 -#define COOKIE_SWITCH_CODE 3 -#define KERNEL_ENTER_SWITCH_CODE 4 -#define KERNEL_EXIT_SWITCH_CODE 5 -#define MODULE_LOADED_CODE 6 -#define CTX_TGID_CODE 7 -#define TRACE_BEGIN_CODE 8 -#define TRACE_END_CODE 9 -#define XEN_ENTER_SWITCH_CODE 10 -#define SPU_PROFILING_CODE 11 -#define SPU_CTX_SWITCH_CODE 12 -#define IBS_FETCH_CODE 13 -#define IBS_OP_CODE 14 - -struct dentry; -struct file_operations; -struct pt_regs; - -/* Operations structure to be filled in */ -struct oprofile_operations { - /* create any necessary configuration files in the oprofile fs. - * Optional. */ - int (*create_files)(struct dentry * root); - /* Do any necessary interrupt setup. Optional. */ - int (*setup)(void); - /* Do any necessary interrupt shutdown. Optional. */ - void (*shutdown)(void); - /* Start delivering interrupts. */ - int (*start)(void); - /* Stop delivering interrupts. */ - void (*stop)(void); - /* Arch-specific buffer sync functions. - * Return value = 0: Success - * Return value = -1: Failure - * Return value = 1: Run generic sync function - */ - int (*sync_start)(void); - int (*sync_stop)(void); - - /* Initiate a stack backtrace. Optional. */ - void (*backtrace)(struct pt_regs * const regs, unsigned int depth); - - /* Multiplex between different events. Optional. */ - int (*switch_events)(void); - /* CPU identification string. 
*/ - char * cpu_type; -}; - -/** - * One-time initialisation. *ops must be set to a filled-in - * operations structure. This is called even in timer interrupt - * mode so an arch can set a backtrace callback. - * - * If an error occurs, the fields should be left untouched. - */ -int oprofile_arch_init(struct oprofile_operations * ops); - -/** - * One-time exit/cleanup for the arch. - */ -void oprofile_arch_exit(void); - -/** - * Add a sample. This may be called from any context. - */ -void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); - -/** - * Add an extended sample. Use this when the PC is not from the regs, and - * we cannot determine if we're in kernel mode from the regs. - * - * This function does perform a backtrace. - * - */ -void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, - unsigned long event, int is_kernel); - -/** - * Add an hardware sample. - */ -void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, - unsigned long event, int is_kernel, - struct task_struct *task); - -/* Use this instead when the PC value is not from the regs. Doesn't - * backtrace. */ -void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); - -/* add a backtrace entry, to be called from the ->backtrace callback */ -void oprofile_add_trace(unsigned long eip); - - -/** - * Create a file of the given name as a child of the given root, with - * the specified file operations. - */ -int oprofilefs_create_file(struct dentry * root, - char const * name, const struct file_operations * fops); - -int oprofilefs_create_file_perm(struct dentry * root, - char const * name, const struct file_operations * fops, int perm); - -/** Create a file for read/write access to an unsigned long. */ -int oprofilefs_create_ulong(struct dentry * root, - char const * name, ulong * val); - -/** Create a file for read-only access to an unsigned long. */ -int oprofilefs_create_ro_ulong(struct dentry * root, - char const * name, ulong * val); - -/** Create a file for read-only access to an atomic_t. */ -int oprofilefs_create_ro_atomic(struct dentry * root, - char const * name, atomic_t * val); - -/** create a directory */ -struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name); - -/** - * Write the given asciz string to the given user buffer @buf, updating *offset - * appropriately. Returns bytes written or -EFAULT. - */ -ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset); - -/** - * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, - * updating *offset appropriately. Returns bytes written or -EFAULT. - */ -ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset); - -/** - * Read an ASCII string for a number from a userspace buffer and fill *val on success. - * Returns 0 on success, < 0 on error. - */ -int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); - -/** lock for read/write safety */ -extern raw_spinlock_t oprofilefs_lock; - -/** - * Add the contents of a circular buffer to the event buffer. 
- */ -void oprofile_put_buff(unsigned long *buf, unsigned int start, - unsigned int stop, unsigned int max); - -unsigned long oprofile_get_cpu_buffer_size(void); -void oprofile_cpu_buffer_inc_smpl_lost(void); - -/* cpu buffer functions */ - -struct op_sample; - -struct op_entry { - struct ring_buffer_event *event; - struct op_sample *sample; - unsigned long size; - unsigned long *data; -}; - -void oprofile_write_reserve(struct op_entry *entry, - struct pt_regs * const regs, - unsigned long pc, int code, int size); -int oprofile_add_data(struct op_entry *entry, unsigned long val); -int oprofile_add_data64(struct op_entry *entry, u64 val); -int oprofile_write_commit(struct op_entry *entry); - -#ifdef CONFIG_HW_PERF_EVENTS -int __init oprofile_perf_init(struct oprofile_operations *ops); -void oprofile_perf_exit(void); -char *op_name_from_perf_id(void); -#else -static inline int __init oprofile_perf_init(struct oprofile_operations *ops) -{ - pr_info("oprofile: hardware counters not available\n"); - return -ENODEV; -} -static inline void oprofile_perf_exit(void) { } -#endif /* CONFIG_HW_PERF_EVENTS */ - -#endif /* OPROFILE_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index ec5d0290e0ee..04a34c08e0a6 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -592,15 +592,9 @@ static inline void ClearPageCompound(struct page *page) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); int PageHeadHuge(struct page *page); -bool page_huge_active(struct page *page); #else TESTPAGEFLAG_FALSE(Huge) TESTPAGEFLAG_FALSE(HeadHuge) - -static inline bool page_huge_active(struct page *page) -{ - return 0; -} #endif @@ -816,7 +810,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) /* * Flags checked when a page is freed. Pages being freed should not have - * these flags set. It they are, there is a problem. + * these flags set. If they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ (1UL << PG_lru | 1UL << PG_locked | \ @@ -827,7 +821,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have these flags set. It they are set, + * Pages being prepped should not have these flags set. If they are set, * there has been a kernel bug or struct page corruption. * * __PG_HWPOISON is exceptional because it needs to be kept beyond page's diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 85bd413e784e..679591301994 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -12,7 +12,6 @@ struct page_counter { unsigned long low; unsigned long high; unsigned long max; - struct page_counter *parent; /* effective memory.min and memory.min usage tracking */ unsigned long emin; @@ -27,6 +26,14 @@ struct page_counter { /* legacy */ unsigned long watermark; unsigned long failcnt; + + /* + * 'parent' is placed here to be far from 'usage' to reduce + * cache false sharing, as 'usage' is written mostly while + * parent is frequently read for cgroup's hierarchical + * counting nature. 
+ */ + struct page_counter *parent; }; #if BITS_PER_LONG == 32 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d5570deff400..20225b067583 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -315,6 +315,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, #define FGP_NOWAIT 0x00000020 #define FGP_FOR_MMAP 0x00000040 #define FGP_HEAD 0x00000080 +#define FGP_ENTRY 0x00000100 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask); @@ -450,8 +451,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) } unsigned find_get_entries(struct address_space *mapping, pgoff_t start, - unsigned int nr_entries, struct page **entries, - pgoff_t *indices); + pgoff_t end, struct pagevec *pvec, pgoff_t *indices); unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); @@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page) return wait_on_page_bit_killable(compound_head(page), PG_locked); } -extern void put_and_wait_on_page_locked(struct page *page); - +int put_and_wait_on_page_locked(struct page *page, int state); void wait_on_page_writeback(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); @@ -757,9 +756,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void delete_from_page_cache(struct page *page); extern void __delete_from_page_cache(struct page *page, void *shadow); -int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); +void replace_page_cache_page(struct page *old, struct page *new); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); +loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, + int whence); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index ad4ddc17d403..7f3f19065a9f 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -25,10 +25,6 @@ struct pagevec { void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); -unsigned pagevec_lookup_entries(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t start, unsigned nr_entries, - pgoff_t *indices); void pagevec_remove_exceptionals(struct pagevec *pvec); unsigned pagevec_lookup_range(struct pagevec *pvec, struct address_space *mapping, diff --git a/include/linux/parport.h b/include/linux/parport.h index 1fb508c19e83..f981f794c850 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -297,6 +297,37 @@ int __must_check __parport_register_driver(struct parport_driver *, * parport_register_driver must be a macro so that KBUILD_MODNAME can * be expanded */ + +/** + * parport_register_driver - register a parallel port device driver + * @driver: structure describing the driver + * + * This can be called by a parallel port device driver in order + * to receive notifications about ports being found in the + * system, as well as ports no longer available. + * + * If devmodel is true then the new device model is used + * for registration. + * + * The @driver structure is allocated by the caller and must not be + * deallocated until after calling parport_unregister_driver(). 
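A speculative sketch of a filesystem ->llseek helper built on the mapping_seek_hole_data() declaration from the pagemap.h hunk above; the wrapper function and its use of i_size_read() are assumptions.

static loff_t example_llseek_hole_data(struct file *file, loff_t offset,
				       int whence)
{
	struct inode *inode = file_inode(file);

	/* Let the page cache resolve SEEK_HOLE/SEEK_DATA within i_size. */
	return mapping_seek_hole_data(inode->i_mapping, offset,
				      i_size_read(inode), whence);
}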
+ * + * If using the non device model: + * The driver's attach() function may block. The port that + * attach() is given will be valid for the duration of the + * callback, but if the driver wants to take a copy of the + * pointer it must call parport_get_port() to do so. Calling + * parport_register_device() on that port will do this for you. + * + * The driver's detach() function may block. The port that + * detach() is given will be valid for the duration of the + * callback, but if the driver wants to take a copy of the + * pointer it must call parport_get_port() to do so. + * + * + * Returns 0 on success. The non device model will always succeeds. + * but the new device model can fail and will return the error code. + **/ #define parport_register_driver(driver) \ __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) diff --git a/include/linux/parser.h b/include/linux/parser.h index 89e2b23fb888..dd79f45a37b8 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -29,6 +29,7 @@ typedef struct { int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); +int match_uint(substring_t *s, unsigned int *result); int match_u64(substring_t *, u64 *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index cc66bec8be90..b82c9b100e97 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -13,6 +13,12 @@ struct pci_epc; +enum pci_epc_interface_type { + UNKNOWN_INTERFACE = -1, + PRIMARY_INTERFACE, + SECONDARY_INTERFACE, +}; + enum pci_epc_irq_type { PCI_EPC_IRQ_UNKNOWN, PCI_EPC_IRQ_LEGACY, @@ -20,6 +26,19 @@ enum pci_epc_irq_type { PCI_EPC_IRQ_MSIX, }; +static inline const char * +pci_epc_interface_string(enum pci_epc_interface_type type) +{ + switch (type) { + case PRIMARY_INTERFACE: + return "primary"; + case SECONDARY_INTERFACE: + return "secondary"; + default: + return "UNKNOWN interface"; + } +} + /** * struct pci_epc_ops - set of function pointers for performing EPC operations * @write_header: ops to populate configuration space header @@ -36,6 +55,7 @@ enum pci_epc_irq_type { * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC * from the MSI-X capability register * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt + * @map_msi_irq: ops to map physical address to MSI address and return MSI data * @start: ops to start the PCI link * @stop: ops to stop the PCI link * @owner: the module owner containing the ops @@ -58,6 +78,10 @@ struct pci_epc_ops { int (*get_msix)(struct pci_epc *epc, u8 func_no); int (*raise_irq)(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); + int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr, u8 interrupt_num, + u32 entry_size, u32 *msi_data, + u32 *msi_addr_offset); int (*start)(struct pci_epc *epc); void (*stop)(struct pci_epc *epc); const struct pci_epc_features* (*get_features)(struct pci_epc *epc, @@ -175,10 +199,12 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); void pci_epc_destroy(struct pci_epc *epc); -int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); +int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, + enum pci_epc_interface_type type); void pci_epc_linkup(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); -void 
pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); +void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, + enum pci_epc_interface_type type); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, @@ -195,14 +221,19 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, enum pci_barno, u32 offset); int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); +int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr, u8 interrupt_num, + u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, u8 func_no); -unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features - *epc_features); +enum pci_barno +pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features); +enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features + *epc_features, enum pci_barno bar); struct pci_epc *pci_epc_get(const char *epc_name); void pci_epc_put(struct pci_epc *epc); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 6644ff3b0702..6833e2160ef1 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -9,11 +9,13 @@ #ifndef __LINUX_PCI_EPF_H #define __LINUX_PCI_EPF_H +#include <linux/configfs.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> struct pci_epf; +enum pci_epc_interface_type; enum pci_notify_event { CORE_INIT, @@ -21,6 +23,7 @@ enum pci_notify_event { }; enum pci_barno { + NO_BAR = -1, BAR_0, BAR_1, BAR_2, @@ -60,10 +63,13 @@ struct pci_epf_header { * @bind: ops to perform when a EPC device has been bound to EPF device * @unbind: ops to perform when a binding has been lost between a EPC device * and EPF device + * @add_cfs: ops to initialize function specific configfs attributes */ struct pci_epf_ops { int (*bind)(struct pci_epf *epf); void (*unbind)(struct pci_epf *epf); + struct config_group *(*add_cfs)(struct pci_epf *epf, + struct config_group *group); }; /** @@ -118,6 +124,12 @@ struct pci_epf_bar { * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc * @nb: notifier block to notify EPF of any EPC events (like linkup) * @lock: mutex to protect pci_epf_ops + * @sec_epc: the secondary EPC device to which this EPF device is bound + * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary + * EPC device + * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC + * @sec_epc_func_no: unique (physical) function number within the secondary EPC + * @group: configfs group associated with the EPF device */ struct pci_epf { struct device dev; @@ -134,6 +146,13 @@ struct pci_epf { struct notifier_block nb; /* mutex to protect against concurrent access of pci_epf_ops */ struct mutex lock; + + /* Below members are to attach secondary EPC to an endpoint function */ + struct pci_epc *sec_epc; + struct list_head sec_epc_list; + struct pci_epf_bar sec_epc_bar[6]; + u8 sec_epc_func_no; + struct config_group *group; }; /** @@ -164,16 +183,17 @@ static inline void *epf_get_drvdata(struct pci_epf *epf) return dev_get_drvdata(&epf->dev); } -const struct pci_epf_device_id * -pci_epf_match_device(const struct 
pci_epf_device_id *id, struct pci_epf *epf); struct pci_epf *pci_epf_create(const char *name); void pci_epf_destroy(struct pci_epf *epf); int __pci_epf_register_driver(struct pci_epf_driver *driver, struct module *owner); void pci_epf_unregister_driver(struct pci_epf_driver *driver); void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, - size_t align); -void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); + size_t align, enum pci_epc_interface_type type); +void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, + enum pci_epc_interface_type type); int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); +struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf, + struct config_group *group); #endif /* __LINUX_PCI_EPF_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 53f4904ee83d..86c799c97b77 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1926,7 +1926,7 @@ enum pci_fixup_pass { }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ +#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ class_shift, hook) \ __ADDRESSABLE(hook) \ asm(".section " #sec ", \"a\" \n" \ @@ -1935,10 +1935,33 @@ enum pci_fixup_pass { ".long " #class ", " #class_shift " \n" \ ".long " #hook " - . \n" \ ".previous \n"); + +/* + * Clang's LTO may rename static functions in C, but has no way to + * handle such renamings when referenced from inline asm. To work + * around this, create global C stubs for these cases. + */ +#ifdef CONFIG_LTO_CLANG +#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook, stub) \ + void stub(struct pci_dev *dev); \ + void stub(struct pci_dev *dev) \ + { \ + hook(dev); \ + } \ + ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, stub) +#else +#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook, stub) \ + ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) +#endif + #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ class_shift, hook) \ __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ - class_shift, hook) + class_shift, hook, __UNIQUE_ID(hook)) #else /* Anonymous variables would be nice... 
*/ #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d8156a5dbee8..a76ccb697bef 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -51,6 +51,7 @@ #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 +#define PCI_CLASS_MEMORY_CXL 0x0502 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 @@ -881,6 +882,7 @@ #define PCI_DEVICE_ID_TI_X620 0xac8d #define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f +#define PCI_DEVICE_ID_TI_J721E 0xb00d #define PCI_DEVICE_ID_TI_DRA74x 0xb500 #define PCI_DEVICE_ID_TI_DRA72x 0xb501 @@ -2588,6 +2590,8 @@ #define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9a38f579bc76..3f7f89ea5e51 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -606,6 +606,7 @@ struct swevent_hlist { #define PERF_ATTACH_TASK 0x04 #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 +#define PERF_ATTACH_SCHED_CB 0x20 struct perf_cgroup; struct perf_buffer; @@ -872,6 +873,7 @@ struct perf_cpu_context { struct list_head cgrp_cpuctx_entry; #endif + struct list_head sched_cb_entry; int sched_cb_usage; int online; @@ -998,7 +1000,7 @@ struct perf_sample_data { struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 period; - u64 weight; + union perf_sample_weight weight; u64 txn; union perf_mem_data_src data_src; @@ -1047,7 +1049,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->raw = NULL; data->br_stack = NULL; data->period = period; - data->weight = 0; + data->weight.full = 0; data->data_src.val = PERF_MEM_NA; data->txn = 0; } diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 8fcdfa52eb4b..5e772392a379 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres * To be differentiate with macro pte_mkyoung, this macro is used on platforms * where software maintains page access bit. */ -#ifndef pte_sw_mkyoung -static inline pte_t pte_sw_mkyoung(pte_t pte) -{ - return pte; -} -#define pte_sw_mkyoung pte_sw_mkyoung -#endif - #ifndef pte_savedwrite #define pte_savedwrite pte_write #endif @@ -912,6 +904,10 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, #define pgprot_device pgprot_noncached #endif +#ifndef pgprot_mhp +#define pgprot_mhp(prot) (prot) +#endif + #ifdef CONFIG_MMU #ifndef pgprot_modify #define pgprot_modify pgprot_modify @@ -1314,6 +1310,17 @@ static inline int pmd_trans_unstable(pmd_t *pmd) #endif } +/* + * the ordering of these checks is important for pmds with _page_devmap set. + * if we check pmd_trans_unstable() first we will trip the bad_pmd() check + * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly + * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. 
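A hedged sketch of the call-site pattern this check ordering is meant to support, using the pmd_devmap_trans_unstable() helper defined immediately below; example_walk_pmd() is invented for illustration.

#include <linux/pgtable.h>

static int example_walk_pmd(pmd_t *pmd)
{
	/*
	 * Using the combined helper keeps the pmd_devmap() test ahead of
	 * pmd_trans_unstable(), so a devmap pmd does not hit pmd_clear_bad().
	 */
	if (pmd_devmap_trans_unstable(pmd))
		return 1;	/* unstable: caller retries or falls back */

	/* the pmd maps a stable PTE page here; safe to walk its PTEs */
	return 0;
}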
+ */ +static inline int pmd_devmap_trans_unstable(pmd_t *pmd) +{ + return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); +} + #ifndef CONFIG_NUMA_BALANCING /* * Technically a PTE can be PROTNONE even when not doing NUMA balancing but diff --git a/include/linux/phy.h b/include/linux/phy.h index 9effb511acde..1a12e4436b5b 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -71,11 +71,11 @@ extern const int phy_10gbit_features_array[1]; /* * Set phydev->irq to PHY_POLL if interrupts are not supported, - * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if - * the attached driver handles the interrupt + * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if + * the attached MAC driver handles the interrupt */ #define PHY_POLL -1 -#define PHY_IGNORE_INTERRUPT -2 +#define PHY_MAC_INTERRUPT -2 #define PHY_IS_INTERNAL 0x00000001 #define PHY_RST_AFTER_CLK_EN 0x00000002 @@ -104,8 +104,10 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII + * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX + * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR @@ -135,8 +137,10 @@ typedef enum { PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, + PHY_INTERFACE_MODE_100BASEX, PHY_INTERFACE_MODE_1000BASEX, PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_5GBASER, PHY_INTERFACE_MODE_RXAUI, PHY_INTERFACE_MODE_XAUI, /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ @@ -207,6 +211,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "1000base-x"; case PHY_INTERFACE_MODE_2500BASEX: return "2500base-x"; + case PHY_INTERFACE_MODE_5GBASER: + return "5gbase-r"; case PHY_INTERFACE_MODE_RXAUI: return "rxaui"; case PHY_INTERFACE_MODE_XAUI: @@ -217,6 +223,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "usxgmii"; case PHY_INTERFACE_MODE_10GKR: return "10gbase-kr"; + case PHY_INTERFACE_MODE_100BASEX: + return "100base-x"; default: return "unknown"; } @@ -484,6 +492,7 @@ struct macsec_ops; * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * @loopback_enabled: Set true if this PHY has been loopbacked successfully. * @downshifted_rate: Set true if link speed has been downshifted. + * @is_on_sfp_module: Set true if PHY is located on an SFP module. * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. 
* @irq: IRQ number of the PHY's interrupt (-1 if none) @@ -499,6 +508,7 @@ struct macsec_ops; * * @speed: Current link speed * @duplex: Current duplex + * @port: Current port * @pause: Current pause * @asym_pause: Current asymmetric pause * @supported: Combined MAC/PHY supported linkmodes @@ -556,6 +566,7 @@ struct phy_device { unsigned sysfs_links:1; unsigned loopback_enabled:1; unsigned downshifted_rate:1; + unsigned is_on_sfp_module:1; unsigned autoneg:1; /* The most recently read link state */ @@ -577,6 +588,7 @@ struct phy_device { */ int speed; int duplex; + int port; int pause; int asym_pause; u8 master_slave_get; @@ -644,8 +656,11 @@ struct phy_device { const struct macsec_ops *macsec_ops; #endif }; -#define to_phy_device(d) container_of(to_mdio_device(d), \ - struct phy_device, mdio) + +static inline struct phy_device *to_phy_device(const struct device *dev) +{ + return container_of(to_mdio_device(dev), struct phy_device, mdio); +} /** * struct phy_tdr_config - Configuration of a TDR raw test @@ -1193,11 +1208,11 @@ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad, * @phydev: the phy_device struct * * NOTE: must be kept in sync with addition/removal of PHY_POLL and - * PHY_IGNORE_INTERRUPT + * PHY_MAC_INTERRUPT */ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) { - return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; + return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT; } /** @@ -1284,6 +1299,15 @@ static inline bool phy_is_internal(struct phy_device *phydev) } /** + * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module + * @phydev: the phy_device struct + */ +static inline bool phy_on_sfp(struct phy_device *phydev) +{ + return phydev->is_on_sfp_module; +} + +/** * phy_interface_mode_is_rgmii - Convenience function for testing if a * PHY interface mode is RGMII (all variants) * @mode: the &phy_interface_t enum diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h deleted file mode 100644 index 8429e73911a1..000000000000 --- a/include/linux/platform_data/clk-u300.h +++ /dev/null @@ -1 +0,0 @@ -void __init u300_clk_init(void __iomem *base); diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 86376779ab31..5ff8597ceabd 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -1286,6 +1286,16 @@ enum ec_feature_code { EC_FEATURE_ISH = 40, /* New TCPMv2 TYPEC_ prefaced commands supported */ EC_FEATURE_TYPEC_CMD = 41, + /* + * The EC will wait for direction from the AP to enter Type-C alternate + * modes or USB4. + */ + EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY = 42, + /* + * The EC will wait for an acknowledge from the AP after setting the + * mux. 
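For context, EC feature codes of 32 and above are reported in the second word of struct ec_response_get_features, so a host-side check for the new EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY bit would look roughly like the sketch below. The helper name is invented; EC_FEATURE_MASK_1() is the companion of the EC_FEATURE_MASK_0() macro in this header.

#include <linux/platform_data/cros_ec_commands.h>

static bool example_ec_requires_ap_mode_entry(const struct ec_response_get_features *r)
{
	/* feature codes >= 32 live in flags[1]; EC_FEATURE_MASK_1() subtracts 32 */
	return r->flags[1] & EC_FEATURE_MASK_1(EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY);
}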
+ */ + EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) @@ -4600,6 +4610,7 @@ enum ec_codec_i2s_rx_subcmd { EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2, EC_CODEC_I2S_RX_SET_DAIFMT = 0x3, EC_CODEC_I2S_RX_SET_BCLK = 0x4, + EC_CODEC_I2S_RX_RESET = 0x5, EC_CODEC_I2S_RX_SUBCMD_COUNT, }; @@ -4731,6 +4742,7 @@ enum ec_reboot_cmd { EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */ EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */ + EC_REBOOT_COLD_AP_OFF = 8, /* Cold-reboot and don't boot AP */ }; /* Flags for ec_params_reboot_ec.reboot_flags */ @@ -5567,6 +5579,32 @@ struct ec_response_typec_discovery { struct svid_mode_info svids[0]; } __ec_align1; +/* USB Type-C commands for AP-controlled device policy. */ +#define EC_CMD_TYPEC_CONTROL 0x0132 + +enum typec_control_command { + TYPEC_CONTROL_COMMAND_EXIT_MODES, + TYPEC_CONTROL_COMMAND_CLEAR_EVENTS, + TYPEC_CONTROL_COMMAND_ENTER_MODE, +}; + +struct ec_params_typec_control { + uint8_t port; + uint8_t command; /* enum typec_control_command */ + uint16_t reserved; + + /* + * This section will be interpreted based on |command|. Define a + * placeholder structure to avoid having to increase the size and bump + * the command version when adding new sub-commands. + */ + union { + uint32_t clear_events_mask; + uint8_t mode_to_enter; /* enum typec_mode */ + uint8_t placeholder[128]; + }; +} __ec_align1; + /* * Gather all status information for a port. * @@ -6054,6 +6092,13 @@ struct ec_params_charger_control { uint8_t allow_charging; } __ec_align_size1; +/* Get ACK from the USB-C SS muxes */ +#define EC_CMD_USB_PD_MUX_ACK 0x0603 + +struct ec_params_usb_pd_mux_ack { + uint8_t port; /* USB-C port number */ +} __ec_align1; + /*****************************************************************************/ /* * Reserve a range of host commands for board-specific, experimental, or diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h deleted file mode 100644 index 069637e6004f..000000000000 --- a/include/linux/platform_data/dma-atmel.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Header file for the Atmel AHB DMA Controller driver - * - * Copyright (C) 2008 Atmel Corporation - */ -#ifndef AT_HDMAC_H -#define AT_HDMAC_H - -#include <linux/dmaengine.h> - -/** - * struct at_dma_platform_data - Controller configuration parameters - * @nr_channels: Number of channels supported by hardware (max 8) - * @cap_mask: dma_capability flags supported by the platform - */ -struct at_dma_platform_data { - unsigned int nr_channels; - dma_cap_mask_t cap_mask; -}; - -/** - * struct at_dma_slave - Controller-specific information about a slave - * @dma_dev: required DMA master device - * @cfg: Platform-specific initializer for the CFG register - */ -struct at_dma_slave { - struct device *dma_dev; - u32 cfg; -}; - - -/* Platform-configurable bits in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - -#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ -#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ -#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ -#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ -#define ATC_SRC_H2SEL_SW (0x0 << 9) -#define ATC_SRC_H2SEL_HW (0x1 << 9) -#define 
ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ -#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ -#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ -#define ATC_DST_H2SEL_SW (0x0 << 13) -#define ATC_DST_H2SEL_HW (0x1 << 13) -#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ -#define ATC_SOD (0x1 << 16) /* Stop On Done */ -#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ -#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ -#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ -#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) -#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) -#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ -#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ -#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) -#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) -#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) - - -#endif /* AT_HDMAC_H */ diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h deleted file mode 100644 index 4cca529f8d56..000000000000 --- a/include/linux/platform_data/dma-coh901318.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Platform data for the COH901318 DMA controller - * Copyright (C) 2007-2013 ST-Ericsson - */ - -#ifndef PLAT_COH901318_H -#define PLAT_COH901318_H - -#ifdef CONFIG_COH901318 - -/* We only support the U300 DMA channels */ -#define U300_DMA_MSL_TX_0 0 -#define U300_DMA_MSL_TX_1 1 -#define U300_DMA_MSL_TX_2 2 -#define U300_DMA_MSL_TX_3 3 -#define U300_DMA_MSL_TX_4 4 -#define U300_DMA_MSL_TX_5 5 -#define U300_DMA_MSL_TX_6 6 -#define U300_DMA_MSL_RX_0 7 -#define U300_DMA_MSL_RX_1 8 -#define U300_DMA_MSL_RX_2 9 -#define U300_DMA_MSL_RX_3 10 -#define U300_DMA_MSL_RX_4 11 -#define U300_DMA_MSL_RX_5 12 -#define U300_DMA_MSL_RX_6 13 -#define U300_DMA_MMCSD_RX_TX 14 -#define U300_DMA_MSPRO_TX 15 -#define U300_DMA_MSPRO_RX 16 -#define U300_DMA_UART0_TX 17 -#define U300_DMA_UART0_RX 18 -#define U300_DMA_APEX_TX 19 -#define U300_DMA_APEX_RX 20 -#define U300_DMA_PCM_I2S0_TX 21 -#define U300_DMA_PCM_I2S0_RX 22 -#define U300_DMA_PCM_I2S1_TX 23 -#define U300_DMA_PCM_I2S1_RX 24 -#define U300_DMA_XGAM_CDI 25 -#define U300_DMA_XGAM_PDI 26 -#define U300_DMA_SPI_TX 27 -#define U300_DMA_SPI_RX 28 -#define U300_DMA_GENERAL_PURPOSE_0 29 -#define U300_DMA_GENERAL_PURPOSE_1 30 -#define U300_DMA_GENERAL_PURPOSE_2 31 -#define U300_DMA_GENERAL_PURPOSE_3 32 -#define U300_DMA_GENERAL_PURPOSE_4 33 -#define U300_DMA_GENERAL_PURPOSE_5 34 -#define U300_DMA_GENERAL_PURPOSE_6 35 -#define U300_DMA_GENERAL_PURPOSE_7 36 -#define U300_DMA_GENERAL_PURPOSE_8 37 -#define U300_DMA_UART1_TX 38 -#define U300_DMA_UART1_RX 39 - -#define U300_DMA_DEVICE_CHANNELS 32 -#define U300_DMA_CHANNELS 40 - -/** - * coh901318_filter_id() - DMA channel filter function - * @chan: dma channel handle - * @chan_id: id of dma channel to be filter out - * - * In dma_request_channel() it specifies what channel id to be requested - */ -bool coh901318_filter_id(struct dma_chan *chan, void *chan_id); -#else -static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) -{ - return false; -} -#endif - -#endif /* PLAT_COH901318_H */ diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index 30e676b36b24..725602d9df91 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -57,15 +57,4 @@ 
struct sdma_script_start_addrs { /* End of v4 array */ }; -/** - * struct sdma_platform_data - platform specific data for SDMA engine - * - * @fw_name The firmware name - * @script_addrs SDMA scripts addresses in SDMA ROM - */ -struct sdma_platform_data { - char *fw_name; - struct sdma_script_start_addrs *script_addrs; -}; - #endif /* __MACH_MXC_SDMA_H__ */ diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h deleted file mode 100644 index a2c56fcd0534..000000000000 --- a/include/linux/platform_data/efm32-spi.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ -#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__ - -#include <linux/types.h> - -/** - * struct efm32_spi_pdata - * @location: pinmux location for the I/O pins (to be written to the ROUTE - * register) - */ -struct efm32_spi_pdata { - u8 location; -}; -#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */ diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h deleted file mode 100644 index ccbb8f11db75..000000000000 --- a/include/linux/platform_data/efm32-uart.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * - * - */ -#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ -#define __LINUX_PLATFORM_DATA_EFM32_UART_H__ - -#include <linux/types.h> - -/** - * struct efm32_uart_pdata - * @location: pinmux location for the I/O pins (to be written to the ROUTE - * register) - */ -struct efm32_uart_pdata { - u8 location; -}; -#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */ diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h deleted file mode 100644 index c628bb5e1061..000000000000 --- a/include/linux/platform_data/i2c-hid.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * HID over I2C protocol implementation - * - * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com> - * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive for - * more details. - */ - -#ifndef __LINUX_I2C_HID_H -#define __LINUX_I2C_HID_H - -#include <linux/regulator/consumer.h> -#include <linux/types.h> - -/** - * struct i2chid_platform_data - used by hid over i2c implementation. - * @hid_descriptor_address: i2c register where the HID descriptor is stored. - * @supplies: regulators for powering on the device. - * @post_power_delay_ms: delay after powering on before device is usable. - * - * Note that it is the responsibility of the platform driver (or the acpi 5.0 - * driver, or the flattened device tree) to setup the irq related to the gpio in - * the struct i2c_board_info. 
- * The platform driver should also setup the gpio according to the device: - * - * A typical example is the following: - * irq = gpio_to_irq(intr_gpio); - * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info - * gpio_request(intr_gpio, "elan-irq"); - * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP); - */ -struct i2c_hid_platform_data { - u16 hid_descriptor_address; - struct regulator_bulk_data supplies[2]; - int post_power_delay_ms; -}; - -#endif /* __LINUX_I2C_HID_H */ diff --git a/include/linux/platform_data/mlxcpld.h b/include/linux/platform_data/mlxcpld.h new file mode 100644 index 000000000000..d7610b528856 --- /dev/null +++ b/include/linux/platform_data/mlxcpld.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Mellanox I2C multiplexer support in CPLD + * + * Copyright (C) 2016-2020 Mellanox Technologies + */ + +#ifndef _LINUX_I2C_MLXCPLD_H +#define _LINUX_I2C_MLXCPLD_H + +/* Platform data for the CPLD I2C multiplexers */ + +/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info + * @chan_ids - channels array + * @num_adaps - number of adapters + * @sel_reg_addr - mux select register offset in CPLD space + * @reg_size: register size in bytes + * @handle: handle to be passed by callback + * @completion_notify: callback to notify when all the adapters are created + */ +struct mlxcpld_mux_plat_data { + int *chan_ids; + int num_adaps; + int sel_reg_addr; + u8 reg_size; + void *handle; + int (*completion_notify)(void *handle, struct i2c_adapter *parent, + struct i2c_adapter *adapters[]); +}; + +#endif /* _LINUX_I2C_MLXCPLD_H */ diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index f0b8947e6b07..91051e9907f3 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -108,8 +108,7 @@ struct omap_mmc_platform_data { const char *name; u32 ocr_mask; - /* Card detection IRQs */ - int card_detect_irq; + /* Card detection */ int (*card_detect)(struct device *dev, int slot); unsigned int ban_openended:1; diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h index ca8337695c2a..27ea99af6e1d 100644 --- a/include/linux/platform_data/simplefb.h +++ b/include/linux/platform_data/simplefb.h @@ -16,6 +16,7 @@ #define SIMPLEFB_FORMATS \ { \ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \ + { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \ diff --git a/include/linux/platform_data/x86/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h deleted file mode 100644 index b08dcb183fca..000000000000 --- a/include/linux/platform_data/x86/mlxcpld.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * mlxcpld.h - Mellanox I2C multiplexer support in CPLD - * - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Michael Shych <michaels@mellanox.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _LINUX_I2C_MLXCPLD_H -#define _LINUX_I2C_MLXCPLD_H - -/* Platform data for the CPLD I2C multiplexers */ - -/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info - * @adap_ids - adapter array - * @num_adaps - number of adapters - * @sel_reg_addr - mux select register offset in CPLD space - */ -struct mlxcpld_mux_plat_data { - int *adap_ids; - int num_adaps; - int sel_reg_addr; -}; - -#endif /* _LINUX_I2C_MLXCPLD_H */ diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h new file mode 100644 index 000000000000..a6329003aee7 --- /dev/null +++ b/include/linux/platform_profile.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Platform profile sysfs interface + * + * See Documentation/ABI/testing/sysfs-platform_profile.rst for more + * information. + */ + +#ifndef _PLATFORM_PROFILE_H_ +#define _PLATFORM_PROFILE_H_ + +#include <linux/bitops.h> + +/* + * If more options are added please update profile_names array in + * platform_profile.c and sysfs-platform_profile documentation. 
+ */ + +enum platform_profile_option { + PLATFORM_PROFILE_LOW_POWER, + PLATFORM_PROFILE_COOL, + PLATFORM_PROFILE_QUIET, + PLATFORM_PROFILE_BALANCED, + PLATFORM_PROFILE_BALANCED_PERFORMANCE, + PLATFORM_PROFILE_PERFORMANCE, + PLATFORM_PROFILE_LAST, /*must always be last */ +}; + +struct platform_profile_handler { + unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + int (*profile_get)(struct platform_profile_handler *pprof, + enum platform_profile_option *profile); + int (*profile_set)(struct platform_profile_handler *pprof, + enum platform_profile_option profile); +}; + +int platform_profile_register(struct platform_profile_handler *pprof); +int platform_profile_remove(void); +void platform_profile_notify(void); + +#endif /*_PLATFORM_PROFILE_H_*/ diff --git a/include/linux/pm.h b/include/linux/pm.h index 47aca6bac1d6..482313a8ccfc 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -537,6 +537,8 @@ struct pm_subsys_data { spinlock_t lock; unsigned int refcount; #ifdef CONFIG_PM_CLK + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; struct list_head clock_list; #endif #ifdef CONFIG_PM_GENERIC_DOMAINS diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2ca919ae8d36..dfcfbcecc34b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -9,6 +9,7 @@ #define _LINUX_PM_DOMAIN_H #include <linux/device.h> +#include <linux/ktime.h> #include <linux/mutex.h> #include <linux/pm.h> #include <linux/err.h> @@ -55,6 +56,10 @@ * * GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain * powered on except for system suspend. + * + * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its + * components' next wakeup when determining the + * optimal idle state. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -62,6 +67,7 @@ #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) #define GENPD_FLAG_CPU_DOMAIN (1U << 4) #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) +#define GENPD_FLAG_MIN_RESIDENCY (1U << 6) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -129,6 +135,7 @@ struct generic_pm_domain { unsigned int state); struct gpd_dev_ops dev_ops; s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
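A hedged sketch of how an ACPI or firmware driver might hook into the platform_profile interface introduced above: advertise the supported choices, provide get/set callbacks, and register the handler. All example_* names are invented and error handling is trimmed.

#include <linux/platform_profile.h>

static int example_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* a real driver reads firmware state */
	return 0;
}

static int example_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	return 0;	/* a real driver writes the selection to firmware */
}

static struct platform_profile_handler example_pprof = {
	.profile_get = example_profile_get,
	.profile_set = example_profile_set,
};

static int example_register(void)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, example_pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, example_pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, example_pprof.choices);

	return platform_profile_register(&example_pprof);
}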
*/ + ktime_t next_wakeup; /* Maintained by the domain governor */ bool max_off_time_changed; bool cached_power_down_ok; bool cached_power_down_state_idx; @@ -191,6 +198,7 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + ktime_t next_wakeup; void *data; }; @@ -217,6 +225,7 @@ int pm_genpd_remove(struct generic_pm_domain *genpd); int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb); int dev_pm_genpd_remove_notifier(struct device *dev); +void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -275,6 +284,9 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev) return -EOPNOTSUPP; } +static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) +{ } + #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) #define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) #endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 1435c054016a..c0371efa4a0f 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -98,6 +98,9 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index); + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); @@ -111,6 +114,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, bool available); struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); @@ -143,28 +148,32 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) void dev_pm_opp_put_prop_name(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); +struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); -int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); +int 
dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_sync_regulators(struct device *dev); #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} @@ -184,6 +193,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) return 0; } +static inline +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index) +{ + return 0; +} + static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; @@ -217,31 +233,37 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, unsigned long u_volt) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} @@ -249,7 +271,7 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) @@ -280,19 +302,19 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} @@ -300,57 +322,76 @@ static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void 
dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} +static inline struct opp_table * +devm_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} -static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name) +static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} +static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, + const char **names, struct device ***virt_devs) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, + struct opp_table *dst_table, struct dev_pm_opp *src_opp) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - return -ENOTSUPP; + return -EOPNOTSUPP; } -static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { return -EOPNOTSUPP; } static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) @@ -366,11 +407,17 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask { } +static inline int dev_pm_opp_sync_regulators(struct device *dev) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); +int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); @@ -387,12 +434,17 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) { - return -ENOTSUPP; + return -EOPNOTSUPP; +} + +static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) +{ + return -EOPNOTSUPP; } static inline void 
dev_pm_opp_of_remove_table(struct device *dev) @@ -401,7 +453,7 @@ static inline void dev_pm_opp_of_remove_table(struct device *dev) static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) @@ -410,7 +462,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) @@ -426,7 +478,7 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) static inline int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_of_unregister_em(struct device *dev) @@ -435,12 +487,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { - return -ENOTSUPP; + return -EOPNOTSUPP; } #endif diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h index 1ea5bae708a1..12cbbf305969 100644 --- a/include/linux/pmbus.h +++ b/include/linux/pmbus.h @@ -34,6 +34,15 @@ */ #define PMBUS_WRITE_PROTECTED BIT(1) +/* + * PMBUS_NO_CAPABILITY + * + * Some PMBus chips don't respond with valid data when reading the CAPABILITY + * register. For such chips, this flag should be set so that the PMBus core + * driver doesn't use CAPABILITY to determine it's behavior. 
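A short sketch of how a board might pass the PMBUS_NO_CAPABILITY flag defined just below to the PMBus core through platform data; the device name and I2C address are made up.

#include <linux/i2c.h>
#include <linux/pmbus.h>

static struct pmbus_platform_data example_pmbus_pdata = {
	/* the chip's CAPABILITY register is unreliable, so don't consult it */
	.flags = PMBUS_NO_CAPABILITY,
};

static struct i2c_board_info example_pmbus_info = {
	I2C_BOARD_INFO("example-pmic", 0x60),
	.platform_data = &example_pmbus_pdata,
};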
+ */ +#define PMBUS_NO_CAPABILITY BIT(2) + struct pmbus_platform_data { u32 flags; /* Device specific flags */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 90797f1b421d..307094ebb88c 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -15,6 +15,8 @@ #include <linux/refcount.h> #include <uapi/linux/posix_acl.h> +struct user_namespace; + struct posix_acl_entry { short e_tag; unsigned short e_perm; @@ -61,23 +63,24 @@ posix_acl_release(struct posix_acl *acl) extern void posix_acl_init(struct posix_acl *, int); extern struct posix_acl *posix_acl_alloc(int, gfp_t); -extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *); -extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); extern struct posix_acl *get_posix_acl(struct inode *, int); -extern int set_posix_acl(struct inode *, int, struct posix_acl *); +extern int set_posix_acl(struct user_namespace *, struct inode *, int, + struct posix_acl *); #ifdef CONFIG_FS_POSIX_ACL -extern int posix_acl_chmod(struct inode *, umode_t); +int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); -extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); +int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *, + struct posix_acl **); -extern int simple_set_acl(struct inode *, struct posix_acl *, int); +extern int simple_set_acl(struct user_namespace *, struct inode *, + struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); @@ -85,6 +88,9 @@ struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); +int posix_acl_valid(struct user_namespace *, const struct posix_acl *); +int posix_acl_permission(struct user_namespace *, struct inode *, + const struct posix_acl *, int); static inline void cache_no_acl(struct inode *inode) { @@ -92,7 +98,8 @@ static inline void cache_no_acl(struct inode *inode) inode->i_default_acl = NULL; } #else -static inline int posix_acl_chmod(struct inode *inode, umode_t mode) +static inline int posix_acl_chmod(struct user_namespace *mnt_userns, + struct inode *inode, umode_t mode) { return 0; } diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 2387709991b5..060e8d203181 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -33,13 +33,17 @@ posix_acl_xattr_count(size_t size) } #ifdef CONFIG_FS_POSIX_ACL -void posix_acl_fix_xattr_from_user(void *value, size_t size); -void posix_acl_fix_xattr_to_user(void *value, size_t size); +void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, + void *value, size_t size); +void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, + void *value, size_t size); #else -static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) +static inline void posix_acl_fix_xattr_from_user(struct user_namespace 
*mnt_userns, + void *value, size_t size) { } -static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) +static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, + void *value, size_t size) { } #endif diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h deleted file mode 100644 index 02f94a1b323b..000000000000 --- a/include/linux/power/max8903_charger.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver - * - * Copyright (C) 2011 Samsung Electronics - * MyungJoo Ham <myungjoo.ham@samsung.com> - */ - -#ifndef __MAX8903_CHARGER_H__ -#define __MAX8903_CHARGER_H__ - -struct max8903_pdata { - /* - * GPIOs - * cen, chg, flt, dcm and usus are optional. - * dok and uok are not optional depending on the status of - * dc_valid and usb_valid. - */ - int cen; /* Charger Enable input */ - int dok; /* DC(Adapter) Power OK output */ - int uok; /* USB Power OK output */ - int chg; /* Charger status output */ - int flt; /* Fault output */ - int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */ - int usus; /* USB Suspend Input (1: suspended) */ - - /* - * DC(Adapter/TA) is wired - * When dc_valid is true, - * dok should be valid. - * - * At least one of dc_valid or usb_valid should be true. - */ - bool dc_valid; - /* - * USB is wired - * When usb_valid is true, - * uok should be valid. - */ - bool usb_valid; -}; - -#endif /* __MAX8903_CHARGER_H__ */ diff --git a/include/linux/property.h b/include/linux/property.h index 0a9001fe7aea..dd4687b56239 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -488,4 +488,11 @@ fwnode_create_software_node(const struct property_entry *properties, const struct fwnode_handle *parent); void fwnode_remove_software_node(struct fwnode_handle *fwnode); +int device_add_software_node(struct device *dev, const struct software_node *node); +void device_remove_software_node(struct device *dev); + +int device_create_managed_software_node(struct device *dev, + const struct property_entry *properties, + const struct software_node *parent); + #endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 49d155cd2dfe..b801ead1e2bb 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -66,6 +66,7 @@ enum sev_cmd { SEV_CMD_LAUNCH_MEASURE = 0x033, SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034, SEV_CMD_LAUNCH_FINISH = 0x035, + SEV_CMD_ATTESTATION_REPORT = 0x036, /* Guest migration commands (outgoing) */ SEV_CMD_SEND_START = 0x040, @@ -483,6 +484,22 @@ struct sev_data_dbg { u32 len; /* In */ } __packed; +/** + * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters + * + * @handle: handle of the VM + * @mnonce: a random nonce that will be included in the report. + * @address: physical address where the report will be copied. + * @len: length of the physical buffer. + */ +struct sev_data_attestation_report { + u32 handle; /* In */ + u32 reserved; + u64 address; /* In */ + u8 mnonce[16]; /* In */ + u32 len; /* In/Out */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 2a9df80ea887..b5ebf6c01292 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -171,7 +171,7 @@ static inline void ptrace_event(int event, unsigned long message) * * Check whether @event is enabled and, if so, report @event and @pid * to the ptrace parent. 
@pid is reported as the pid_t seen from the - * the ptrace parent's pid namespace. + * ptrace parent's pid namespace. * * Called without locks. */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 4d58dc8943f0..e339b48de32d 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -470,7 +470,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) /** * @brief qed_chain_reset - Resets the chain to its start state * - * @param p_chain pointer to a previously allocted chain + * @param p_chain pointer to a previously allocated chain */ static inline void qed_chain_reset(struct qed_chain *p_chain) { diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d7db17996322..d31ecaf4fdd3 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -141,12 +141,18 @@ static inline void rb_insert_color_cached(struct rb_node *node, rb_insert_color(node, &root->rb_root); } -static inline void rb_erase_cached(struct rb_node *node, - struct rb_root_cached *root) + +static inline struct rb_node * +rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) { + struct rb_node *leftmost = NULL; + if (root->rb_leftmost == node) - root->rb_leftmost = rb_next(node); + leftmost = root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); + + return leftmost; } static inline void rb_replace_node_cached(struct rb_node *victim, @@ -158,4 +164,198 @@ static inline void rb_replace_node_cached(struct rb_node *victim, rb_replace_node(victim, new, &root->rb_root); } +/* + * The below helper functions use 2 operators with 3 different + * calling conventions. The operators are related like: + * + * comp(a->key,b) < 0 := less(a,b) + * comp(a->key,b) > 0 := less(b,a) + * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) + * + * If these operators define a partial order on the elements we make no + * guarantee on which of the elements matching the key is found. See + * rb_find(). + * + * The reason for this is to allow the find() interface without requiring an + * on-stack dummy object, which might not be feasible due to object size. + */ + +/** + * rb_add_cached() - insert @node into the leftmost cached tree @tree + * @node: node to insert + * @tree: leftmost cached tree to insert @node into + * @less: operator defining the (partial) node order + * + * Returns @node when it is the new leftmost, or NULL. + */ +static __always_inline struct rb_node * +rb_add_cached(struct rb_node *node, struct rb_root_cached *tree, + bool (*less)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_root.rb_node; + struct rb_node *parent = NULL; + bool leftmost = true; + + while (*link) { + parent = *link; + if (less(node, parent)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; + leftmost = false; + } + } + + rb_link_node(node, parent, link); + rb_insert_color_cached(node, tree, leftmost); + + return leftmost ? 
node : NULL; +} + +/** + * rb_add() - insert @node into @tree + * @node: node to insert + * @tree: tree to insert @node into + * @less: operator defining the (partial) node order + */ +static __always_inline void +rb_add(struct rb_node *node, struct rb_root *tree, + bool (*less)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_node; + struct rb_node *parent = NULL; + + while (*link) { + parent = *link; + if (less(node, parent)) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + + rb_link_node(node, parent, link); + rb_insert_color(node, tree); +} + +/** + * rb_find_add() - find equivalent @node in @tree, or add @node + * @node: node to look-for / insert + * @tree: tree to search / modify + * @cmp: operator defining the node order + * + * Returns the rb_node matching @node, or NULL when no match is found and @node + * is inserted. + */ +static __always_inline struct rb_node * +rb_find_add(struct rb_node *node, struct rb_root *tree, + int (*cmp)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_node; + struct rb_node *parent = NULL; + int c; + + while (*link) { + parent = *link; + c = cmp(node, parent); + + if (c < 0) + link = &parent->rb_left; + else if (c > 0) + link = &parent->rb_right; + else + return parent; + } + + rb_link_node(node, parent, link); + rb_insert_color(node, tree); + return NULL; +} + +/** + * rb_find() - find @key in tree @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining the node order + * + * Returns the rb_node matching @key or NULL. + */ +static __always_inline struct rb_node * +rb_find(const void *key, const struct rb_root *tree, + int (*cmp)(const void *key, const struct rb_node *)) +{ + struct rb_node *node = tree->rb_node; + + while (node) { + int c = cmp(key, node); + + if (c < 0) + node = node->rb_left; + else if (c > 0) + node = node->rb_right; + else + return node; + } + + return NULL; +} + +/** + * rb_find_first() - find the first @key in @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + * + * Returns the leftmost node matching @key, or NULL. + */ +static __always_inline struct rb_node * +rb_find_first(const void *key, const struct rb_root *tree, + int (*cmp)(const void *key, const struct rb_node *)) +{ + struct rb_node *node = tree->rb_node; + struct rb_node *match = NULL; + + while (node) { + int c = cmp(key, node); + + if (c <= 0) { + if (!c) + match = node; + node = node->rb_left; + } else if (c > 0) { + node = node->rb_right; + } + } + + return match; +} + +/** + * rb_next_match() - find the next @key in @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + * + * Returns the next node matching @key, or NULL. 
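The comment block above spells out the less()/cmp() calling conventions for the new rb_add*()/rb_find*() helpers (rb_next_match() and rb_for_each() follow below). A small, hypothetical user keyed by a u64 (names invented, not part of the patch) would wire them up like this:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;
	u64 key;
};

static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_node, rb)->key <
	       rb_entry(b, struct demo_node, rb)->key;
}

static int demo_cmp(const void *key, const struct rb_node *node)
{
	u64 k = *(const u64 *)key;
	const struct demo_node *dn = rb_entry(node, struct demo_node, rb);

	if (k < dn->key)
		return -1;
	if (k > dn->key)
		return 1;
	return 0;
}

static void demo_insert(struct rb_root *root, struct demo_node *dn)
{
	rb_add(&dn->rb, root, demo_less);
}

static struct demo_node *demo_lookup(struct rb_root *root, u64 key)
{
	struct rb_node *node = rb_find(&key, root, demo_cmp);

	return node ? rb_entry(node, struct demo_node, rb) : NULL;
}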
+ */ +static __always_inline struct rb_node * +rb_next_match(const void *key, struct rb_node *node, + int (*cmp)(const void *key, const struct rb_node *)) +{ + node = rb_next(node); + if (node && cmp(key, node)) + node = NULL; + return node; +} + +/** + * rb_for_each() - iterates a subtree matching @key + * @node: iterator + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + */ +#define rb_for_each(node, key, tree, cmp) \ + for ((node) = rb_find_first((key), (tree), (cmp)); \ + (node); (node) = rb_next_match((key), (node), (cmp))) + #endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index b36afe7b22c9..8afe886e85f1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -63,6 +63,122 @@ struct rcu_cblist { #define RCU_NEXT_TAIL 3 #define RCU_CBLIST_NSEGS 4 + +/* + * ==NOCB Offloading state machine== + * + * + * ---------------------------------------------------------------------------- + * | SEGCBLIST_SOFTIRQ_ONLY | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, without holding nocb_lock. | + * ---------------------------------------------------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | SEGCBLIST_OFFLOADED | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | + * | allowing nocb_timer to be armed. | + * ---------------------------------------------------------------------------- + * | + * v + * ----------------------------------- + * | | + * v v + * --------------------------------------- ----------------------------------| + * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | + * | | | | + * | | | | + * | CB kthread woke up and | | GP kthread woke up and | + * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED| + * | Processes callbacks concurrently | | | + * | with rcu_core(), holding | | | + * | nocb_lock. | | | + * --------------------------------------- ----------------------------------- + * | | + * ----------------------------------- + * | + * v + * |--------------------------------------------------------------------------| + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | + * | handling callbacks. | + * ---------------------------------------------------------------------------- + */ + + + +/* + * ==NOCB De-Offloading state machine== + * + * + * |--------------------------------------------------------------------------| + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | + * | ignores callbacks. | + * ---------------------------------------------------------------------------- + * | + * v + * |--------------------------------------------------------------------------| + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | CB/GP kthreads and local rcu_core() handle callbacks concurrently | + * | holding nocb_lock. Wake up CB and GP kthreads if necessary. 
| + * ---------------------------------------------------------------------------- + * | + * v + * ----------------------------------- + * | | + * v v + * ---------------------------------------------------------------------------| + * | | + * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | + * | | | + * | GP kthread woke up and | CB kthread woke up and | + * | acknowledged the fact that | acknowledged the fact that | + * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. | + * | | The CB kthread goes to sleep | + * | The callbacks from the target CPU | until it ever gets re-offloaded. | + * | will be ignored from the GP kthread | | + * | loop. | | + * ---------------------------------------------------------------------------- + * | | + * ----------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | 0 | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | + * | Flush pending nocb_timer. Flush nocb bypass callbacks. | + * ---------------------------------------------------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | SEGCBLIST_SOFTIRQ_ONLY | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, without holding nocb_lock. | + * ---------------------------------------------------------------------------- + */ +#define SEGCBLIST_ENABLED BIT(0) +#define SEGCBLIST_SOFTIRQ_ONLY BIT(1) +#define SEGCBLIST_KTHREAD_CB BIT(2) +#define SEGCBLIST_KTHREAD_GP BIT(3) +#define SEGCBLIST_OFFLOADED BIT(4) + struct rcu_segcblist { struct rcu_head *head; struct rcu_head **tails[RCU_CBLIST_NSEGS]; @@ -72,8 +188,8 @@ struct rcu_segcblist { #else long len; #endif - u8 enabled; - u8 offloaded; + long seglen[RCU_CBLIST_NSEGS]; + u8 flags; }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index fd02c5fa60cb..bd04f722714f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -33,6 +33,8 @@ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define ulong2long(a) (*(long *)(&(a))) +#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b))) +#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b))) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); @@ -110,8 +112,14 @@ static inline void rcu_user_exit(void) { } #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); +int rcu_nocb_cpu_offload(int cpu); +int rcu_nocb_cpu_deoffload(int cpu); +void rcu_nocb_flush_deferred_wakeup(void); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } +static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } +static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } +static inline void rcu_nocb_flush_deferred_wakeup(void) { } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /** @@ -846,19 +854,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define __is_kvfree_rcu_offset(offset) ((offset) < 4096) -/* - * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. 
- */ -#define __kvfree_rcu(head, offset) \ - do { \ - BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \ - kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ - } while (0) - /** * kfree_rcu() - kfree an object after a grace period. - * @ptr: pointer to kfree - * @rhf: the name of the struct rcu_head within the type of @ptr. + * @ptr: pointer to kfree for both single- and double-argument invocations. + * @rhf: the name of the struct rcu_head within the type of @ptr, + * but only for double-argument invocations. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore @@ -871,7 +871,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in __kvfree_rcu(). If this error is triggered, you can + * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * @@ -881,13 +881,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ -#define kfree_rcu(ptr, rhf) \ -do { \ - typeof (ptr) ___p = (ptr); \ - \ - if (___p) \ - __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ -} while (0) +#define kfree_rcu kvfree_rcu /** * kvfree_rcu() - kvfree an object after a grace period. @@ -919,7 +913,17 @@ do { \ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) #define KVFREE_GET_MACRO(_1, _2, NAME, ...) 
NAME -#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu_arg_2(ptr, rhf) \ +do { \ + typeof (ptr) ___p = (ptr); \ + \ + if (___p) { \ + BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ + kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \ + (offsetof(typeof(*(ptr)), rhf))); \ + } \ +} while (0) + #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h deleted file mode 100644 index 3ab1ddf151a2..000000000000 --- a/include/linux/regulator/ab8500.h +++ /dev/null @@ -1,166 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * - * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson - * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson - * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson - */ - -#ifndef __LINUX_MFD_AB8500_REGULATOR_H -#define __LINUX_MFD_AB8500_REGULATOR_H - -#include <linux/platform_device.h> - -/* AB8500 regulators */ -enum ab8500_regulator_id { - AB8500_LDO_AUX1, - AB8500_LDO_AUX2, - AB8500_LDO_AUX3, - AB8500_LDO_INTCORE, - AB8500_LDO_TVOUT, - AB8500_LDO_AUDIO, - AB8500_LDO_ANAMIC1, - AB8500_LDO_ANAMIC2, - AB8500_LDO_DMIC, - AB8500_LDO_ANA, - AB8500_NUM_REGULATORS, -}; - -/* AB8505 regulators */ -enum ab8505_regulator_id { - AB8505_LDO_AUX1, - AB8505_LDO_AUX2, - AB8505_LDO_AUX3, - AB8505_LDO_AUX4, - AB8505_LDO_AUX5, - AB8505_LDO_AUX6, - AB8505_LDO_INTCORE, - AB8505_LDO_ADC, - AB8505_LDO_AUDIO, - AB8505_LDO_ANAMIC1, - AB8505_LDO_ANAMIC2, - AB8505_LDO_AUX8, - AB8505_LDO_ANA, - AB8505_NUM_REGULATORS, -}; - -/* AB8500 and AB8505 register initialization */ -struct ab8500_regulator_reg_init { - int id; - u8 mask; - u8 value; -}; - -#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \ - { \ - .id = _id, \ - .mask = _mask, \ - .value = _value, \ - } - -/* AB8500 registers */ -enum ab8500_regulator_reg { - AB8500_REGUREQUESTCTRL2, - AB8500_REGUREQUESTCTRL3, - AB8500_REGUREQUESTCTRL4, - AB8500_REGUSYSCLKREQ1HPVALID1, - AB8500_REGUSYSCLKREQ1HPVALID2, - AB8500_REGUHWHPREQ1VALID1, - AB8500_REGUHWHPREQ1VALID2, - AB8500_REGUHWHPREQ2VALID1, - AB8500_REGUHWHPREQ2VALID2, - AB8500_REGUSWHPREQVALID1, - AB8500_REGUSWHPREQVALID2, - AB8500_REGUSYSCLKREQVALID1, - AB8500_REGUSYSCLKREQVALID2, - AB8500_REGUMISC1, - AB8500_VAUDIOSUPPLY, - AB8500_REGUCTRL1VAMIC, - AB8500_VPLLVANAREGU, - AB8500_VREFDDR, - AB8500_EXTSUPPLYREGU, - AB8500_VAUX12REGU, - AB8500_VRF1VAUX3REGU, - AB8500_VAUX1SEL, - AB8500_VAUX2SEL, - AB8500_VRF1VAUX3SEL, - AB8500_REGUCTRL2SPARE, - AB8500_REGUCTRLDISCH, - AB8500_REGUCTRLDISCH2, - AB8500_NUM_REGULATOR_REGISTERS, -}; - -/* AB8505 registers */ -enum ab8505_regulator_reg { - AB8505_REGUREQUESTCTRL1, - AB8505_REGUREQUESTCTRL2, - AB8505_REGUREQUESTCTRL3, - AB8505_REGUREQUESTCTRL4, - AB8505_REGUSYSCLKREQ1HPVALID1, - AB8505_REGUSYSCLKREQ1HPVALID2, - AB8505_REGUHWHPREQ1VALID1, - AB8505_REGUHWHPREQ1VALID2, - AB8505_REGUHWHPREQ2VALID1, - AB8505_REGUHWHPREQ2VALID2, - AB8505_REGUSWHPREQVALID1, - AB8505_REGUSWHPREQVALID2, - AB8505_REGUSYSCLKREQVALID1, - AB8505_REGUSYSCLKREQVALID2, - AB8505_REGUVAUX4REQVALID, - AB8505_REGUMISC1, - AB8505_VAUDIOSUPPLY, - AB8505_REGUCTRL1VAMIC, - AB8505_VSMPSAREGU, - AB8505_VSMPSBREGU, - AB8505_VSAFEREGU, /* NOTE! 
PRCMU register */ - AB8505_VPLLVANAREGU, - AB8505_EXTSUPPLYREGU, - AB8505_VAUX12REGU, - AB8505_VRF1VAUX3REGU, - AB8505_VSMPSASEL1, - AB8505_VSMPSASEL2, - AB8505_VSMPSASEL3, - AB8505_VSMPSBSEL1, - AB8505_VSMPSBSEL2, - AB8505_VSMPSBSEL3, - AB8505_VSAFESEL1, /* NOTE! PRCMU register */ - AB8505_VSAFESEL2, /* NOTE! PRCMU register */ - AB8505_VSAFESEL3, /* NOTE! PRCMU register */ - AB8505_VAUX1SEL, - AB8505_VAUX2SEL, - AB8505_VRF1VAUX3SEL, - AB8505_VAUX4REQCTRL, - AB8505_VAUX4REGU, - AB8505_VAUX4SEL, - AB8505_REGUCTRLDISCH, - AB8505_REGUCTRLDISCH2, - AB8505_REGUCTRLDISCH3, - AB8505_CTRLVAUX5, - AB8505_CTRLVAUX6, - AB8505_NUM_REGULATOR_REGISTERS, -}; - -/* AB8500 external regulators */ -struct ab8500_ext_regulator_cfg { - bool hwreq; /* requires hw mode or high power mode */ -}; - -enum ab8500_ext_regulator_id { - AB8500_EXT_SUPPLY1, - AB8500_EXT_SUPPLY2, - AB8500_EXT_SUPPLY3, - AB8500_NUM_EXT_REGULATORS, -}; - -/* AB8500 regulator platform data */ -struct ab8500_regulator_platform_data { - int num_reg_init; - struct ab8500_regulator_reg_init *reg_init; - int num_regulator; - struct regulator_init_data *regulator; - int num_ext_regulator; - struct regulator_init_data *ext_regulator; -}; - -#endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 2024944fd2f7..20e84a84fb77 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -332,6 +332,12 @@ regulator_get_exclusive(struct device *dev, const char *id) } static inline struct regulator *__must_check +devm_regulator_get_exclusive(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { return ERR_PTR(-ENODEV); @@ -486,6 +492,11 @@ static inline int regulator_get_voltage(struct regulator *regulator) return -EINVAL; } +static inline int regulator_sync_voltage(struct regulator *regulator) +{ + return -EINVAL; +} + static inline int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { @@ -578,6 +589,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator return 0; } +static inline int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_set_suspend_voltage(struct regulator *regulator, + int min_uV, int max_uV, + suspend_state_t state) +{ + return -EINVAL; +} + static inline void *regulator_get_drvdata(struct regulator *regulator) { return NULL; diff --git a/include/linux/regulator/mt6315-regulator.h b/include/linux/regulator/mt6315-regulator.h new file mode 100644 index 000000000000..3b80d3f3910c --- /dev/null +++ b/include/linux/regulator/mt6315-regulator.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __LINUX_REGULATOR_MT6315_H +#define __LINUX_REGULATOR_MT6315_H + +#define MT6315_RP 3 +#define MT6315_PP 6 +#define MT6315_SP 7 + +enum { + MT6315_VBUCK1 = 0, + MT6315_VBUCK2, + MT6315_VBUCK3, + MT6315_VBUCK4, + MT6315_VBUCK_MAX, +}; + +/* Register */ +#define MT6315_TOP2_ELR7 0x139 +#define MT6315_TOP_TMA_KEY 0x39F +#define MT6315_TOP_TMA_KEY_H 0x3A0 +#define MT6315_BUCK_TOP_CON0 0x1440 +#define MT6315_BUCK_TOP_CON1 0x1443 +#define MT6315_BUCK_TOP_ELR0 0x1449 +#define MT6315_BUCK_TOP_ELR2 0x144B +#define MT6315_BUCK_TOP_ELR4 0x144D +#define MT6315_BUCK_TOP_ELR6 0x144F +#define MT6315_VBUCK1_DBG0 0x1499 +#define MT6315_VBUCK1_DBG4 0x149D +#define MT6315_VBUCK2_DBG0 0x1519 +#define MT6315_VBUCK2_DBG4 0x151D +#define MT6315_VBUCK3_DBG0 0x1599 +#define MT6315_VBUCK3_DBG4 0x159D +#define MT6315_VBUCK4_DBG0 0x1619 +#define MT6315_VBUCK4_DBG4 0x161D +#define MT6315_BUCK_TOP_4PHASE_ANA_CON42 0x16B1 + +#define PROTECTION_KEY_H 0x9C +#define PROTECTION_KEY 0xEA + +#endif /* __LINUX_REGULATOR_MT6315_H */ diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h index 1bbd3014f906..71902f41c919 100644 --- a/include/linux/regulator/pca9450.h +++ b/include/linux/regulator/pca9450.h @@ -147,6 +147,9 @@ enum { #define BUCK6_FPWM 0x04 #define BUCK6_ENMODE_MASK 0x03 +/* PCA9450_REG_BUCK123_PRESET_EN bit */ +#define BUCK123_PRESET_EN 0x80 + /* PCA9450_BUCK1OUT_DVS0 bits */ #define BUCK1OUT_DVS0_MASK 0x7F #define BUCK1OUT_DVS0_DEFAULT 0x14 @@ -216,4 +219,11 @@ enum { #define IRQ_THERM_105 0x02 #define IRQ_THERM_125 0x01 +/* PCA9450_REG_RESET_CTRL bits */ +#define WDOG_B_CFG_MASK 0xC0 +#define WDOG_B_CFG_NONE 0x00 +#define WDOG_B_CFG_WARM 0x40 +#define WDOG_B_CFG_COLD_LDO12 0x80 +#define WDOG_B_CFG_COLD 0xC0 + #endif /* __LINUX_REG_PCA9450_H__ */ diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index 647051662174..82b211518136 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -3,8 +3,6 @@ struct notifier_block; -#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) - /** * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc * processor. @@ -26,6 +24,8 @@ struct qcom_ssr_notify_data { bool crashed; }; +#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) + void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb); int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb); diff --git a/include/linux/reset.h b/include/linux/reset.h index 439fec7112a9..b9109efa2a5c 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -363,6 +363,25 @@ __must_check devm_reset_control_get_exclusive_released(struct device *dev, } /** + * devm_reset_control_get_optional_exclusive_released - resource managed + * reset_control_get_optional_exclusive_released() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed-and-optional variant of reset_control_get_exclusive_released(). For + * reset controllers returned from this function, reset_control_put() is called + * automatically on driver detach. + * + * See reset_control_get_exclusive_released() for more information. 
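A short, hypothetical consumer of devm_reset_control_get_optional_exclusive_released(), whose body follows just below (driver and variable names are invented for illustration):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int demo_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* Optional: yields NULL rather than an error if no reset is wired up. */
	rst = devm_reset_control_get_optional_exclusive_released(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* A "released" control must be acquired before it can be toggled. */
	ret = reset_control_acquire(rst);
	if (ret)
		return ret;

	reset_control_assert(rst);
	reset_control_deassert(rst);
	reset_control_release(rst);

	return 0;
}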
+ */ +static inline struct reset_control * +__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev, + const char *id) +{ + return __devm_reset_control_get(dev, id, 0, false, true, false); +} + +/** * devm_reset_control_get_shared - resource managed reset_control_get_shared() * @dev: device to be reset by the controller * @id: reset line name diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 70085ca1a3fc..def5c62c93b3 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -213,7 +213,8 @@ struct page_vma_mapped_walk { static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - if (pvmw->pte) + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ + if (pvmw->pte && !PageHuge(pvmw->page)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h index daded9fddf36..22fc3a69b683 100644 --- a/include/linux/rpmsg/qcom_glink.h +++ b/include/linux/rpmsg/qcom_glink.h @@ -7,12 +7,17 @@ struct qcom_glink; +#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK) +void qcom_glink_ssr_notify(const char *ssr_name); +#else +static inline void qcom_glink_ssr_notify(const char *ssr_name) {} +#endif + #if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM) struct qcom_glink *qcom_glink_smem_register(struct device *parent, struct device_node *node); void qcom_glink_smem_unregister(struct qcom_glink *glink); -void qcom_glink_ssr_notify(const char *ssr_name); #else @@ -24,7 +29,6 @@ qcom_glink_smem_register(struct device *parent, } static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} -static inline void qcom_glink_ssr_notify(const char *ssr_name) {} #endif #endif diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 568909449c13..bd611e26291d 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -141,6 +141,8 @@ struct rtc_device { */ unsigned long set_offset_nsec; + unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)]; + time64_t range_min; timeu64_t range_max; time64_t start_secs; diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h deleted file mode 100644 index b31f2856733d..000000000000 --- a/include/linux/rtc/sirfsoc_rtciobrg.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * RTC I/O Bridge interfaces for CSR SiRFprimaII - * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module - * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 
- */ -#ifndef _SIRFSOC_RTC_IOBRG_H_ -#define _SIRFSOC_RTC_IOBRG_H_ - -struct regmap_config; - -extern void sirfsoc_rtc_iobrg_besyncing(void); - -extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); - -extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); -struct regmap *devm_regmap_init_iobg(struct device *dev, - const struct regmap_config *config); - -#endif diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 3dcd617e65ae..7ce9a51ae5c0 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -128,4 +128,11 @@ do { \ 1 : ({ local_irq_restore(flags); 0; }); \ }) +#ifdef arch_rwlock_is_contended +#define rwlock_is_contended(lock) \ + arch_rwlock_is_contended(&(lock)->raw_lock) +#else +#define rwlock_is_contended(lock) ((void)(lock), 0) +#endif /* arch_rwlock_is_contended */ + #endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 6e3a5eeec509..ef00bb22164c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -47,6 +47,7 @@ struct cfs_rq; struct fs_struct; struct futex_pi_state; struct io_context; +struct io_uring_task; struct mempolicy; struct nameidata; struct nsproxy; @@ -65,7 +66,6 @@ struct sighand_struct; struct signal_struct; struct task_delay_info; struct task_group; -struct io_uring_task; /* * Task state bitmask. NOTE! These bits are also @@ -895,6 +895,9 @@ struct task_struct { /* CLONE_CHILD_CLEARTID: */ int __user *clear_child_tid; + /* PF_IO_WORKER */ + void *pf_io_worker; + u64 utime; u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME @@ -1871,11 +1874,32 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) * value indicates whether a reschedule was done in fact. * cond_resched_lock() will drop the spinlock before scheduling, */ -#ifndef CONFIG_PREEMPTION -extern int _cond_resched(void); +#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) +extern int __cond_resched(void); + +#ifdef CONFIG_PREEMPT_DYNAMIC + +DECLARE_STATIC_CALL(cond_resched, __cond_resched); + +static __always_inline int _cond_resched(void) +{ + return static_call_mod(cond_resched)(); +} + #else + +static inline int _cond_resched(void) +{ + return __cond_resched(); +} + +#endif /* CONFIG_PREEMPT_DYNAMIC */ + +#else + static inline int _cond_resched(void) { return 0; } -#endif + +#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */ #define cond_resched() ({ \ ___might_sleep(__FILE__, __LINE__, 0); \ @@ -1883,12 +1907,24 @@ static inline int _cond_resched(void) { return 0; } }) extern int __cond_resched_lock(spinlock_t *lock); +extern int __cond_resched_rwlock_read(rwlock_t *lock); +extern int __cond_resched_rwlock_write(rwlock_t *lock); #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) +#define cond_resched_rwlock_read(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_read(lock); \ +}) + +#define cond_resched_rwlock_write(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_write(lock); \ +}) + static inline void cond_resched_rcu(void) { #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) @@ -1912,6 +1948,23 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } +/* + * Check if a rwlock is contended. + * Returns non-zero if there is another task waiting on the rwlock. 
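rwlock_is_contended() above, together with rwlock_needbreak() completed just below and the new cond_resched_rwlock_read()/cond_resched_rwlock_write() helpers, lets long lock-holders yield to waiters. A hypothetical read-side scan (illustrative only, not from this patch):

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * The scanned bitmap must tolerate the rwlock being dropped and re-taken
 * between iterations.
 */
static void demo_scan(rwlock_t *lock, unsigned long *bitmap, unsigned int nbits)
{
	unsigned int i;

	read_lock(lock);
	for (i = 0; i < nbits; i++) {
		if (test_bit(i, bitmap)) {
			/* ... read-only work on bit i ... */
		}
		/* Drops the lock, reschedules and re-acquires it if needed. */
		cond_resched_rwlock_read(lock);
	}
	read_unlock(lock);
}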
+ * Returns zero if the lock is not contended or the system / underlying + * rwlock implementation does not support contention detection. + * Technically does not depend on CONFIG_PREEMPTION, but a general need + * for low latency. + */ +static inline int rwlock_needbreak(rwlock_t *lock) +{ +#ifdef CONFIG_PREEMPTION + return rwlock_is_contended(lock); +#else + return 0; +#endif +} + static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); @@ -1968,6 +2021,11 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask); #define TASK_SIZE_OF(tsk) TASK_SIZE #endif +#ifdef CONFIG_SMP +/* Returns effective CPU energy utilization, as seen by the scheduler */ +unsigned long sched_cpu_util(int cpu, unsigned long max); +#endif /* CONFIG_SMP */ + #ifdef CONFIG_RSEQ /* diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 1ae08b8462a4..90b2a0bce11c 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -140,7 +140,8 @@ static inline bool in_vfork(struct task_struct *tsk) * another oom-unkillable task does this it should blame itself. */ rcu_read_lock(); - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + ret = tsk->vfork_done && + rcu_dereference(tsk->real_parent)->mm == tsk->mm; rcu_read_unlock(); return ret; diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index 7d64feafc408..ab83d85e1183 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -11,16 +11,9 @@ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority * values are inverted: lower p->prio value means higher priority. - * - * The MAX_USER_RT_PRIO value allows the actual maximum - * RT priority to be separate from the value exported to - * user-space. This allows kernel threads to set their - * priority to a value higher than any user task. Note: - * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. */ -#define MAX_USER_RT_PRIO 100 -#define MAX_RT_PRIO MAX_USER_RT_PRIO +#define MAX_RT_PRIO 100 #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) @@ -34,15 +27,6 @@ #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) /* - * 'User priority' is the nice value converted to something we - * can work with better when scaling various scheduler parameters, - * it's a [ 0 ... 39 ] range. - */ -#define USER_PRIO(p) ((p)-MAX_RT_PRIO) -#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) -#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) - -/* * Convert nice value [19,-20] to rlimit style value [1,40]. 
*/ static inline long nice_to_rlimit(long nice) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index c0f71f2e7160..ef02be869cf2 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -31,6 +31,7 @@ struct kernel_clone_args { /* Number of elements in *set_tid */ size_t set_tid_size; int cgroup; + int io_thread; struct cgroup *cgrp; struct css_set *cset; }; @@ -82,6 +83,7 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); diff --git a/include/linux/security.h b/include/linux/security.h index c35ea0ffccd9..8aeebd6646dc 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -145,13 +145,16 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); -extern int cap_inode_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); -extern int cap_inode_removexattr(struct dentry *dentry, const char *name); -extern int cap_inode_need_killpriv(struct dentry *dentry); -extern int cap_inode_killpriv(struct dentry *dentry); -extern int cap_inode_getsecurity(struct inode *inode, const char *name, - void **buffer, bool alloc); +int cap_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +int cap_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name); +int cap_inode_need_killpriv(struct dentry *dentry); +int cap_inode_killpriv(struct user_namespace *mnt_userns, + struct dentry *dentry); +int cap_inode_getsecurity(struct user_namespace *mnt_userns, + struct inode *inode, const char *name, void **buffer, + bool alloc); extern int cap_mmap_addr(unsigned long addr); extern int cap_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags); @@ -324,6 +327,9 @@ void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, initxattrs initxattrs, void *fs_data); +int security_inode_init_security_anon(struct inode *inode, + const struct qstr *name, + const struct inode *context_inode); int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); @@ -345,16 +351,21 @@ int security_inode_follow_link(struct dentry *dentry, struct inode *inode, int security_inode_permission(struct inode *inode, int mask); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(const struct path *path); -int security_inode_setxattr(struct dentry *dentry, const char *name, +int security_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, size_t size, int flags); void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int security_inode_getxattr(struct dentry *dentry, const char *name); int security_inode_listxattr(struct dentry *dentry); -int security_inode_removexattr(struct dentry *dentry, const char 
*name); +int security_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name); int security_inode_need_killpriv(struct dentry *dentry); -int security_inode_killpriv(struct dentry *dentry); -int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); +int security_inode_killpriv(struct user_namespace *mnt_userns, + struct dentry *dentry); +int security_inode_getsecurity(struct user_namespace *mnt_userns, + struct inode *inode, const char *name, + void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); void security_inode_getsecid(struct inode *inode, u32 *secid); @@ -738,6 +749,13 @@ static inline int security_inode_init_security(struct inode *inode, return 0; } +static inline int security_inode_init_security_anon(struct inode *inode, + const struct qstr *name, + const struct inode *context_inode) +{ + return 0; +} + static inline int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, @@ -831,8 +849,9 @@ static inline int security_inode_getattr(const struct path *path) return 0; } -static inline int security_inode_setxattr(struct dentry *dentry, - const char *name, const void *value, size_t size, int flags) +static inline int security_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) { return cap_inode_setxattr(dentry, name, value, size, flags); } @@ -852,10 +871,11 @@ static inline int security_inode_listxattr(struct dentry *dentry) return 0; } -static inline int security_inode_removexattr(struct dentry *dentry, - const char *name) +static inline int security_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, + const char *name) { - return cap_inode_removexattr(dentry, name); + return cap_inode_removexattr(mnt_userns, dentry, name); } static inline int security_inode_need_killpriv(struct dentry *dentry) @@ -863,14 +883,18 @@ static inline int security_inode_need_killpriv(struct dentry *dentry) return cap_inode_need_killpriv(dentry); } -static inline int security_inode_killpriv(struct dentry *dentry) +static inline int security_inode_killpriv(struct user_namespace *mnt_userns, + struct dentry *dentry) { - return cap_inode_killpriv(dentry); + return cap_inode_killpriv(mnt_userns, dentry); } -static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) +static inline int security_inode_getsecurity(struct user_namespace *mnt_userns, + struct inode *inode, + const char *name, void **buffer, + bool alloc) { - return cap_inode_getsecurity(inode, name, buffer, alloc); + return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc); } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 2f7bb92b4c9e..f61e34fbaaea 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -664,10 +664,7 @@ typedef struct { * seqcount_latch_init() - runtime initializer for seqcount_latch_t * @s: Pointer to the seqcount_latch_t instance */ -static inline void seqcount_latch_init(seqcount_latch_t *s) -{ - seqcount_init(&s->seqcount); -} +#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount) /** * 
raw_read_seqcount_latch() - pick even/odd latch data copy diff --git a/include/linux/sfi.h b/include/linux/sfi.h deleted file mode 100644 index e0e1597ef9e6..000000000000 --- a/include/linux/sfi.h +++ /dev/null @@ -1,210 +0,0 @@ -/* sfi.h Simple Firmware Interface */ - -/* - - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - - Copyright(c) 2009 Intel Corporation. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution - in the file called LICENSE.GPL. - - BSD LICENSE - - Copyright(c) 2009 Intel Corporation. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -*/ - -#ifndef _LINUX_SFI_H -#define _LINUX_SFI_H - -#include <linux/init.h> -#include <linux/types.h> - -/* Table signatures reserved by the SFI specification */ -#define SFI_SIG_SYST "SYST" -#define SFI_SIG_FREQ "FREQ" -#define SFI_SIG_IDLE "IDLE" -#define SFI_SIG_CPUS "CPUS" -#define SFI_SIG_MTMR "MTMR" -#define SFI_SIG_MRTC "MRTC" -#define SFI_SIG_MMAP "MMAP" -#define SFI_SIG_APIC "APIC" -#define SFI_SIG_XSDT "XSDT" -#define SFI_SIG_WAKE "WAKE" -#define SFI_SIG_DEVS "DEVS" -#define SFI_SIG_GPIO "GPIO" - -#define SFI_SIGNATURE_SIZE 4 -#define SFI_OEM_ID_SIZE 6 -#define SFI_OEM_TABLE_ID_SIZE 8 - -#define SFI_NAME_LEN 16 - -#define SFI_SYST_SEARCH_BEGIN 0x000E0000 -#define SFI_SYST_SEARCH_END 0x000FFFFF - -#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ - ((ptable->header.len - sizeof(struct sfi_table_header)) / \ - (sizeof(entry_type))) -/* - * Table structures must be byte-packed to match the SFI specification, - * as they are provided by the BIOS. - */ -struct sfi_table_header { - char sig[SFI_SIGNATURE_SIZE]; - u32 len; - u8 rev; - u8 csum; - char oem_id[SFI_OEM_ID_SIZE]; - char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; -} __packed; - -struct sfi_table_simple { - struct sfi_table_header header; - u64 pentry[1]; -} __packed; - -/* Comply with UEFI spec 2.1 */ -struct sfi_mem_entry { - u32 type; - u64 phys_start; - u64 virt_start; - u64 pages; - u64 attrib; -} __packed; - -struct sfi_cpu_table_entry { - u32 apic_id; -} __packed; - -struct sfi_cstate_table_entry { - u32 hint; /* MWAIT hint */ - u32 latency; /* latency in ms */ -} __packed; - -struct sfi_apic_table_entry { - u64 phys_addr; /* phy base addr for APIC reg */ -} __packed; - -struct sfi_freq_table_entry { - u32 freq_mhz; /* in MHZ */ - u32 latency; /* transition latency in ms */ - u32 ctrl_val; /* value to write to PERF_CTL */ -} __packed; - -struct sfi_wake_table_entry { - u64 phys_addr; /* pointer to where the wake vector locates */ -} __packed; - -struct sfi_timer_table_entry { - u64 phys_addr; /* phy base addr for the timer */ - u32 freq_hz; /* in HZ */ - u32 irq; -} __packed; - -struct sfi_rtc_table_entry { - u64 phys_addr; /* phy base addr for the RTC */ - u32 irq; -} __packed; - -struct sfi_device_table_entry { - u8 type; /* bus type, I2C, SPI or ...*/ -#define SFI_DEV_TYPE_SPI 0 -#define SFI_DEV_TYPE_I2C 1 -#define SFI_DEV_TYPE_UART 2 -#define SFI_DEV_TYPE_HSI 3 -#define SFI_DEV_TYPE_IPC 4 -#define SFI_DEV_TYPE_SD 5 - - u8 host_num; /* attached to host 0, 1...*/ - u16 addr; - u8 irq; - u32 max_freq; - char name[SFI_NAME_LEN]; -} __packed; - -struct sfi_gpio_table_entry { - char controller_name[SFI_NAME_LEN]; - u16 pin_no; - char pin_name[SFI_NAME_LEN]; -} __packed; - -typedef int (*sfi_table_handler) (struct sfi_table_header *table); - -#ifdef CONFIG_SFI -extern void __init sfi_init(void); -extern int __init sfi_platform_init(void); -extern void __init sfi_init_late(void); -extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, - sfi_table_handler handler); - -extern int sfi_disabled; -static inline void disable_sfi(void) -{ - sfi_disabled = 1; -} - -#else /* !CONFIG_SFI */ - -static inline void sfi_init(void) -{ -} - -static inline void sfi_init_late(void) -{ -} - -#define sfi_disabled 0 - -static inline int sfi_table_parse(char *signature, char *oem_id, - char *oem_table_id, - sfi_table_handler handler) -{ - return -1; -} - -#endif /* !CONFIG_SFI */ - -#endif /*_LINUX_SFI_H*/ diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h deleted file mode 100644 index 
a6e555cbe05c..000000000000 --- a/include/linux/sfi_acpi.h +++ /dev/null @@ -1,93 +0,0 @@ -/* sfi.h Simple Firmware Interface */ - -/* - - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - - Copyright(c) 2009 Intel Corporation. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - The full GNU General Public License is included in this distribution - in the file called LICENSE.GPL. - - BSD LICENSE - - Copyright(c) 2009 Intel Corporation. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -*/ - -#ifndef _LINUX_SFI_ACPI_H -#define _LINUX_SFI_ACPI_H - -#include <linux/acpi.h> -#include <linux/sfi.h> - -#ifdef CONFIG_SFI -extern int sfi_acpi_table_parse(char *signature, char *oem_id, - char *oem_table_id, - int (*handler)(struct acpi_table_header *)); - -static inline int __init acpi_sfi_table_parse(char *signature, - int (*handler)(struct acpi_table_header *)) -{ - if (!acpi_table_parse(signature, handler)) - return 0; - - return sfi_acpi_table_parse(signature, NULL, NULL, handler); -} -#else /* !CONFIG_SFI */ -static inline int sfi_acpi_table_parse(char *signature, char *oem_id, - char *oem_table_id, - int (*handler)(struct acpi_table_header *)) -{ - return -1; -} - -static inline int __init acpi_sfi_table_parse(char *signature, - int (*handler)(struct acpi_table_header *)) -{ - return acpi_table_parse(signature, handler); -} -#endif /* !CONFIG_SFI */ - -#endif /*_LINUX_SFI_ACPI_H*/ diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h deleted file mode 100644 index 50161b6afb61..000000000000 --- a/include/linux/sirfsoc_dma.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _SIRFSOC_DMA_H_ -#define _SIRFSOC_DMA_H_ - -bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id); - -#endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5f60c9e907c9..6d0a33d1c0db 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -430,28 +430,32 @@ enum { /* device driver is going to provide hardware time stamp */ SKBTX_IN_PROGRESS = 1 << 2, - /* device driver supports TX zero-copy buffers */ - SKBTX_DEV_ZEROCOPY = 1 << 3, - /* generate wifi status information (where possible) */ SKBTX_WIFI_STATUS = 1 << 4, - /* This indicates at least one fragment might be overwritten - * (as in vmsplice(), sendfile() ...) - * If we need to compute a TX checksum, we'll need to copy - * all frags to avoid possible bad checksum - */ - SKBTX_SHARED_FRAG = 1 << 5, - /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, }; -#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG) #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) +/* Definitions for flags in struct skb_shared_info */ +enum { + /* use zcopy routines */ + SKBFL_ZEROCOPY_ENABLE = BIT(0), + + /* This indicates at least one fragment might be overwritten + * (as in vmsplice(), sendfile() ...) + * If we need to compute a TX checksum, we'll need to copy + * all frags to avoid possible bad checksum + */ + SKBFL_SHARED_FRAG = BIT(1), +}; + +#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) + /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. @@ -461,7 +465,8 @@ enum { * The desc field is used to track userspace buffer index. 
*/ struct ubuf_info { - void (*callback)(struct ubuf_info *, bool zerocopy_success); + void (*callback)(struct sk_buff *, struct ubuf_info *, + bool zerocopy_success); union { struct { unsigned long desc; @@ -475,6 +480,7 @@ struct ubuf_info { }; }; refcount_t refcnt; + u8 flags; struct mmpin { struct user_struct *user; @@ -487,19 +493,14 @@ struct ubuf_info { int mm_account_pinned_pages(struct mmpin *mmp, size_t size); void mm_unaccount_pinned_pages(struct mmpin *mmp); -struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); -struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, - struct ubuf_info *uarg); - -static inline void sock_zerocopy_get(struct ubuf_info *uarg) -{ - refcount_inc(&uarg->refcnt); -} +struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size); +struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, + struct ubuf_info *uarg); -void sock_zerocopy_put(struct ubuf_info *uarg); -void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); +void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); -void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); +void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, + bool success); int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, @@ -510,7 +511,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, * the end of the header data, ie. at skb->end. */ struct skb_shared_info { - __u8 __unused; + __u8 flags; __u8 meta_len; __u8 nr_frags; __u8 tx_flags; @@ -1086,6 +1087,8 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb_around(struct sk_buff *skb, void *data, unsigned int frag_size); +struct sk_buff *napi_build_skb(void *data, unsigned int frag_size); + /** * alloc_skb - allocate a network buffer * @size: size to allocate @@ -1352,8 +1355,8 @@ void skb_flow_dissect_ct(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, - u16 *ctinfo_map, - size_t mapsize); + u16 *ctinfo_map, size_t mapsize, + bool post_ct); void skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, @@ -1438,11 +1441,22 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) { - bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY; + bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; return is_zcopy ? 
skb_uarg(skb) : NULL; } +static inline void net_zcopy_get(struct ubuf_info *uarg) +{ + refcount_inc(&uarg->refcnt); +} + +static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg) +{ + skb_shinfo(skb)->destructor_arg = uarg; + skb_shinfo(skb)->flags |= uarg->flags; +} + static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, bool *have_ref) { @@ -1450,16 +1464,15 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, if (unlikely(have_ref && *have_ref)) *have_ref = false; else - sock_zerocopy_get(uarg); - skb_shinfo(skb)->destructor_arg = uarg; - skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; + net_zcopy_get(uarg); + skb_zcopy_init(skb, uarg); } } static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) { skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); - skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; + skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; } static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) @@ -1472,33 +1485,32 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); } -/* Release a reference on a zerocopy structure */ -static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) +static inline void net_zcopy_put(struct ubuf_info *uarg) { - struct ubuf_info *uarg = skb_zcopy(skb); + if (uarg) + uarg->callback(NULL, uarg, true); +} +static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) +{ if (uarg) { - if (skb_zcopy_is_nouarg(skb)) { - /* no notification callback */ - } else if (uarg->callback == sock_zerocopy_callback) { - uarg->zerocopy = uarg->zerocopy && zerocopy; - sock_zerocopy_put(uarg); - } else { - uarg->callback(uarg, zerocopy); - } - - skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + if (uarg->callback == msg_zerocopy_callback) + msg_zerocopy_put_abort(uarg, have_uref); + else if (have_uref) + net_zcopy_put(uarg); } } -/* Abort a zerocopy operation and revert zckey on error in send syscall */ -static inline void skb_zcopy_abort(struct sk_buff *skb) +/* Release a reference on a zerocopy structure */ +static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) { struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { - sock_zerocopy_put_abort(uarg, false); - skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + if (!skb_zcopy_is_nouarg(skb)) + uarg->callback(skb, uarg, zerocopy_success); + + skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG; } } @@ -2777,7 +2789,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) if (likely(!skb_zcopy(skb))) return 0; if (!skb_zcopy_is_nouarg(skb) && - skb_uarg(skb)->callback == sock_zerocopy_callback) + skb_uarg(skb)->callback == msg_zerocopy_callback) return 0; return skb_copy_ubufs(skb, gfp_mask); } @@ -2808,7 +2820,26 @@ void skb_queue_purge(struct sk_buff_head *list); unsigned int skb_rbtree_purge(struct rb_root *root); -void *netdev_alloc_frag(unsigned int fragsz); +void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); + +/** + * netdev_alloc_frag - allocate a page fragment + * @fragsz: fragment size + * + * Allocates a frag from a page for receive buffer. + * Uses GFP_ATOMIC allocations. 
+ */ +static inline void *netdev_alloc_frag(unsigned int fragsz) +{ + return __netdev_alloc_frag_align(fragsz, ~0u); +} + +static inline void *netdev_alloc_frag_align(unsigned int fragsz, + unsigned int align) +{ + WARN_ON_ONCE(!is_power_of_2(align)); + return __netdev_alloc_frag_align(fragsz, -align); +} struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask); @@ -2867,7 +2898,20 @@ static inline void skb_free_frag(void *addr) page_frag_free(addr); } -void *napi_alloc_frag(unsigned int fragsz); +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); + +static inline void *napi_alloc_frag(unsigned int fragsz) +{ + return __napi_alloc_frag_align(fragsz, ~0u); +} + +static inline void *napi_alloc_frag_align(unsigned int fragsz, + unsigned int align) +{ + WARN_ON_ONCE(!is_power_of_2(align)); + return __napi_alloc_frag_align(fragsz, -align); +} + struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int length, gfp_t gfp_mask); static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, @@ -2877,7 +2921,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, } void napi_consume_skb(struct sk_buff *skb, int budget); -void __kfree_skb_flush(void); +void napi_skb_free_stolen_head(struct sk_buff *skb); void __kfree_skb_defer(struct sk_buff *skb); /** @@ -2929,12 +2973,28 @@ static inline struct page *dev_alloc_page(void) } /** + * dev_page_is_reusable - check whether a page can be reused for network Rx + * @page: the page to test + * + * A page shouldn't be considered for reusing/recycling if it was allocated + * under memory pressure or at a distant memory node. + * + * Returns false if this page should be returned to page allocator, true + * otherwise. 
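dev_page_is_reusable(), completed just below, pairs with the new __netdev_alloc_frag_align()/__napi_alloc_frag_align() entry points above. A hypothetical driver RX path (names invented for illustration) might use them as:

#include <linux/skbuff.h>

static bool demo_can_recycle_rx_page(const struct page *page)
{
	/* Keep only local-node pages that were not allocated under pressure. */
	return dev_page_is_reusable(page);
}

static void *demo_alloc_rx_hdr(unsigned int size)
{
	/* 64-byte aligned page fragment; GFP_ATOMIC under the hood. */
	return napi_alloc_frag_align(size, 64);
}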
+ */ +static inline bool dev_page_is_reusable(const struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page)); +} + +/** * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page * @page: The page that was allocated from skb_alloc_page * @skb: The skb that may need pfmemalloc set */ -static inline void skb_propagate_pfmemalloc(struct page *page, - struct sk_buff *skb) +static inline void skb_propagate_pfmemalloc(const struct page *page, + struct sk_buff *skb) { if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; @@ -3324,7 +3384,7 @@ static inline int skb_linearize(struct sk_buff *skb) static inline bool skb_has_shared_frag(const struct sk_buff *skb) { return skb_is_nonlinear(skb) && - skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; + skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; } /** @@ -3849,7 +3909,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps); -void __skb_tstamp_tx(struct sk_buff *orig_skb, +void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, struct skb_shared_hwtstamps *hwtstamps, struct sock *sk, int tstype); @@ -4611,6 +4671,11 @@ static inline void skb_reset_redirect(struct sk_buff *skb) #endif } +static inline bool skb_csum_is_sctp(struct sk_buff *skb) +{ + return skb->csum_not_inet; +} + static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle) { diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index fec0c5ac1c4f..8edbbf5f2f93 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -390,7 +390,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk) } void sk_psock_stop(struct sock *sk, struct sk_psock *psock); -void sk_psock_destroy(struct rcu_head *rcu); void sk_psock_drop(struct sock *sk, struct sk_psock *psock); static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) diff --git a/include/linux/slab.h b/include/linux/slab.h index be4ba5867ac5..7ae604076767 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -186,6 +186,8 @@ void kfree(const void *); void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +bool kmem_valid_obj(void *object); +void kmem_dump_obj(void *object); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 9eb430c163c2..3aa5e1e73ab6 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -2,6 +2,7 @@ #ifndef _LINUX_SLAB_DEF_H #define _LINUX_SLAB_DEF_H +#include <linux/kfence.h> #include <linux/reciprocal_div.h> /* @@ -114,6 +115,8 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, static inline int objs_per_slab_page(const struct kmem_cache *cache, const struct page *page) { + if (is_kfence_address(page_address(page))) + return 1; return cache->num; } diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 1be0ed5befa1..dcde82a4434c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -7,6 +7,7 @@ * * (C) 2007 SGI, Christoph Lameter */ +#include <linux/kfence.h> #include <linux/kobject.h> #include <linux/reciprocal_div.h> @@ -185,6 +186,8 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache, static inline unsigned int obj_to_index(const struct kmem_cache *cache, const struct page *page, void 
*obj) { + if (is_kfence_address(obj)) + return 0; return __obj_to_index(cache, page_address(page), obj); } diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h index 8e884e0dda0a..f2b768852777 100644 --- a/include/linux/soc/brcmstb/brcmstb.h +++ b/include/linux/soc/brcmstb/brcmstb.h @@ -2,6 +2,8 @@ #ifndef __BRCMSTB_SOC_H #define __BRCMSTB_SOC_H +#include <linux/kconfig.h> + static inline u32 BRCM_ID(u32 reg) { return reg >> 28 ? reg >> 16 : reg >> 8; @@ -12,6 +14,8 @@ static inline u32 BRCM_REV(u32 reg) return reg & 0xff; } +#if IS_ENABLED(CONFIG_SOC_BRCMSTB) + /* * Helper functions for getting family or product id from the * SoC driver. @@ -19,4 +23,16 @@ static inline u32 BRCM_REV(u32 reg) u32 brcmstb_get_family_id(void); u32 brcmstb_get_product_id(void); +#else +static inline u32 brcmstb_get_family_id(void) +{ + return 0; +} + +static inline u32 brcmstb_get_product_id(void) +{ + return 0; +} +#endif + #endif /* __BRCMSTB_SOC_H */ diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h index ae2279fe830a..28c04d918f0f 100644 --- a/include/linux/soc/marvell/octeontx2/asm.h +++ b/include/linux/soc/marvell/octeontx2/asm.h @@ -22,8 +22,16 @@ : [rs]"r" (ioaddr)); \ (result); \ }) +#define cn10k_lmt_flush(val, addr) \ +({ \ + __asm__ volatile(".cpu generic+lse\n" \ + "steor %x[rf],[%[rs]]" \ + : [rf]"+r"(val) \ + : [rs]"r"(addr)); \ +}) #else #define otx2_lmt_flush(ioaddr) ({ 0; }) +#define cn10k_lmt_flush(val, addr) ({ addr = val; }) #endif #endif /* __SOC_OTX2_ASM_H */ diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index e7842debc05d..4615a228da51 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -123,6 +123,14 @@ #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) +#define MT8167_TOP_AXI_PROT_EN_MM_EMI BIT(1) +#define MT8167_TOP_AXI_PROT_EN_MCU_MFG BIT(2) +#define MT8167_TOP_AXI_PROT_EN_CONN_EMI BIT(4) +#define MT8167_TOP_AXI_PROT_EN_MFG_EMI BIT(5) +#define MT8167_TOP_AXI_PROT_EN_CONN_MCU BIT(8) +#define MT8167_TOP_AXI_PROT_EN_MCU_CONN BIT(9) +#define MT8167_TOP_AXI_PROT_EN_MCU_MM BIT(11) + #define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) #define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) #define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 8e9996610978..ac6b5f3cba95 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -280,16 +280,4 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt); int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, void *data); -/** - * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet - * @pkt: the CMDQ packet - * - * Return: 0 for success; else the error code is returned - * - * Trigger CMDQ to execute the CMDQ packet. Note that this is a - * synchronous flush function. When the function returned, the recorded - * commands have been done. 
- */ -int cmdq_pkt_flush(struct cmdq_pkt *pkt); - #endif /* __MTK_CMDQ_H__ */ diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 3db6797ba6ff..64fc582ae415 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -29,6 +29,7 @@ #define LLCC_AUDHW 22 #define LLCC_NPU 23 #define LLCC_WLHW 24 +#define LLCC_CVP 28 #define LLCC_MODPE 29 #define LLCC_APTCM 30 #define LLCC_WRCACHE 31 @@ -79,6 +80,7 @@ struct llcc_edac_reg_data { * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array * @ecc_irq: interrupt for llcc cache error detection and reporting + * @major_version: Indicates the LLCC major version */ struct llcc_drv_data { struct regmap *regmap; @@ -91,6 +93,7 @@ struct llcc_drv_data { unsigned long *bitmap; u32 *offsets; int ecc_irq; + u32 major_version; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index e600baec6825..afd47217996b 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -11,6 +11,8 @@ struct device; struct firmware; +#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) + ssize_t qcom_mdt_get_size(const struct firmware *fw); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, @@ -23,4 +25,37 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, phys_addr_t *reloc_base); void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); +#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ + +static inline ssize_t qcom_mdt_get_size(const struct firmware *fw) +{ + return -ENODEV; +} + +static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, + void *mem_region, phys_addr_t mem_phys, + size_t mem_size, phys_addr_t *reloc_base) +{ + return -ENODEV; +} + +static inline int qcom_mdt_load_no_init(struct device *dev, + const struct firmware *fw, + const char *fw_name, int pas_id, + void *mem_region, phys_addr_t mem_phys, + size_t mem_size, + phys_addr_t *reloc_base) +{ + return -ENODEV; +} + +static inline void *qcom_mdt_read_metadata(const struct firmware *fw, + size_t *data_len) +{ + return ERR_PTR(-ENODEV); +} + +#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ + #endif diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h index 374d0fdb0743..1e3c92feea6e 100644 --- a/include/linux/sony-laptop.h +++ b/include/linux/sony-laptop.h @@ -31,7 +31,7 @@ #if IS_ENABLED(CONFIG_SONY_LAPTOP) int sony_pic_camera_command(int command, u8 value); #else -static inline int sony_pic_camera_command(int command, u8 value) { return 0; }; +static inline int sony_pic_camera_command(int command, u8 value) { return 0; } #endif #endif /* __KERNEL__ */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index f0b01b728640..d08039d65825 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -1005,6 +1005,8 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus); int sdw_read(struct sdw_slave *slave, u32 addr); int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); +int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value); +int sdw_read_no_pm(struct sdw_slave *slave, u32 addr); int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); diff --git a/include/linux/soundwire/sdw_intel.h 
b/include/linux/soundwire/sdw_intel.h index 120ffddc03d2..3a5446ac014a 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -187,4 +187,6 @@ void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable); irqreturn_t sdw_intel_thread(int irq, void *dev_id); +#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1) + #endif diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h deleted file mode 100644 index 6d19b09139d0..000000000000 --- a/include/linux/spi/ifx_modem.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef LINUX_IFX_MODEM_H -#define LINUX_IFX_MODEM_H - -struct ifx_modem_platform_data { - unsigned short tx_pwr; /* modem power threshold */ - unsigned char modem_type; /* Modem type */ - unsigned long max_hz; /* max SPI frequency */ - unsigned short use_dma:1; /* spi protocol driver supplies - dma-able addrs */ -}; -#define IFX_MODEM_6160 1 -#define IFX_MODEM_6260 2 - -#endif diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h deleted file mode 100644 index f237b2d062e9..000000000000 --- a/include/linux/spi/lms283gf05.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD - * - * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> -*/ - -#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ -#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ - -struct lms283gf05_pdata { - unsigned long reset_gpio; - bool reset_inverted; -}; - -#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 159463cc659c..2b65c9edc34e 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -311,6 +311,9 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, bool spi_mem_default_supports_op(struct spi_mem *mem, const struct spi_mem_op *op); +bool spi_mem_dtr_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op); + #else static inline int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, @@ -334,6 +337,12 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, return false; } +static inline +bool spi_mem_dtr_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + return false; +} #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index aa09fdc8042d..592897fa4f03 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -6,6 +6,7 @@ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H +#include <linux/bits.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> @@ -15,6 +16,8 @@ #include <linux/gpio/consumer.h> #include <linux/ptp_clock_kernel.h> +#include <uapi/linux/spi/spi.h> + struct dma_chan; struct property_entry; struct spi_controller; @@ -164,28 +167,19 @@ struct spi_device { u8 chip_select; u8 bits_per_word; bool rt; +#define SPI_NO_TX BIT(31) /* no transmit wire */ +#define SPI_NO_RX BIT(30) /* no receive wire */ + /* + * All bits defined above should be covered by SPI_MODE_KERNEL_MASK. + * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart, + * which is defined in 'include/uapi/linux/spi/spi.h'. + * The bits defined here are from bit 31 downwards, while in + * SPI_MODE_USER_MASK are from 0 upwards. + * These bits must not overlap. A static assert check should make sure of that. 
+ * If adding extra bits, make sure to decrease the bit index below as well. + */ +#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1)) u32 mode; -#define SPI_CPHA 0x01 /* clock phase */ -#define SPI_CPOL 0x02 /* clock polarity */ -#define SPI_MODE_0 (0|0) /* (original MicroWire) */ -#define SPI_MODE_1 (0|SPI_CPHA) -#define SPI_MODE_2 (SPI_CPOL|0) -#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) -#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA) -#define SPI_CS_HIGH 0x04 /* chipselect active high? */ -#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ -#define SPI_3WIRE 0x10 /* SI/SO signals shared */ -#define SPI_LOOP 0x20 /* loopback mode */ -#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ -#define SPI_READY 0x80 /* slave pulls low to pause */ -#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ -#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ -#define SPI_RX_DUAL 0x400 /* receive with 2 wires */ -#define SPI_RX_QUAD 0x800 /* receive with 4 wires */ -#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ -#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */ -#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */ -#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */ int irq; void *controller_state; void *controller_data; @@ -208,6 +202,10 @@ struct spi_device { */ }; +/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */ +static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, + "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap"); + static inline struct spi_device *to_spi_device(struct device *dev) { return dev ? container_of(dev, struct spi_device, dev) : NULL; @@ -624,7 +622,7 @@ struct spi_controller { /* * These hooks are for drivers that use a generic implementation - * of transfer_one_message() provied by the core. + * of transfer_one_message() provided by the core. */ void (*set_cs)(struct spi_device *spi, bool enable); int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, @@ -827,6 +825,7 @@ extern void spi_res_release(struct spi_controller *ctlr, * transfer. If 0 the default (from @spi_device) is used. * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. + * @dummy_data: indicates transfer is dummy bytes transfer. 
* @cs_change: affects chipselect after this transfer completes * @cs_change_delay: delay between cs deassert and assert when * @cs_change is set and @spi_transfer is not the last in @spi_message @@ -939,6 +938,7 @@ struct spi_transfer { struct sg_table tx_sg; struct sg_table rx_sg; + unsigned dummy_data:1; unsigned cs_change:1; unsigned tx_nbits:3; unsigned rx_nbits:3; diff --git a/include/linux/srcu.h b/include/linux/srcu.h index e432cc92c73d..a0895bbf71ce 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -60,6 +60,9 @@ void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 5a5a1941ca15..0e0cf4d6a72a 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -15,7 +15,8 @@ struct srcu_struct { short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ - short srcu_idx; /* Current reader array element. */ + unsigned short srcu_idx; /* Current reader array element in bit 0x2. */ + unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */ u8 srcu_gp_running; /* GP workqueue running? */ u8 srcu_gp_waiting; /* GP waiting for readers? */ struct swait_queue_head srcu_wq; @@ -59,7 +60,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; - idx = READ_ONCE(ssp->srcu_idx); + idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); return idx; } @@ -80,7 +81,7 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, { int idx; - idx = READ_ONCE(ssp->srcu_idx) & 0x1; + idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, READ_ONCE(ssp->srcu_lock_nesting[!idx]), diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h index 31593b34608e..15ba0df1ee0d 100644 --- a/include/linux/ssb/ssb_driver_gige.h +++ b/include/linux/ssb/ssb_driver_gige.h @@ -76,7 +76,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) if (dev) return !!(dev->dev->bus->sprom.boardflags_lo & SSB_GIGE_BFL_ROBOSWITCH); - return 0; + return false; } /* Returns whether we can only do one DMA at once. */ @@ -86,7 +86,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) if (dev) return ((dev->dev->bus->chip_id == 0x4785) && (dev->dev->bus->chip_rev < 2)); - return 0; + return false; } /* Returns whether we must flush posted writes. 
*/ @@ -159,7 +159,7 @@ static inline void ssb_gige_exit(void) static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) { - return 0; + return false; } static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) { @@ -167,19 +167,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) } static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) { - return 0; + return false; } static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) { - return 0; + return false; } static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) { - return 0; + return false; } static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) { - return 0; + return false; } static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) { diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 24d49c732341..6bb4bc1a5f54 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -21,4 +21,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); +#ifdef CONFIG_STACKDEPOT +int stack_depot_init(void); +#else +static inline int stack_depot_init(void) +{ + return 0; +} +#endif /* CONFIG_STACKDEPOT */ + #endif diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 695da4c9b338..85ecc789f4ff 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -107,26 +107,10 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name) -/* - * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from - * the symbol table so that objtool can reference it when it generates the - * .static_call_sites section. 
- */ -#define __static_call(name) \ -({ \ - __ADDRESSABLE(STATIC_CALL_KEY(name)); \ - &STATIC_CALL_TRAMP(name); \ -}) - #else #define STATIC_CALL_TRAMP_ADDR(name) NULL #endif - -#define DECLARE_STATIC_CALL(name, func) \ - extern struct static_call_key STATIC_CALL_KEY(name); \ - extern typeof(func) STATIC_CALL_TRAMP(name); - #define static_call_update(name, func) \ ({ \ BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \ @@ -154,17 +138,25 @@ struct static_call_key { }; }; +/* For finding the key associated with a trampoline */ +struct static_call_tramp_key { + s32 tramp; + s32 key; +}; + extern void __static_call_update(struct static_call_key *key, void *tramp, void *func); extern int static_call_mod_init(struct module *mod); extern int static_call_text_reserved(void *start, void *end); -#define DEFINE_STATIC_CALL(name, _func) \ +extern long __static_call_return0(void); + +#define __DEFINE_STATIC_CALL(name, _func, _func_init) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = _func, \ + .func = _func_init, \ .type = 1, \ }; \ - ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func) + ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init) #define DEFINE_STATIC_CALL_NULL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ @@ -174,17 +166,23 @@ extern int static_call_text_reserved(void *start, void *end); }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) -#define static_call(name) __static_call(name) #define static_call_cond(name) (void)__static_call(name) #define EXPORT_STATIC_CALL(name) \ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)) - #define EXPORT_STATIC_CALL_GPL(name) \ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)) +/* Leave the key unexported, so modules can't change static call targets: */ +#define EXPORT_STATIC_CALL_TRAMP(name) \ + EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)); \ + ARCH_ADD_TRAMP_KEY(name) +#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \ + EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)); \ + ARCH_ADD_TRAMP_KEY(name) + #elif defined(CONFIG_HAVE_STATIC_CALL) static inline int static_call_init(void) { return 0; } @@ -193,12 +191,12 @@ struct static_call_key { void *func; }; -#define DEFINE_STATIC_CALL(name, _func) \ +#define __DEFINE_STATIC_CALL(name, _func, _func_init) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = _func, \ + .func = _func_init, \ }; \ - ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func) + ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init) #define DEFINE_STATIC_CALL_NULL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ @@ -207,7 +205,6 @@ struct static_call_key { }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) -#define static_call(name) __static_call(name) #define static_call_cond(name) (void)__static_call(name) static inline @@ -224,14 +221,24 @@ static inline int static_call_text_reserved(void *start, void *end) return 0; } +static inline long __static_call_return0(void) +{ + return 0; +} + #define EXPORT_STATIC_CALL(name) \ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)) - #define EXPORT_STATIC_CALL_GPL(name) \ EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)) +/* Leave the key unexported, so modules can't change static call targets: */ +#define EXPORT_STATIC_CALL_TRAMP(name) \ + EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)) +#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \ + EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)) + #else /* Generic implementation */ 
static inline int static_call_init(void) { return 0; } @@ -240,10 +247,15 @@ struct static_call_key { void *func; }; -#define DEFINE_STATIC_CALL(name, _func) \ +static inline long __static_call_return0(void) +{ + return 0; +} + +#define __DEFINE_STATIC_CALL(name, _func, _func_init) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = _func, \ + .func = _func_init, \ } #define DEFINE_STATIC_CALL_NULL(name, _func) \ @@ -252,9 +264,6 @@ struct static_call_key { .func = NULL, \ } -#define static_call(name) \ - ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)) - static inline void __static_call_nop(void) { } /* @@ -295,4 +304,10 @@ static inline int static_call_text_reserved(void *start, void *end) #endif /* CONFIG_HAVE_STATIC_CALL */ +#define DEFINE_STATIC_CALL(name, _func) \ + __DEFINE_STATIC_CALL(name, _func, _func) + +#define DEFINE_STATIC_CALL_RET0(name, _func) \ + __DEFINE_STATIC_CALL(name, _func, __static_call_return0) + #endif /* _LINUX_STATIC_CALL_H */ diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h index 89135bb35bf7..ae5662d368b9 100644 --- a/include/linux/static_call_types.h +++ b/include/linux/static_call_types.h @@ -4,11 +4,13 @@ #include <linux/types.h> #include <linux/stringify.h> +#include <linux/compiler.h> #define STATIC_CALL_KEY_PREFIX __SCK__ #define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX) #define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1) #define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name) +#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name)) #define STATIC_CALL_TRAMP_PREFIX __SCT__ #define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX) @@ -32,4 +34,52 @@ struct static_call_site { s32 key; }; +#define DECLARE_STATIC_CALL(name, func) \ + extern struct static_call_key STATIC_CALL_KEY(name); \ + extern typeof(func) STATIC_CALL_TRAMP(name); + +#ifdef CONFIG_HAVE_STATIC_CALL + +#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name)) + +#ifdef CONFIG_HAVE_STATIC_CALL_INLINE + +/* + * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from + * the symbol table so that objtool can reference it when it generates the + * .static_call_sites section. 
+ */ +#define __STATIC_CALL_ADDRESSABLE(name) \ + __ADDRESSABLE(STATIC_CALL_KEY(name)) + +#define __static_call(name) \ +({ \ + __STATIC_CALL_ADDRESSABLE(name); \ + __raw_static_call(name); \ +}) + +#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */ + +#define __STATIC_CALL_ADDRESSABLE(name) +#define __static_call(name) __raw_static_call(name) + +#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */ + +#ifdef MODULE +#define __STATIC_CALL_MOD_ADDRESSABLE(name) +#define static_call_mod(name) __raw_static_call(name) +#else +#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name) +#define static_call_mod(name) __static_call(name) +#endif + +#define static_call(name) __static_call(name) + +#else + +#define static_call(name) \ + ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)) + +#endif /* CONFIG_HAVE_STATIC_CALL */ + #endif /* _STATIC_CALL_TYPES_H */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 15ca6b4167cc..a302982de2d7 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -202,5 +202,6 @@ struct plat_stmmacenet_data { bool vlan_fail_q_en; u8 vlan_fail_q; unsigned int eee_usecs_rate; + struct pci_dev *pdev; }; #endif diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 30577c3aecf8..46fb3ebdd16e 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -128,7 +128,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ -static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, +static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { unsigned long flags; @@ -139,14 +139,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, return ret; } -static inline int stop_machine(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static __always_inline int +stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { return stop_machine_cpuslocked(fn, data, cpus); } -static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static __always_inline int +stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) { return stop_machine(fn, data, cpus); } diff --git a/include/linux/string.h b/include/linux/string.h index 4fcfb56abcf5..9521d8cab18e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -266,287 +266,7 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) - -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) -extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); -extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); -extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); -extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); -extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); -extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); -extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); -extern __kernel_size_t 
__underlying_strlen(const char *p) __RENAME(strlen); -extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); -extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); -#else -#define __underlying_memchr __builtin_memchr -#define __underlying_memcmp __builtin_memcmp -#define __underlying_memcpy __builtin_memcpy -#define __underlying_memmove __builtin_memmove -#define __underlying_memset __builtin_memset -#define __underlying_strcat __builtin_strcat -#define __underlying_strcpy __builtin_strcpy -#define __underlying_strlen __builtin_strlen -#define __underlying_strncat __builtin_strncat -#define __underlying_strncpy __builtin_strncpy -#endif - -__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 1); - if (__builtin_constant_p(size) && p_size < size) - __write_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_strncpy(p, q, size); -} - -__FORTIFY_INLINE char *strcat(char *p, const char *q) -{ - size_t p_size = __builtin_object_size(p, 1); - if (p_size == (size_t)-1) - return __underlying_strcat(p, q); - if (strlcat(p, q, p_size) >= p_size) - fortify_panic(__func__); - return p; -} - -__FORTIFY_INLINE __kernel_size_t strlen(const char *p) -{ - __kernel_size_t ret; - size_t p_size = __builtin_object_size(p, 1); - - /* Work around gcc excess stack consumption issue */ - if (p_size == (size_t)-1 || - (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __underlying_strlen(p); - ret = strnlen(p, p_size); - if (p_size <= ret) - fortify_panic(__func__); - return ret; -} - -extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); -__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) -{ - size_t p_size = __builtin_object_size(p, 1); - __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size); - if (p_size <= ret && maxlen != ret) - fortify_panic(__func__); - return ret; -} - -/* defined after fortified strlen to reuse it */ -extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); -__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) -{ - size_t ret; - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __real_strlcpy(p, q, size); - ret = strlen(q); - if (size) { - size_t len = (ret >= size) ? size - 1 : ret; - if (__builtin_constant_p(len) && len >= p_size) - __write_overflow(); - if (len >= p_size) - fortify_panic(__func__); - __underlying_memcpy(p, q, len); - p[len] = '\0'; - } - return ret; -} - -/* defined after fortified strnlen to reuse it */ -extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); -__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) -{ - size_t len; - /* Use string size rather than possible enclosing struct size. */ - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - - /* If we cannot get size of p and q default to call strscpy. */ - if (p_size == (size_t) -1 && q_size == (size_t) -1) - return __real_strscpy(p, q, size); - - /* - * If size can be known at compile time and is greater than - * p_size, generate a compile time write overflow error. 
- */ - if (__builtin_constant_p(size) && size > p_size) - __write_overflow(); - - /* - * This call protects from read overflow, because len will default to q - * length if it smaller than size. - */ - len = strnlen(q, size); - /* - * If len equals size, we will copy only size bytes which leads to - * -E2BIG being returned. - * Otherwise we will copy len + 1 because of the final '\O'. - */ - len = len == size ? size : len + 1; - - /* - * Generate a runtime write overflow error if len is greater than - * p_size. - */ - if (len > p_size) - fortify_panic(__func__); - - /* - * We can now safely call vanilla strscpy because we are protected from: - * 1. Read overflow thanks to call to strnlen(). - * 2. Write overflow thanks to above ifs. - */ - return __real_strscpy(p, q, len); -} - -/* defined after fortified strlen and strnlen to reuse them */ -__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) -{ - size_t p_len, copy_len; - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __underlying_strncat(p, q, count); - p_len = strlen(p); - copy_len = strnlen(q, count); - if (p_size < p_len + copy_len + 1) - fortify_panic(__func__); - __underlying_memcpy(p + p_len, q, copy_len); - p[p_len + copy_len] = '\0'; - return p; -} - -__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __write_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_memset(p, c, size); -} - -__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __write_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memcpy(p, q, size); -} - -__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __write_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memmove(p, q, size); -} - -extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); -__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_memscan(p, c, size); -} - -__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __read_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memcmp(p, q, size); -} - -__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_memchr(p, c, size); -} - -void 
*__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); -__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_memchr_inv(p, c, size); -} - -extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); -__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_kmemdup(p, size, gfp); -} - -/* defined after fortified strlen and memcpy to reuse them */ -__FORTIFY_INLINE char *strcpy(char *p, const char *q) -{ - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - size_t size; - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __underlying_strcpy(p, q); - size = strlen(q) + 1; - /* test here to use the more stringent object size */ - if (p_size < size) - fortify_panic(__func__); - memcpy(p, q, size); - return p; -} - -/* Don't use these outside the FORITFY_SOURCE implementation */ -#undef __underlying_memchr -#undef __underlying_memcmp -#undef __underlying_memcpy -#undef __underlying_memmove -#undef __underlying_memset -#undef __underlying_strcat -#undef __underlying_strcpy -#undef __underlying_strlen -#undef __underlying_strncat -#undef __underlying_strncpy +#include <linux/fortify-string.h> #endif /** diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 43f854487539..938c2bf29db8 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -10,9 +10,6 @@ #define RPC_VERSION 2 -/* size of an XDR encoding unit in bytes, i.e. 
32bit */ -#define XDR_UNIT (4) - /* spec defines authentication flavor as an unsigned 32 bit integer */ typedef u32 rpc_authflavor_t; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 34c2a69820e9..31ee3b6047c3 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -463,6 +463,7 @@ struct svc_procedure { unsigned int pc_ressize; /* result struct size */ unsigned int pc_cachetype; /* cache info (NFS) */ unsigned int pc_xdrressize; /* maximum size of XDR reply */ + const char * pc_name; /* for display */ }; /* diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 294b56e61522..7c693b31965e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -49,6 +49,7 @@ #include <linux/sunrpc/rpc_rdma_cid.h> #include <linux/sunrpc/svc_rdma_pcl.h> +#include <linux/percpu_counter.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> @@ -65,15 +66,10 @@ extern unsigned int svcrdma_max_requests; extern unsigned int svcrdma_max_bc_requests; extern unsigned int svcrdma_max_req_size; -extern atomic_t rdma_stat_recv; -extern atomic_t rdma_stat_read; -extern atomic_t rdma_stat_write; -extern atomic_t rdma_stat_sq_starve; -extern atomic_t rdma_stat_rq_starve; -extern atomic_t rdma_stat_rq_poll; -extern atomic_t rdma_stat_rq_prod; -extern atomic_t rdma_stat_sq_poll; -extern atomic_t rdma_stat_sq_prod; +extern struct percpu_counter svcrdma_stat_read; +extern struct percpu_counter svcrdma_stat_recv; +extern struct percpu_counter svcrdma_stat_sq_starve; +extern struct percpu_counter svcrdma_stat_write; struct svcxprt_rdma { struct svc_xprt sc_xprt; /* SVC transport structure */ @@ -108,6 +104,7 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; + u32 sc_pending_recvs; struct list_head sc_read_complete_q; struct work_struct sc_work; diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index b7ac7fe68306..bcc555c7ae9c 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -35,6 +35,8 @@ struct svc_sock { /* Total length of the data (not including fragment headers) * received so far in the fragments making up this rpc: */ u32 sk_datalen; + /* Number of queued send requests */ + atomic_t sk_sendqlen; struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ }; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 19b6dea27367..2bc75c167f00 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -20,13 +20,19 @@ struct bio_vec; struct rpc_rqst; /* + * Size of an XDR encoding unit in bytes, i.e. 32 bits, + * as defined in Section 3 of RFC 4506. All encoded + * XDR data items are aligned on a boundary of 32 bits. + */ +#define XDR_UNIT sizeof(__be32) + +/* * Buffer adjustment */ #define XDR_QUADLEN(l) (((l) + 3) >> 2) /* - * Generic opaque `network object.' At the kernel level, this type - * is used only by lockd. + * Generic opaque `network object.' 
*/ #define XDR_MAX_NETOBJ 1024 struct xdr_netobj { @@ -330,7 +336,7 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, static inline size_t xdr_align_size(size_t n) { - const size_t mask = sizeof(__u32) - 1; + const size_t mask = XDR_UNIT - 1; return (n + mask) & ~mask; } @@ -360,7 +366,7 @@ static inline size_t xdr_pad_size(size_t n) */ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) { - const size_t len = sizeof(__be32); + const size_t len = XDR_UNIT; __be32 *p = xdr_reserve_space(xdr, len); if (unlikely(!p)) @@ -379,7 +385,7 @@ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) */ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) { - const size_t len = sizeof(__be32); + const size_t len = XDR_UNIT; __be32 *p = xdr_reserve_space(xdr, len); if (unlikely(!p)) diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h index 7e75bb0346d0..bf0d365f471c 100644 --- a/include/linux/sunxi-rsb.h +++ b/include/linux/sunxi-rsb.h @@ -59,7 +59,7 @@ static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, struct sunxi_rsb_driver { struct device_driver driver; int (*probe)(struct sunxi_rsb_device *rdev); - int (*remove)(struct sunxi_rsb_device *rdev); + void (*remove)(struct sunxi_rsb_device *rdev); }; static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h new file mode 100644 index 000000000000..8e3e86c7d78c --- /dev/null +++ b/include/linux/surface_acpi_notify.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Interface for Surface ACPI Notify (SAN) driver. + * + * Provides access to discrete GPU notifications sent from ACPI via the SAN + * driver, which are not handled by this driver directly. + * + * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H +#define _LINUX_SURFACE_ACPI_NOTIFY_H + +#include <linux/notifier.h> +#include <linux/types.h> + +/** + * struct san_dgpu_event - Discrete GPU ACPI event. + * @category: Category of the event. + * @target: Target ID of the event source. + * @command: Command ID of the event. + * @instance: Instance ID of the event source. + * @length: Length of the event's payload data (in bytes). + * @payload: Pointer to the event's payload data. + */ +struct san_dgpu_event { + u8 category; + u8 target; + u8 command; + u8 instance; + u16 length; + u8 *payload; +}; + +int san_client_link(struct device *client); +int san_dgpu_notifier_register(struct notifier_block *nb); +int san_dgpu_notifier_unregister(struct notifier_block *nb); + +#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */ diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h new file mode 100644 index 000000000000..f4b1ba887384 --- /dev/null +++ b/include/linux/surface_aggregator/controller.h @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Surface System Aggregator Module (SSAM) controller interface. + * + * Main communication interface for the SSAM EC. Provides a controller + * managing access and communication to and from the SSAM EC, as well as main + * communication structures and definitions. 
+ * + * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H +#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/serial_hub.h> + + +/* -- Main data types and definitions --------------------------------------- */ + +/** + * enum ssam_event_flags - Flags for enabling/disabling SAM events + * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame. + */ +enum ssam_event_flags { + SSAM_EVENT_SEQUENCED = BIT(0), +}; + +/** + * struct ssam_event - SAM event sent from the EC to the host. + * @target_category: Target category of the event source. See &enum ssam_ssh_tc. + * @target_id: Target ID of the event source. + * @command_id: Command ID of the event. + * @instance_id: Instance ID of the event source. + * @length: Length of the event payload in bytes. + * @data: Event payload data. + */ +struct ssam_event { + u8 target_category; + u8 target_id; + u8 command_id; + u8 instance_id; + u16 length; + u8 data[]; +}; + +/** + * enum ssam_request_flags - Flags for SAM requests. + * + * @SSAM_REQUEST_HAS_RESPONSE: + * Specifies that the request expects a response. If not set, the request + * will be directly completed after its underlying packet has been + * transmitted. If set, the request transport system waits for a response + * of the request. + * + * @SSAM_REQUEST_UNSEQUENCED: + * Specifies that the request should be transmitted via an unsequenced + * packet. If set, the request must not have a response, meaning that this + * flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive. + */ +enum ssam_request_flags { + SSAM_REQUEST_HAS_RESPONSE = BIT(0), + SSAM_REQUEST_UNSEQUENCED = BIT(1), +}; + +/** + * struct ssam_request - SAM request description. + * @target_category: Category of the request's target. See &enum ssam_ssh_tc. + * @target_id: ID of the request's target. + * @command_id: Command ID of the request. + * @instance_id: Instance ID of the request's target. + * @flags: Flags for the request. See &enum ssam_request_flags. + * @length: Length of the request payload in bytes. + * @payload: Request payload data. + * + * This struct fully describes a SAM request with payload. It is intended to + * help set up the actual transport struct, e.g. &struct ssam_request_sync, + * and specifically its raw message data via ssam_request_write_data(). + */ +struct ssam_request { + u8 target_category; + u8 target_id; + u8 command_id; + u8 instance_id; + u16 flags; + u16 length; + const u8 *payload; +}; + +/** + * struct ssam_response - Response buffer for SAM request. + * @capacity: Capacity of the buffer, in bytes. + * @length: Length of the actual data stored in the memory pointed to by + * @pointer, in bytes. Set by the transport system. + * @pointer: Pointer to the buffer's memory, storing the response payload data. 
+ */ +struct ssam_response { + size_t capacity; + size_t length; + u8 *pointer; +}; + +struct ssam_controller; + +struct ssam_controller *ssam_get_controller(void); +struct ssam_controller *ssam_client_bind(struct device *client); +int ssam_client_link(struct ssam_controller *ctrl, struct device *client); + +struct device *ssam_controller_device(struct ssam_controller *c); + +struct ssam_controller *ssam_controller_get(struct ssam_controller *c); +void ssam_controller_put(struct ssam_controller *c); + +void ssam_controller_statelock(struct ssam_controller *c); +void ssam_controller_stateunlock(struct ssam_controller *c); + +ssize_t ssam_request_write_data(struct ssam_span *buf, + struct ssam_controller *ctrl, + const struct ssam_request *spec); + + +/* -- Synchronous request interface. ---------------------------------------- */ + +/** + * struct ssam_request_sync - Synchronous SAM request struct. + * @base: Underlying SSH request. + * @comp: Completion used to signal full completion of the request. After the + * request has been submitted, this struct may only be modified or + * deallocated after the completion has been signaled. + * @resp: Buffer to store the response. + * @status: Status of the request, set after the base request has been + * completed or has failed. + */ +struct ssam_request_sync { + struct ssh_request base; + struct completion comp; + struct ssam_response *resp; + int status; +}; + +int ssam_request_sync_alloc(size_t payload_len, gfp_t flags, + struct ssam_request_sync **rqst, + struct ssam_span *buffer); + +void ssam_request_sync_free(struct ssam_request_sync *rqst); + +int ssam_request_sync_init(struct ssam_request_sync *rqst, + enum ssam_request_flags flags); + +/** + * ssam_request_sync_set_data - Set message data of a synchronous request. + * @rqst: The request. + * @ptr: Pointer to the request message data. + * @len: Length of the request message data. + * + * Set the request message data of a synchronous request. The provided buffer + * needs to live until the request has been completed. + */ +static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst, + u8 *ptr, size_t len) +{ + ssh_request_set_data(&rqst->base, ptr, len); +} + +/** + * ssam_request_sync_set_resp - Set response buffer of a synchronous request. + * @rqst: The request. + * @resp: The response buffer. + * + * Sets the response buffer of a synchronous request. This buffer will store + * the response of the request after it has been completed. May be %NULL if no + * response is expected. + */ +static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst, + struct ssam_response *resp) +{ + rqst->resp = resp; +} + +int ssam_request_sync_submit(struct ssam_controller *ctrl, + struct ssam_request_sync *rqst); + +/** + * ssam_request_sync_wait - Wait for completion of a synchronous request. + * @rqst: The request to wait for. + * + * Wait for completion and release of a synchronous request. After this + * function terminates, the request is guaranteed to have left the transport + * system. After successful submission of a request, this function must be + * called before accessing the response of the request, freeing the request, + * or freeing any of the buffers associated with the request. + * + * This function must not be called if the request has not been submitted yet + * and may lead to a deadlock/infinite wait if a subsequent request submission + * fails in that case, due to the completion never triggering. 
+ * + * Return: Returns the status of the given request, which is set on completion + * of the packet. This value is zero on success and negative on failure. + */ +static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst) +{ + wait_for_completion(&rqst->comp); + return rqst->status; +} + +int ssam_request_sync(struct ssam_controller *ctrl, + const struct ssam_request *spec, + struct ssam_response *rsp); + +int ssam_request_sync_with_buffer(struct ssam_controller *ctrl, + const struct ssam_request *spec, + struct ssam_response *rsp, + struct ssam_span *buf); + +/** + * ssam_request_sync_onstack - Execute a synchronous request on the stack. + * @ctrl: The controller via which the request is submitted. + * @rqst: The request specification. + * @rsp: The response buffer. + * @payload_len: The (maximum) request payload length. + * + * Allocates a synchronous request with specified payload length on the stack, + * fully initializes it via the provided request specification, submits it, + * and finally waits for its completion before returning its status. This + * helper macro essentially allocates the request message buffer on the stack + * and then calls ssam_request_sync_with_buffer(). + * + * Note: The @payload_len parameter specifies the maximum payload length, used + * for buffer allocation. The actual payload length may be smaller. + * + * Return: Returns the status of the request or any failure during setup, i.e. + * zero on success and a negative value on failure. + */ +#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \ + ({ \ + u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \ + struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \ + \ + ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \ + }) + +/** + * __ssam_retry - Retry request in case of I/O errors or timeouts. + * @request: The request function to execute. Must return an integer. + * @n: Number of tries. + * @args: Arguments for the request function. + * + * Executes the given request function, i.e. calls @request. In case the + * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request + * or underlying packet timed out), @request will be re-executed again, up to + * @n times in total. + * + * Return: Returns the return value of the last execution of @request. + */ +#define __ssam_retry(request, n, args...) \ + ({ \ + int __i, __s = 0; \ + \ + for (__i = (n); __i > 0; __i--) { \ + __s = request(args); \ + if (__s != -ETIMEDOUT && __s != -EREMOTEIO) \ + break; \ + } \ + __s; \ + }) + +/** + * ssam_retry - Retry request in case of I/O errors or timeouts up to three + * times in total. + * @request: The request function to execute. Must return an integer. + * @args: Arguments for the request function. + * + * Executes the given request function, i.e. calls @request. In case the + * request returns %-EREMOTEIO (indicates I/O error) or -%ETIMEDOUT (request + * or underlying packet timed out), @request will be re-executed again, up to + * three times in total. + * + * See __ssam_retry() for a more generic macro for this purpose. + * + * Return: Returns the return value of the last execution of @request. + */ +#define ssam_retry(request, args...) \ + __ssam_retry(request, 3, args) + +/** + * struct ssam_request_spec - Blue-print specification of SAM request. + * @target_category: Category of the request's target. See &enum ssam_ssh_tc. + * @target_id: ID of the request's target. + * @command_id: Command ID of the request. 
+ * @instance_id: Instance ID of the request's target. + * @flags: Flags for the request. See &enum ssam_request_flags. + * + * Blue-print specification for a SAM request. This struct describes the + * unique static parameters of a request (i.e. type) without specifying any of + * its instance-specific data (e.g. payload). It is intended to be used as base + * for defining simple request functions via the + * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros. + */ +struct ssam_request_spec { + u8 target_category; + u8 target_id; + u8 command_id; + u8 instance_id; + u8 flags; +}; + +/** + * struct ssam_request_spec_md - Blue-print specification for multi-device SAM + * request. + * @target_category: Category of the request's target. See &enum ssam_ssh_tc. + * @command_id: Command ID of the request. + * @flags: Flags for the request. See &enum ssam_request_flags. + * + * Blue-print specification for a multi-device SAM request, i.e. a request + * that is applicable to multiple device instances, described by their + * individual target and instance IDs. This struct describes the unique static + * parameters of a request (i.e. type) without specifying any of its + * instance-specific data (e.g. payload) and without specifying any of its + * device specific IDs (i.e. target and instance ID). It is intended to be + * used as base for defining simple multi-device request functions via the + * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()`` + * families of macros. + */ +struct ssam_request_spec_md { + u8 target_category; + u8 command_id; + u8 flags; +}; + +/** + * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function + * with neither argument nor return value. + * @name: Name of the generated function. + * @spec: Specification (&struct ssam_request_spec) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request having neither argument nor return value. The + * generated function takes care of setting up the request struct and buffer + * allocation, as well as execution of the request itself, returning once the + * request has been fully completed. The required transport buffer will be + * allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl)``, returning the status of the request, which is zero on success and + * negative on failure. The ``ctrl`` parameter is the controller via which the + * request is being sent. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \ + int name(struct ssam_controller *ctrl) \ + { \ + struct ssam_request_spec s = (struct ssam_request_spec)spec; \ + struct ssam_request rqst; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = s.target_id; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = s.instance_id; \ + rqst.flags = s.flags; \ + rqst.length = 0; \ + rqst.payload = NULL; \ + \ + return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with + * argument. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @spec: Specification (&struct ssam_request_spec) defining the request. 
+ * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking an argument of type @atype and having no + * return value. The generated function takes care of setting up the request + * struct, buffer allocation, as well as execution of the request itself, + * returning once the request has been fully completed. The required transport + * buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl, const atype *arg)``, returning the status of the request, which is + * zero on success and negative on failure. The ``ctrl`` parameter is the + * controller via which the request is sent. The request argument is specified + * via the ``arg`` pointer. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \ + int name(struct ssam_controller *ctrl, const atype *arg) \ + { \ + struct ssam_request_spec s = (struct ssam_request_spec)spec; \ + struct ssam_request rqst; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = s.target_id; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = s.instance_id; \ + rqst.flags = s.flags; \ + rqst.length = sizeof(atype); \ + rqst.payload = (u8 *)arg; \ + \ + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \ + sizeof(atype)); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with + * return value. + * @name: Name of the generated function. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking no argument but having a return value of + * type @rtype. The generated function takes care of setting up the request + * and response structs, buffer allocation, as well as execution of the + * request itself, returning once the request has been fully completed. The + * required transport buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl, rtype *ret)``, returning the status of the request, which is zero on + * success and negative on failure. The ``ctrl`` parameter is the controller + * via which the request is sent. The request's return value is written to the + * memory pointed to by the ``ret`` parameter. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) 
\ + int name(struct ssam_controller *ctrl, rtype *ret) \ + { \ + struct ssam_request_spec s = (struct ssam_request_spec)spec; \ + struct ssam_request rqst; \ + struct ssam_response rsp; \ + int status; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = s.target_id; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = s.instance_id; \ + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ + rqst.length = 0; \ + rqst.payload = NULL; \ + \ + rsp.capacity = sizeof(rtype); \ + rsp.length = 0; \ + rsp.pointer = (u8 *)ret; \ + \ + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \ + if (status) \ + return status; \ + \ + if (rsp.length != sizeof(rtype)) { \ + struct device *dev = ssam_controller_device(ctrl); \ + dev_err(dev, \ + "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \ + sizeof(rtype), rsp.length, rqst.target_category,\ + rqst.command_id); \ + return -EIO; \ + } \ + \ + return 0; \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM + * request function with neither argument nor return value. + * @name: Name of the generated function. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request having neither argument nor return value. Device + * specifying parameters are not hard-coded, but instead must be provided to + * the function. The generated function takes care of setting up the request + * struct, buffer allocation, as well as execution of the request itself, + * returning once the request has been fully completed. The required transport + * buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is + * zero on success and negative on failure. The ``ctrl`` parameter is the + * controller via which the request is sent, ``tid`` the target ID for the + * request, and ``iid`` the instance ID. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \ + int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \ + { \ + struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ + struct ssam_request rqst; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = tid; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = iid; \ + rqst.flags = s.flags; \ + rqst.length = 0; \ + rqst.payload = NULL; \ + \ + return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM + * request function with argument. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking an argument of type @atype and having no + * return value. Device specifying parameters are not hard-coded, but instead + * must be provided to the function. The generated function takes care of + * setting up the request struct, buffer allocation, as well as execution of + * the request itself, returning once the request has been fully completed. + * The required transport buffer will be allocated on the stack. 
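As a rough usage sketch for the SSAM_DEFINE_SYNC_REQUEST_W() macro above (the function name, command ID, instance ID, argument type, and the SSAM_SSH_TC_TMP category taken from the target-category enum added later in this series are illustrative assumptions, not values from this patch):

SSAM_DEFINE_SYNC_REQUEST_W(ssam_example_set_value, __le32, {
	.target_category = SSAM_SSH_TC_TMP,
	.target_id       = 0x01,
	.command_id      = 0x03,
	.instance_id     = 0x00,
});

static int ssam_example_enable(struct ssam_controller *ctrl)
{
	__le32 value = cpu_to_le32(1);

	/* Generated wrapper: builds the request and sends it synchronously. */
	return ssam_example_set_value(ctrl, &value);
}

The other macros of this family follow the same pattern, differing only in whether a payload argument and/or a response pointer is present.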
+ * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the + * request, which is zero on success and negative on failure. The ``ctrl`` + * parameter is the controller via which the request is sent, ``tid`` the + * target ID for the request, and ``iid`` the instance ID. The request argument + * is specified via the ``arg`` pointer. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \ + int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\ + { \ + struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ + struct ssam_request rqst; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = tid; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = iid; \ + rqst.flags = s.flags; \ + rqst.length = sizeof(atype); \ + rqst.payload = (u8 *)arg; \ + \ + return ssam_request_sync_onstack(ctrl, &rqst, NULL, \ + sizeof(atype)); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM + * request function with return value. + * @name: Name of the generated function. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking no argument but having a return value of + * type @rtype. Device specifying parameters are not hard-coded, but instead + * must be provided to the function. The generated function takes care of + * setting up the request and response structs, buffer allocation, as well as + * execution of the request itself, returning once the request has been fully + * completed. The required transport buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_controller + * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request, + * which is zero on success and negative on failure. The ``ctrl`` parameter is + * the controller via which the request is sent, ``tid`` the target ID for the + * request, and ``iid`` the instance ID. The request's return value is written + * to the memory pointed to by the ``ret`` parameter. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) 
\ + int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \ + { \ + struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ + struct ssam_request rqst; \ + struct ssam_response rsp; \ + int status; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = tid; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = iid; \ + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ + rqst.length = 0; \ + rqst.payload = NULL; \ + \ + rsp.capacity = sizeof(rtype); \ + rsp.length = 0; \ + rsp.pointer = (u8 *)ret; \ + \ + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \ + if (status) \ + return status; \ + \ + if (rsp.length != sizeof(rtype)) { \ + struct device *dev = ssam_controller_device(ctrl); \ + dev_err(dev, \ + "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \ + sizeof(rtype), rsp.length, rqst.target_category,\ + rqst.command_id); \ + return -EIO; \ + } \ + \ + return 0; \ + } + + +/* -- Event notifier/callbacks. --------------------------------------------- */ + +#define SSAM_NOTIF_STATE_SHIFT 2 +#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1) + +/** + * enum ssam_notif_flags - Flags used in return values from SSAM notifier + * callback functions. + * + * @SSAM_NOTIF_HANDLED: + * Indicates that the notification has been handled. This flag should be + * set by the handler if the handler can act/has acted upon the event + * provided to it. This flag should not be set if the handler is not a + * primary handler intended for the provided event. + * + * If this flag has not been set by any handler after the notifier chain + * has been traversed, a warning will be emitted, stating that the event + * has not been handled. + * + * @SSAM_NOTIF_STOP: + * Indicates that the notifier traversal should stop. If this flag is + * returned from a notifier callback, notifier chain traversal will + * immediately stop and any remaining notifiers will not be called. This + * flag is automatically set when ssam_notifier_from_errno() is called + * with a negative error value. + */ +enum ssam_notif_flags { + SSAM_NOTIF_HANDLED = BIT(0), + SSAM_NOTIF_STOP = BIT(1), +}; + +struct ssam_event_notifier; + +typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf, + const struct ssam_event *event); + +/** + * struct ssam_notifier_block - Base notifier block for SSAM event + * notifications. + * @node: The node for the list of notifiers. + * @fn: The callback function of this notifier. This function takes the + * respective notifier block and event as input and should return + * a notifier value, which can either be obtained from the flags + * provided in &enum ssam_notif_flags, converted from a standard + * error value via ssam_notifier_from_errno(), or a combination of + * both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``). + * @priority: Priority value determining the order in which notifier callbacks + * will be called. A higher value means higher priority, i.e. the + * associated callback will be executed earlier than other (lower + * priority) callbacks. + */ +struct ssam_notifier_block { + struct list_head node; + ssam_notifier_fn_t fn; + int priority; +}; + +/** + * ssam_notifier_from_errno() - Convert standard error value to notifier + * return code. + * @err: The error code to convert, must be negative (in case of failure) or + * zero (in case of success). + * + * Return: Returns the notifier return value obtained by converting the + * specified @err value. 
In case @err is negative, the %SSAM_NOTIF_STOP flag + * will be set, causing notifier call chain traversal to abort. + */ +static inline u32 ssam_notifier_from_errno(int err) +{ + if (WARN_ON(err > 0) || err == 0) + return 0; + else + return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP; +} + +/** + * ssam_notifier_to_errno() - Convert notifier return code to standard error + * value. + * @ret: The notifier return value to convert. + * + * Return: Returns the negative error value encoded in @ret or zero if @ret + * indicates success. + */ +static inline int ssam_notifier_to_errno(u32 ret) +{ + return -(ret >> SSAM_NOTIF_STATE_SHIFT); +} + + +/* -- Event/notification registry. ------------------------------------------ */ + +/** + * struct ssam_event_registry - Registry specification used for enabling events. + * @target_category: Target category for the event registry requests. + * @target_id: Target ID for the event registry requests. + * @cid_enable: Command ID for the event-enable request. + * @cid_disable: Command ID for the event-disable request. + * + * This struct describes a SAM event registry via the minimal collection of + * SAM IDs specifying the requests to use for enabling and disabling an event. + * The individual event to be enabled/disabled itself is specified via &struct + * ssam_event_id. + */ +struct ssam_event_registry { + u8 target_category; + u8 target_id; + u8 cid_enable; + u8 cid_disable; +}; + +/** + * struct ssam_event_id - Unique event ID used for enabling events. + * @target_category: Target category of the event source. + * @instance: Instance ID of the event source. + * + * This struct specifies the event to be enabled/disabled via an externally + * provided registry. It does not specify the registry to be used itself, this + * is done via &struct ssam_event_registry. + */ +struct ssam_event_id { + u8 target_category; + u8 instance; +}; + +/** + * enum ssam_event_mask - Flags specifying how events are matched to notifiers. + * + * @SSAM_EVENT_MASK_NONE: + * Run the callback for any event with matching target category. Do not + * do any additional filtering. + * + * @SSAM_EVENT_MASK_TARGET: + * In addition to filtering by target category, only execute the notifier + * callback for events with a target ID matching to the one of the + * registry used for enabling/disabling the event. + * + * @SSAM_EVENT_MASK_INSTANCE: + * In addition to filtering by target category, only execute the notifier + * callback for events with an instance ID matching to the instance ID + * used when enabling the event. + * + * @SSAM_EVENT_MASK_STRICT: + * Do all the filtering above. + */ +enum ssam_event_mask { + SSAM_EVENT_MASK_TARGET = BIT(0), + SSAM_EVENT_MASK_INSTANCE = BIT(1), + + SSAM_EVENT_MASK_NONE = 0, + SSAM_EVENT_MASK_STRICT = + SSAM_EVENT_MASK_TARGET + | SSAM_EVENT_MASK_INSTANCE, +}; + +/** + * SSAM_EVENT_REGISTRY() - Define a new event registry. + * @tc: Target category for the event registry requests. + * @tid: Target ID for the event registry requests. + * @cid_en: Command ID for the event-enable request. + * @cid_dis: Command ID for the event-disable request. + * + * Return: Returns the &struct ssam_event_registry specified by the given + * parameters. 
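To show how the notifier pieces above fit together, a hedged sketch; the callback logic, priority, and the battery target category are illustrative assumptions, and the notifier struct, SSAM_EVENT_REGISTRY_SAM, and ssam_notifier_register() used here are introduced just below:

static u32 ssam_example_notify(struct ssam_event_notifier *nf,
			       const struct ssam_event *event)
{
	int err = 0;	/* placeholder: result of processing the event */

	if (err)
		return ssam_notifier_from_errno(err) | SSAM_NOTIF_HANDLED;

	return SSAM_NOTIF_HANDLED;
}

static int ssam_example_setup_events(struct ssam_controller *ctrl,
				     struct ssam_event_notifier *nf)
{
	nf->base.fn = ssam_example_notify;
	nf->base.priority = 1;
	nf->event.reg = SSAM_EVENT_REGISTRY_SAM;
	nf->event.id.target_category = SSAM_SSH_TC_BAT;
	nf->event.id.instance = 0x00;
	nf->event.mask = SSAM_EVENT_MASK_STRICT;
	nf->event.flags = 0;	/* event-enable flags as required by the source */

	return ssam_notifier_register(ctrl, nf);
}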
+ */ +#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis) \ + ((struct ssam_event_registry) { \ + .target_category = (tc), \ + .target_id = (tid), \ + .cid_enable = (cid_en), \ + .cid_disable = (cid_dis), \ + }) + +#define SSAM_EVENT_REGISTRY_SAM \ + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c) + +#define SSAM_EVENT_REGISTRY_KIP \ + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28) + +#define SSAM_EVENT_REGISTRY_REG \ + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02) + +/** + * struct ssam_event_notifier - Notifier block for SSAM events. + * @base: The base notifier block with callback function and priority. + * @event: The event for which this block will receive notifications. + * @event.reg: Registry via which the event will be enabled/disabled. + * @event.id: ID specifying the event. + * @event.mask: Flags determining how events are matched to the notifier. + * @event.flags: Flags used for enabling the event. + */ +struct ssam_event_notifier { + struct ssam_notifier_block base; + + struct { + struct ssam_event_registry reg; + struct ssam_event_id id; + enum ssam_event_mask mask; + u8 flags; + } event; +}; + +int ssam_notifier_register(struct ssam_controller *ctrl, + struct ssam_event_notifier *n); + +int ssam_notifier_unregister(struct ssam_controller *ctrl, + struct ssam_event_notifier *n); + +#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */ diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h new file mode 100644 index 000000000000..02f3e06c0a60 --- /dev/null +++ b/include/linux/surface_aggregator/device.h @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Surface System Aggregator Module (SSAM) bus and client-device subsystem. + * + * Main interface for the surface-aggregator bus, surface-aggregator client + * devices, and respective drivers building on top of the SSAM controller. + * Provides support for non-platform/non-ACPI SSAM clients via dedicated + * subsystem. + * + * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H +#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/types.h> + +#include <linux/surface_aggregator/controller.h> + + +/* -- Surface System Aggregator Module bus. --------------------------------- */ + +/** + * enum ssam_device_domain - SAM device domain. + * @SSAM_DOMAIN_VIRTUAL: Virtual device. + * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub. + */ +enum ssam_device_domain { + SSAM_DOMAIN_VIRTUAL = 0x00, + SSAM_DOMAIN_SERIALHUB = 0x01, +}; + +/** + * enum ssam_virtual_tc - Target categories for the virtual SAM domain. + * @SSAM_VIRTUAL_TC_HUB: Device hub category. + */ +enum ssam_virtual_tc { + SSAM_VIRTUAL_TC_HUB = 0x00, +}; + +/** + * struct ssam_device_uid - Unique identifier for SSAM device. + * @domain: Domain of the device. + * @category: Target category of the device. + * @target: Target ID of the device. + * @instance: Instance ID of the device. + * @function: Sub-function of the device. This field can be used to split a + * single SAM device into multiple virtual subdevices to separate + * different functionality of that device and allow one driver per + * such functionality. + */ +struct ssam_device_uid { + u8 domain; + u8 category; + u8 target; + u8 instance; + u8 function; +}; + +/* + * Special values for device matching. 
+ * + * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and + * SSAM_SDEV() exclusively. Specifically, they are used to initialize the + * match_flags member of the device ID structure. Do not use them directly + * with struct ssam_device_id or struct ssam_device_uid. + */ +#define SSAM_ANY_TID 0xffff +#define SSAM_ANY_IID 0xffff +#define SSAM_ANY_FUN 0xffff + +/** + * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given + * parameters. + * @d: Domain of the device. + * @cat: Target category of the device. + * @tid: Target ID of the device. + * @iid: Instance ID of the device. + * @fun: Sub-function of the device. + * + * Initializes a &struct ssam_device_id with the given parameters. See &struct + * ssam_device_uid for details regarding the parameters. The special values + * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that + * matching should ignore target ID, instance ID, and/or sub-function, + * respectively. This macro initializes the ``match_flags`` field based on the + * given parameters. + * + * Note: The parameters @d and @cat must be valid &u8 values, the parameters + * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID, + * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not + * allowed. + */ +#define SSAM_DEVICE(d, cat, tid, iid, fun) \ + .match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0) \ + | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0) \ + | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \ + .domain = d, \ + .category = cat, \ + .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \ + .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \ + .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \ + +/** + * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with + * the given parameters. + * @cat: Target category of the device. + * @tid: Target ID of the device. + * @iid: Instance ID of the device. + * @fun: Sub-function of the device. + * + * Initializes a &struct ssam_device_id with the given parameters in the + * virtual domain. See &struct ssam_device_uid for details regarding the + * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and + * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID, + * instance ID, and/or sub-function, respectively. This macro initializes the + * ``match_flags`` field based on the given parameters. + * + * Note: The parameter @cat must be a valid &u8 value, the parameters @tid, + * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID, + * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not + * allowed. + */ +#define SSAM_VDEV(cat, tid, iid, fun) \ + SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun) + +/** + * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device + * with the given parameters. + * @cat: Target category of the device. + * @tid: Target ID of the device. + * @iid: Instance ID of the device. + * @fun: Sub-function of the device. + * + * Initializes a &struct ssam_device_id with the given parameters in the SSH + * domain. See &struct ssam_device_uid for details regarding the parameters. + * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be + * used to specify that matching should ignore target ID, instance ID, and/or + * sub-function, respectively. This macro initializes the ``match_flags`` + * field based on the given parameters. 
+ * + * Note: The parameter @cat must be a valid &u8 value, the parameters @tid, + * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID, + * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not + * allowed. + */ +#define SSAM_SDEV(cat, tid, iid, fun) \ + SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun) + +/** + * struct ssam_device - SSAM client device. + * @dev: Driver model representation of the device. + * @ctrl: SSAM controller managing this device. + * @uid: UID identifying the device. + */ +struct ssam_device { + struct device dev; + struct ssam_controller *ctrl; + + struct ssam_device_uid uid; +}; + +/** + * struct ssam_device_driver - SSAM client device driver. + * @driver: Base driver model structure. + * @match_table: Match table specifying which devices the driver should bind to. + * @probe: Called when the driver is being bound to a device. + * @remove: Called when the driver is being unbound from the device. + */ +struct ssam_device_driver { + struct device_driver driver; + + const struct ssam_device_id *match_table; + + int (*probe)(struct ssam_device *sdev); + void (*remove)(struct ssam_device *sdev); +}; + +extern struct bus_type ssam_bus_type; +extern const struct device_type ssam_device_type; + +/** + * is_ssam_device() - Check if the given device is a SSAM client device. + * @d: The device to test the type of. + * + * Return: Returns %true if the specified device is of type &struct + * ssam_device, i.e. the device type points to %ssam_device_type, and %false + * otherwise. + */ +static inline bool is_ssam_device(struct device *d) +{ + return d->type == &ssam_device_type; +} + +/** + * to_ssam_device() - Casts the given device to a SSAM client device. + * @d: The device to cast. + * + * Casts the given &struct device to a &struct ssam_device. The caller has to + * ensure that the given device is actually enclosed in a &struct ssam_device, + * e.g. by calling is_ssam_device(). + * + * Return: Returns a pointer to the &struct ssam_device wrapping the given + * device @d. + */ +static inline struct ssam_device *to_ssam_device(struct device *d) +{ + return container_of(d, struct ssam_device, dev); +} + +/** + * to_ssam_device_driver() - Casts the given device driver to a SSAM client + * device driver. + * @d: The driver to cast. + * + * Casts the given &struct device_driver to a &struct ssam_device_driver. The + * caller has to ensure that the given driver is actually enclosed in a + * &struct ssam_device_driver. + * + * Return: Returns the pointer to the &struct ssam_device_driver wrapping the + * given device driver @d. + */ +static inline +struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d) +{ + return container_of(d, struct ssam_device_driver, driver); +} + +const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table, + const struct ssam_device_uid uid); + +const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev); + +const void *ssam_device_get_match_data(const struct ssam_device *dev); + +struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl, + struct ssam_device_uid uid); + +int ssam_device_add(struct ssam_device *sdev); +void ssam_device_remove(struct ssam_device *sdev); + +/** + * ssam_device_get() - Increment reference count of SSAM client device. + * @sdev: The device to increment the reference count of. 
+ * + * Increments the reference count of the given SSAM client device by + * incrementing the reference count of the enclosed &struct device via + * get_device(). + * + * See ssam_device_put() for the counter-part of this function. + * + * Return: Returns the device provided as input. + */ +static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev) +{ + return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL; +} + +/** + * ssam_device_put() - Decrement reference count of SSAM client device. + * @sdev: The device to decrement the reference count of. + * + * Decrements the reference count of the given SSAM client device by + * decrementing the reference count of the enclosed &struct device via + * put_device(). + * + * See ssam_device_get() for the counter-part of this function. + */ +static inline void ssam_device_put(struct ssam_device *sdev) +{ + if (sdev) + put_device(&sdev->dev); +} + +/** + * ssam_device_get_drvdata() - Get driver-data of SSAM client device. + * @sdev: The device to get the driver-data from. + * + * Return: Returns the driver-data of the given device, previously set via + * ssam_device_set_drvdata(). + */ +static inline void *ssam_device_get_drvdata(struct ssam_device *sdev) +{ + return dev_get_drvdata(&sdev->dev); +} + +/** + * ssam_device_set_drvdata() - Set driver-data of SSAM client device. + * @sdev: The device to set the driver-data of. + * @data: The data to set the device's driver-data pointer to. + */ +static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data) +{ + dev_set_drvdata(&sdev->dev, data); +} + +int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o); +void ssam_device_driver_unregister(struct ssam_device_driver *d); + +/** + * ssam_device_driver_register() - Register a SSAM client device driver. + * @drv: The driver to register. + */ +#define ssam_device_driver_register(drv) \ + __ssam_device_driver_register(drv, THIS_MODULE) + +/** + * module_ssam_device_driver() - Helper macro for SSAM device driver + * registration. + * @drv: The driver managed by this module. + * + * Helper macro to register a SSAM device driver via module_init() and + * module_exit(). This macro may only be used once per module and replaces the + * aforementioned definitions. + */ +#define module_ssam_device_driver(drv) \ + module_driver(drv, ssam_device_driver_register, \ + ssam_device_driver_unregister) + + +/* -- Helpers for client-device requests. ----------------------------------- */ + +/** + * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM + * request function with neither argument nor return value. + * @name: Name of the generated function. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request having neither argument nor return value. Device + * specifying parameters are not hard-coded, but instead are provided via the + * client device, specifically its UID, supplied when calling this function. + * The generated function takes care of setting up the request struct, buffer + * allocation, as well as execution of the request itself, returning once the + * request has been fully completed. The required transport buffer will be + * allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_device *sdev)``, + * returning the status of the request, which is zero on success and negative + * on failure. 
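Putting the bus pieces above together, a minimal client-driver skeleton might look as follows; the HID category match, target ID, driver name, and the empty probe/remove bodies are placeholders, not part of this patch:

static int ssam_example_probe(struct ssam_device *sdev)
{
	return 0;	/* set up driver data via ssam_device_set_drvdata() here */
}

static void ssam_example_remove(struct ssam_device *sdev)
{
}

static const struct ssam_device_id ssam_example_ids[] = {
	{ SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
	{ },
};

static struct ssam_device_driver ssam_example_driver = {
	.probe = ssam_example_probe,
	.remove = ssam_example_remove,
	.match_table = ssam_example_ids,
	.driver = {
		.name = "ssam_example",
	},
};
module_ssam_device_driver(ssam_example_driver);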
The ``sdev`` parameter specifies both the target device of the + * request and by association the controller via which the request is sent. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \ + SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \ + int name(struct ssam_device *sdev) \ + { \ + return __raw_##name(sdev->ctrl, sdev->uid.target, \ + sdev->uid.instance); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM + * request function with argument. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking an argument of type @atype and having no + * return value. Device specifying parameters are not hard-coded, but instead + * are provided via the client device, specifically its UID, supplied when + * calling this function. The generated function takes care of setting up the + * request struct, buffer allocation, as well as execution of the request + * itself, returning once the request has been fully completed. The required + * transport buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_device *sdev, + * const atype *arg)``, returning the status of the request, which is zero on + * success and negative on failure. The ``sdev`` parameter specifies both the + * target device of the request and by association the controller via which + * the request is sent. The request's argument is specified via the ``arg`` + * pointer. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \ + SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \ + int name(struct ssam_device *sdev, const atype *arg) \ + { \ + return __raw_##name(sdev->ctrl, sdev->uid.target, \ + sdev->uid.instance, arg); \ + } + +/** + * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM + * request function with return value. + * @name: Name of the generated function. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by + * @spec, with the request taking no argument but having a return value of + * type @rtype. Device specifying parameters are not hard-coded, but instead + * are provided via the client device, specifically its UID, supplied when + * calling this function. The generated function takes care of setting up the + * request struct, buffer allocation, as well as execution of the request + * itself, returning once the request has been fully completed. The required + * transport buffer will be allocated on the stack. + * + * The generated function is defined as ``int name(struct ssam_device *sdev, + * rtype *ret)``, returning the status of the request, which is zero on + * success and negative on failure. The ``sdev`` parameter specifies both the + * target device of the request and by association the controller via which + * the request is sent. The request's return value is written to the memory + * pointed to by the ``ret`` parameter. 
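A hedged sketch of the client-device variant documented above (the macro itself is defined just below); the command ID is made up, and SSAM_SSH_TC_KIP is merely picked from the target-category enum in serial_hub.h further down:

SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_example_get_state, u8, {
	.target_category = SSAM_SSH_TC_KIP,
	.command_id      = 0x01,
});

static int ssam_example_query(struct ssam_device *sdev)
{
	u8 state;
	int status;

	/* Target and instance IDs are taken from sdev->uid automatically. */
	status = ssam_example_get_state(sdev, &state);
	if (status)
		return status;

	/* ... act on state ... */
	return 0;
}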
+ * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \ + SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \ + int name(struct ssam_device *sdev, rtype *ret) \ + { \ + return __raw_##name(sdev->ctrl, sdev->uid.target, \ + sdev->uid.instance, ret); \ + } + +#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */ diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h new file mode 100644 index 000000000000..64276fbfa1d5 --- /dev/null +++ b/include/linux/surface_aggregator/serial_hub.h @@ -0,0 +1,672 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Surface Serial Hub (SSH) protocol and communication interface. + * + * Lower-level communication layers and SSH protocol definitions for the + * Surface System Aggregator Module (SSAM). Provides the interface for basic + * packet- and request-based communication with the SSAM EC via SSH. + * + * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H +#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H + +#include <linux/crc-ccitt.h> +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/types.h> + + +/* -- Data structures for SAM-over-SSH communication. ----------------------- */ + +/** + * enum ssh_frame_type - Frame types for SSH frames. + * + * @SSH_FRAME_TYPE_DATA_SEQ: + * Indicates a data frame, followed by a payload with the length specified + * in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning + * that an ACK is required. + * + * @SSH_FRAME_TYPE_DATA_NSQ: + * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the + * message does not have to be ACKed. + * + * @SSH_FRAME_TYPE_ACK: + * Indicates an ACK message. + * + * @SSH_FRAME_TYPE_NAK: + * Indicates an error response for previously sent frame. In general, this + * means that the frame and/or payload is malformed, e.g. a CRC is wrong. + * For command-type payloads, this can also mean that the command is + * invalid. + */ +enum ssh_frame_type { + SSH_FRAME_TYPE_DATA_SEQ = 0x80, + SSH_FRAME_TYPE_DATA_NSQ = 0x00, + SSH_FRAME_TYPE_ACK = 0x40, + SSH_FRAME_TYPE_NAK = 0x04, +}; + +/** + * struct ssh_frame - SSH communication frame. + * @type: The type of the frame. See &enum ssh_frame_type. + * @len: The length of the frame payload directly following the CRC for this + * frame. Does not include the final CRC for that payload. + * @seq: The sequence number for this message/exchange. + */ +struct ssh_frame { + u8 type; + __le16 len; + u8 seq; +} __packed; + +static_assert(sizeof(struct ssh_frame) == 4); + +/* + * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes. + * + * This is the physical maximum length of the protocol. Implementations may + * set a more constrained limit. + */ +#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX + +/** + * enum ssh_payload_type - Type indicator for the SSH payload. + * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command + * payload. + */ +enum ssh_payload_type { + SSH_PLD_TYPE_CMD = 0x80, +}; + +/** + * struct ssh_command - Payload of a command-type frame. + * @type: The type of the payload. See &enum ssh_payload_type. Should be + * SSH_PLD_TYPE_CMD for this struct. + * @tc: Command target category. + * @tid_out: Output target ID. Should be zero if this an incoming (EC to host) + * message. + * @tid_in: Input target ID. 
Should be zero if this is an outgoing (host to + * EC) message. + * @iid: Instance ID. + * @rqid: Request ID. Used to match requests with responses and differentiate + * between responses and events. + * @cid: Command ID. + */ +struct ssh_command { + u8 type; + u8 tc; + u8 tid_out; + u8 tid_in; + u8 iid; + __le16 rqid; + u8 cid; +} __packed; + +static_assert(sizeof(struct ssh_command) == 8); + +/* + * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes. + * + * This is the physical maximum length of the protocol. Implementations may + * set a more constrained limit. + */ +#define SSH_COMMAND_MAX_PAYLOAD_SIZE \ + (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command)) + +/* + * SSH_MSG_LEN_BASE - Base-length of a SSH message. + * + * This is the minimum number of bytes required to form a message. The actual + * message length is SSH_MSG_LEN_BASE plus the length of the frame payload. + */ +#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16)) + +/* + * SSH_MSG_LEN_CTRL - Length of a SSH control message. + * + * This is the length of a SSH control message, which is equal to a SSH + * message without any payload. + */ +#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE + +/** + * SSH_MESSAGE_LENGTH() - Compute length of SSH message. + * @payload_size: Length of the payload inside the SSH frame. + * + * Return: Returns the length of a SSH message with payload of specified size. + */ +#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size)) + +/** + * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message. + * @payload_size: Length of the command payload. + * + * Return: Returns the length of a SSH command message with command payload of + * specified size. + */ +#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \ + SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size)) + +/** + * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in + * frame. + * @field: The field for which the offset should be computed. + * + * Return: Returns the offset of the specified &struct ssh_frame field in the + * raw SSH message data as. Takes SYN bytes (u16) preceding the frame into + * account. + */ +#define SSH_MSGOFFSET_FRAME(field) \ + (sizeof(u16) + offsetof(struct ssh_frame, field)) + +/** + * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field + * in command. + * @field: The field for which the offset should be computed. + * + * Return: Returns the offset of the specified &struct ssh_command field in + * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the + * frame CRC (u16) between frame and command into account. + */ +#define SSH_MSGOFFSET_COMMAND(field) \ + (2ull * sizeof(u16) + sizeof(struct ssh_frame) \ + + offsetof(struct ssh_command, field)) + +/* + * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16. + */ +#define SSH_MSG_SYN ((u16)0x55aa) + +/** + * ssh_crc() - Compute CRC for SSH messages. + * @buf: The pointer pointing to the data for which the CRC should be computed. + * @len: The length of the data for which the CRC should be computed. + * + * Return: Returns the CRC computed on the provided data, as used for SSH + * messages. + */ +static inline u16 ssh_crc(const u8 *buf, size_t len) +{ + return crc_ccitt_false(0xffff, buf, len); +} + +/* + * SSH_NUM_EVENTS - The number of reserved event IDs. + * + * The number of reserved event IDs, used for registering an SSH event + * handler. 
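A small, assumption-laden sketch of the sizing and CRC helpers above; the 4-byte payload and the helper name are placeholders, and it assumes the frame CRC is computed over the frame header bytes, as implied by the offset macros:

/*
 * A command message carrying a 4-byte payload occupies
 * SSH_COMMAND_MESSAGE_LENGTH(4) bytes on the wire.
 */
static u16 ssam_example_frame_crc(const u8 *msg)
{
	/* The frame begins right after the two SYN bytes. */
	return ssh_crc(&msg[SSH_MSGOFFSET_FRAME(type)], sizeof(struct ssh_frame));
}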
Valid event IDs are numbers below or equal to this value, with + * exception of zero, which is not an event ID. Thus, this is also the + * absolute maximum number of event handlers that can be registered. + */ +#define SSH_NUM_EVENTS 34 + +/* + * SSH_NUM_TARGETS - The number of communication targets used in the protocol. + */ +#define SSH_NUM_TARGETS 2 + +/** + * ssh_rqid_next_valid() - Return the next valid request ID. + * @rqid: The current request ID. + * + * Return: Returns the next valid request ID, following the current request ID + * provided to this function. This function skips any request IDs reserved for + * events. + */ +static inline u16 ssh_rqid_next_valid(u16 rqid) +{ + return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u; +} + +/** + * ssh_rqid_to_event() - Convert request ID to its corresponding event ID. + * @rqid: The request ID to convert. + */ +static inline u16 ssh_rqid_to_event(u16 rqid) +{ + return rqid - 1u; +} + +/** + * ssh_rqid_is_event() - Check if given request ID is a valid event ID. + * @rqid: The request ID to check. + */ +static inline bool ssh_rqid_is_event(u16 rqid) +{ + return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS; +} + +/** + * ssh_tc_to_rqid() - Convert target category to its corresponding request ID. + * @tc: The target category to convert. + */ +static inline u16 ssh_tc_to_rqid(u8 tc) +{ + return tc; +} + +/** + * ssh_tid_to_index() - Convert target ID to its corresponding target index. + * @tid: The target ID to convert. + */ +static inline u8 ssh_tid_to_index(u8 tid) +{ + return tid - 1u; +} + +/** + * ssh_tid_is_valid() - Check if target ID is valid/supported. + * @tid: The target ID to check. + */ +static inline bool ssh_tid_is_valid(u8 tid) +{ + return ssh_tid_to_index(tid) < SSH_NUM_TARGETS; +} + +/** + * struct ssam_span - Reference to a buffer region. + * @ptr: Pointer to the buffer region. + * @len: Length of the buffer region. + * + * A reference to a (non-owned) buffer segment, consisting of pointer and + * length. Use of this struct indicates non-owned data, i.e. data of which the + * life-time is managed (i.e. it is allocated/freed) via another pointer. + */ +struct ssam_span { + u8 *ptr; + size_t len; +}; + +/* + * Known SSH/EC target categories. + * + * List of currently known target category values; "Known" as in we know they + * exist and are valid on at least some device/model. Detailed functionality + * or the full category name is only known for some of these categories and + * is detailed in the respective comment below. + * + * These values and abbreviations have been extracted from strings inside the + * Windows driver. + */ +enum ssam_ssh_tc { + /* Category 0x00 is invalid for EC use. */ + SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */ + SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */ + SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */ + SSAM_SSH_TC_PMC = 0x04, + SSAM_SSH_TC_FAN = 0x05, + SSAM_SSH_TC_PoM = 0x06, + SSAM_SSH_TC_DBG = 0x07, + SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */ + SSAM_SSH_TC_FWU = 0x09, + SSAM_SSH_TC_UNI = 0x0a, + SSAM_SSH_TC_LPC = 0x0b, + SSAM_SSH_TC_TCL = 0x0c, + SSAM_SSH_TC_SFL = 0x0d, + SSAM_SSH_TC_KIP = 0x0e, + SSAM_SSH_TC_EXT = 0x0f, + SSAM_SSH_TC_BLD = 0x10, + SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */ + SSAM_SSH_TC_SEN = 0x12, + SSAM_SSH_TC_SRQ = 0x13, + SSAM_SSH_TC_MCU = 0x14, + SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. 
*/ + SSAM_SSH_TC_TCH = 0x16, + SSAM_SSH_TC_BKL = 0x17, + SSAM_SSH_TC_TAM = 0x18, + SSAM_SSH_TC_ACC = 0x19, + SSAM_SSH_TC_UFI = 0x1a, + SSAM_SSH_TC_USC = 0x1b, + SSAM_SSH_TC_PEN = 0x1c, + SSAM_SSH_TC_VID = 0x1d, + SSAM_SSH_TC_AUD = 0x1e, + SSAM_SSH_TC_SMC = 0x1f, + SSAM_SSH_TC_KPD = 0x20, + SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */ +}; + + +/* -- Packet transport layer (ptl). ----------------------------------------- */ + +/** + * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet. + * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets. + * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets. + * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets. + * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets. + */ +enum ssh_packet_base_priority { + SSH_PACKET_PRIORITY_FLUSH = 0, /* same as DATA to sequence flush */ + SSH_PACKET_PRIORITY_DATA = 0, + SSH_PACKET_PRIORITY_NAK = 1, + SSH_PACKET_PRIORITY_ACK = 2, +}; + +/* + * Same as SSH_PACKET_PRIORITY() below, only with actual values. + */ +#define __SSH_PACKET_PRIORITY(base, try) \ + (((base) << 4) | ((try) & 0x0f)) + +/** + * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and + * number of tries. + * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g. + * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``. + * @try: The number of tries (must be less than 16). + * + * Compute the combined packet priority. The combined priority is dominated by + * the base priority, whereas the number of (re-)tries decides the precedence + * of packets with the same base priority, giving higher priority to packets + * that already have more tries. + * + * Return: Returns the computed priority as value fitting inside a &u8. A + * higher number means a higher priority. + */ +#define SSH_PACKET_PRIORITY(base, try) \ + __SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try)) + +/** + * ssh_packet_priority_get_try() - Get number of tries from packet priority. + * @priority: The packet priority. + * + * Return: Returns the number of tries encoded in the specified packet + * priority. + */ +static inline u8 ssh_packet_priority_get_try(u8 priority) +{ + return priority & 0x0f; +} + +/** + * ssh_packet_priority_get_base - Get base priority from packet priority. + * @priority: The packet priority. + * + * Return: Returns the base priority encoded in the given packet priority. + */ +static inline u8 ssh_packet_priority_get_base(u8 priority) +{ + return (priority & 0xf0) >> 4; +} + +enum ssh_packet_flags { + /* state flags */ + SSH_PACKET_SF_LOCKED_BIT, + SSH_PACKET_SF_QUEUED_BIT, + SSH_PACKET_SF_PENDING_BIT, + SSH_PACKET_SF_TRANSMITTING_BIT, + SSH_PACKET_SF_TRANSMITTED_BIT, + SSH_PACKET_SF_ACKED_BIT, + SSH_PACKET_SF_CANCELED_BIT, + SSH_PACKET_SF_COMPLETED_BIT, + + /* type flags */ + SSH_PACKET_TY_FLUSH_BIT, + SSH_PACKET_TY_SEQUENCED_BIT, + SSH_PACKET_TY_BLOCKING_BIT, + + /* mask for state flags */ + SSH_PACKET_FLAGS_SF_MASK = + BIT(SSH_PACKET_SF_LOCKED_BIT) + | BIT(SSH_PACKET_SF_QUEUED_BIT) + | BIT(SSH_PACKET_SF_PENDING_BIT) + | BIT(SSH_PACKET_SF_TRANSMITTING_BIT) + | BIT(SSH_PACKET_SF_TRANSMITTED_BIT) + | BIT(SSH_PACKET_SF_ACKED_BIT) + | BIT(SSH_PACKET_SF_CANCELED_BIT) + | BIT(SSH_PACKET_SF_COMPLETED_BIT), + + /* mask for type flags */ + SSH_PACKET_FLAGS_TY_MASK = + BIT(SSH_PACKET_TY_FLUSH_BIT) + | BIT(SSH_PACKET_TY_SEQUENCED_BIT) + | BIT(SSH_PACKET_TY_BLOCKING_BIT), +}; + +struct ssh_ptl; +struct ssh_packet; + +/** + * struct ssh_packet_ops - Callback operations for a SSH packet. 
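For orientation, the combined priority encoding above in action (the values noted are simply what the macros yield, no new definitions are introduced):

/* A data packet on its third transmission attempt: */
u8 prio = SSH_PACKET_PRIORITY(DATA, 2);

/*
 * ssh_packet_priority_get_base(prio) then yields SSH_PACKET_PRIORITY_DATA,
 * and ssh_packet_priority_get_try(prio) yields 2.
 */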
+ * @release: Function called when the packet reference count reaches zero. + * This callback must be relied upon to ensure that the packet has + * left the transport system(s). + * @complete: Function called when the packet is completed, either with + * success or failure. In case of failure, the reason for the + * failure is indicated by the value of the provided status code + * argument. This value will be zero in case of success. Note that + * a call to this callback does not guarantee that the packet is + * not in use by the transport system any more. + */ +struct ssh_packet_ops { + void (*release)(struct ssh_packet *p); + void (*complete)(struct ssh_packet *p, int status); +}; + +/** + * struct ssh_packet - SSH transport packet. + * @ptl: Pointer to the packet transport layer. May be %NULL if the packet + * (or enclosing request) has not been submitted yet. + * @refcnt: Reference count of the packet. + * @priority: Priority of the packet. Must be computed via + * SSH_PACKET_PRIORITY(). Must only be accessed while holding the + * queue lock after first submission. + * @data: Raw message data. + * @data.len: Length of the raw message data. + * @data.ptr: Pointer to the raw message data buffer. + * @state: State and type flags describing current packet state (dynamic) + * and type (static). See &enum ssh_packet_flags for possible + * options. + * @timestamp: Timestamp specifying when the latest transmission of a + * currently pending packet has been started. May be %KTIME_MAX + * before or in-between transmission attempts. Used for the packet + * timeout implementation. Must only be accessed while holding the + * pending lock after first submission. + * @queue_node: The list node for the packet queue. + * @pending_node: The list node for the set of pending packets. + * @ops: Packet operations. + */ +struct ssh_packet { + struct ssh_ptl *ptl; + struct kref refcnt; + + u8 priority; + + struct { + size_t len; + u8 *ptr; + } data; + + unsigned long state; + ktime_t timestamp; + + struct list_head queue_node; + struct list_head pending_node; + + const struct ssh_packet_ops *ops; +}; + +struct ssh_packet *ssh_packet_get(struct ssh_packet *p); +void ssh_packet_put(struct ssh_packet *p); + +/** + * ssh_packet_set_data() - Set raw message data of packet. + * @p: The packet for which the message data should be set. + * @ptr: Pointer to the memory holding the message data. + * @len: Length of the message data. + * + * Sets the raw message data buffer of the packet to the provided memory. The + * memory is not copied. Instead, the caller is responsible for management + * (i.e. allocation and deallocation) of the memory. The caller must ensure + * that the provided memory is valid and contains a valid SSH message, + * starting from the time of submission of the packet until the ``release`` + * callback has been called. During this time, the memory may not be altered + * in any way. + */ +static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len) +{ + p->data.ptr = ptr; + p->data.len = len; +} + + +/* -- Request transport layer (rtl). 
---------------------------------------- */ + +enum ssh_request_flags { + /* state flags */ + SSH_REQUEST_SF_LOCKED_BIT, + SSH_REQUEST_SF_QUEUED_BIT, + SSH_REQUEST_SF_PENDING_BIT, + SSH_REQUEST_SF_TRANSMITTING_BIT, + SSH_REQUEST_SF_TRANSMITTED_BIT, + SSH_REQUEST_SF_RSPRCVD_BIT, + SSH_REQUEST_SF_CANCELED_BIT, + SSH_REQUEST_SF_COMPLETED_BIT, + + /* type flags */ + SSH_REQUEST_TY_FLUSH_BIT, + SSH_REQUEST_TY_HAS_RESPONSE_BIT, + + /* mask for state flags */ + SSH_REQUEST_FLAGS_SF_MASK = + BIT(SSH_REQUEST_SF_LOCKED_BIT) + | BIT(SSH_REQUEST_SF_QUEUED_BIT) + | BIT(SSH_REQUEST_SF_PENDING_BIT) + | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT) + | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT) + | BIT(SSH_REQUEST_SF_RSPRCVD_BIT) + | BIT(SSH_REQUEST_SF_CANCELED_BIT) + | BIT(SSH_REQUEST_SF_COMPLETED_BIT), + + /* mask for type flags */ + SSH_REQUEST_FLAGS_TY_MASK = + BIT(SSH_REQUEST_TY_FLUSH_BIT) + | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), +}; + +struct ssh_rtl; +struct ssh_request; + +/** + * struct ssh_request_ops - Callback operations for a SSH request. + * @release: Function called when the request's reference count reaches zero. + * This callback must be relied upon to ensure that the request has + * left the transport systems (both, packet an request systems). + * @complete: Function called when the request is completed, either with + * success or failure. The command data for the request response + * is provided via the &struct ssh_command parameter (``cmd``), + * the command payload of the request response via the &struct + * ssh_span parameter (``data``). + * + * If the request does not have any response or has not been + * completed with success, both ``cmd`` and ``data`` parameters will + * be NULL. If the request response does not have any command + * payload, the ``data`` span will be an empty (zero-length) span. + * + * In case of failure, the reason for the failure is indicated by + * the value of the provided status code argument (``status``). This + * value will be zero in case of success and a regular errno + * otherwise. + * + * Note that a call to this callback does not guarantee that the + * request is not in use by the transport systems any more. + */ +struct ssh_request_ops { + void (*release)(struct ssh_request *rqst); + void (*complete)(struct ssh_request *rqst, + const struct ssh_command *cmd, + const struct ssam_span *data, int status); +}; + +/** + * struct ssh_request - SSH transport request. + * @packet: The underlying SSH transport packet. + * @node: List node for the request queue and pending set. + * @state: State and type flags describing current request state (dynamic) + * and type (static). See &enum ssh_request_flags for possible + * options. + * @timestamp: Timestamp specifying when we start waiting on the response of + * the request. This is set once the underlying packet has been + * completed and may be %KTIME_MAX before that, or when the request + * does not expect a response. Used for the request timeout + * implementation. + * @ops: Request Operations. + */ +struct ssh_request { + struct ssh_packet packet; + struct list_head node; + + unsigned long state; + ktime_t timestamp; + + const struct ssh_request_ops *ops; +}; + +/** + * to_ssh_request() - Cast a SSH packet to its enclosing SSH request. + * @p: The packet to cast. + * + * Casts the given &struct ssh_packet to its enclosing &struct ssh_request. + * The caller is responsible for making sure that the packet is actually + * wrapped in a &struct ssh_request. 
+ * + * Return: Returns the &struct ssh_request wrapping the provided packet. + */ +static inline struct ssh_request *to_ssh_request(struct ssh_packet *p) +{ + return container_of(p, struct ssh_request, packet); +} + +/** + * ssh_request_get() - Increment reference count of request. + * @r: The request to increment the reference count of. + * + * Increments the reference count of the given request by incrementing the + * reference count of the underlying &struct ssh_packet, enclosed in it. + * + * See also ssh_request_put(), ssh_packet_get(). + * + * Return: Returns the request provided as input. + */ +static inline struct ssh_request *ssh_request_get(struct ssh_request *r) +{ + return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL; +} + +/** + * ssh_request_put() - Decrement reference count of request. + * @r: The request to decrement the reference count of. + * + * Decrements the reference count of the given request by decrementing the + * reference count of the underlying &struct ssh_packet, enclosed in it. If + * the reference count reaches zero, the ``release`` callback specified in the + * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be + * called. + * + * See also ssh_request_get(), ssh_packet_put(). + */ +static inline void ssh_request_put(struct ssh_request *r) +{ + if (r) + ssh_packet_put(&r->packet); +} + +/** + * ssh_request_set_data() - Set raw message data of request. + * @r: The request for which the message data should be set. + * @ptr: Pointer to the memory holding the message data. + * @len: Length of the message data. + * + * Sets the raw message data buffer of the underlying packet to the specified + * buffer. Does not copy the actual message data, just sets the buffer pointer + * and length. Refer to ssh_packet_set_data() for more details. 
+ */ +static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len) +{ + ssh_packet_set_data(&r->packet, ptr, len); +} + +#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 596bc2f4d9b0..4cc6ec3bf0ab 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); +extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, @@ -408,7 +408,11 @@ extern struct address_space *swapper_spaces[]; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) -extern unsigned long total_swapcache_pages(void); +static inline unsigned long total_swapcache_pages(void) +{ + return global_node_page_state(NR_SWAPCACHE); +} + extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); extern void *get_shadow_from_swap_cache(swp_entry_t entry); @@ -468,7 +472,6 @@ extern int free_swap_and_cache(swp_entry_t); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); -extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); extern int __swap_count(swp_entry_t entry); @@ -482,6 +485,7 @@ struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); extern struct swap_info_struct *get_swap_device(swp_entry_t entry); +sector_t swap_page_sector(struct page *page); static inline void put_swap_device(struct swap_info_struct *si) { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index d9c9fc9ca5d2..5857a937c637 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -29,6 +29,7 @@ enum swiotlb_force { * controllable. 
*/ #define IO_TLB_SHIFT 11 +#define IO_TLB_SIZE (1 << IO_TLB_SHIFT) /* default to 64MB */ #define IO_TLB_DEFAULT_SIZE (64UL<<20) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7688bc983de5..2839dc9a7c01 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -68,6 +68,7 @@ union bpf_attr; struct io_uring_params; struct clone_args; struct open_how; +struct mount_attr; #include <linux/types.h> #include <linux/aio_abi.h> @@ -607,11 +608,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags); /* kernel/futex.c */ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, - struct __kernel_timespec __user *utime, u32 __user *uaddr2, - u32 val3); + const struct __kernel_timespec __user *utime, + u32 __user *uaddr2, u32 val3); asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val, - struct old_timespec32 __user *utime, u32 __user *uaddr2, - u32 val3); + const struct old_timespec32 __user *utime, + u32 __user *uaddr2, u32 val3); asmlinkage long sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, size_t __user *len_ptr); @@ -1028,6 +1029,9 @@ asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags); asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path, int to_dfd, const char __user *to_path, unsigned int ms_flags); +asmlinkage long sys_mount_setattr(int dfd, const char __user *path, + unsigned int flags, + struct mount_attr __user *uattr, size_t usize); asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags); asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key, const void __user *value, int aux); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 2caa34c1ca1a..d76a1ddf83a3 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -164,11 +164,13 @@ __ATTRIBUTE_GROUPS(_name) struct file; struct vm_area_struct; +struct address_space; struct bin_attribute { struct attribute attr; size_t size; void *private; + struct address_space *mapping; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2f87377e9af7..48d8a363319e 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -496,7 +496,8 @@ static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn) } struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, - const struct sk_buff *orig_skb); + const struct sk_buff *orig_skb, + const struct sk_buff *ack_skb); static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index cdd049a724b1..54269e47ac9a 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -88,7 +88,7 @@ struct tee_param { * @close_session: close a session * @invoke_func: invoke a trusted function * @cancel_req: request cancel of an ongoing invoke or open - * @supp_revc: called for supplicant to get a command + * @supp_recv: called for supplicant to get a command * @supp_send: called for supplicant to send a response * @shm_register: register shared memory buffer in TEE * @shm_unregister: unregister shared memory buffer in TEE diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 13770cfe33ad..6673e4d4ac2e 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -23,7 +23,7 @@ struct ts_config; struct 
ts_state { unsigned int offset; - char cb[40]; + char cb[48]; }; /** diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 31b84404f047..6ac7bb1d2b1f 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -17,7 +17,6 @@ #include <linux/workqueue.h> #include <uapi/linux/thermal.h> -#define THERMAL_TRIPS_NONE -1 #define THERMAL_MAX_TRIPS 12 /* invalid cooling state */ @@ -77,8 +76,6 @@ struct thermal_zone_device_ops { int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, int, enum thermal_trend *); - int (*notify) (struct thermal_zone_device *, int, - enum thermal_trip_type); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); }; @@ -118,9 +115,9 @@ struct thermal_cooling_device { * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips - * @passive_delay: number of milliseconds to wait between polls when + * @passive_delay_jiffies: number of jiffies to wait between polls when * performing passive cooling. - * @polling_delay: number of milliseconds to wait between polls when + * @polling_delay_jiffies: number of jiffies to wait between polls when * checking whether trip points have been crossed (0 for * interrupt driven systems) * @temperature: current temperature. This is only for core code, @@ -133,9 +130,6 @@ struct thermal_cooling_device { trip point. * @prev_high_trip: the above current temperature if you've crossed a passive trip point. - * @forced_passive: If > 0, temperature at which to switch on all ACPI - * processor cooling devices. Currently only used by the - * step-wise governor. * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. 
* @ops: operations this &thermal_zone_device supports * @tzp: thermal zone parameters @@ -161,15 +155,14 @@ struct thermal_zone_device { void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ - int passive_delay; - int polling_delay; + unsigned long passive_delay_jiffies; + unsigned long polling_delay_jiffies; int temperature; int last_temperature; int emul_temperature; int passive; int prev_low_trip; int prev_high_trip; - unsigned int forced_passive; atomic_t need_update; struct thermal_zone_device_ops *ops; struct thermal_zone_params *tzp; @@ -397,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); @@ -444,8 +436,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) -{ } static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index c8a974cead73..9b2158c69275 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -43,6 +43,7 @@ enum syscall_work_bit { SYSCALL_WORK_BIT_SYSCALL_EMU, SYSCALL_WORK_BIT_SYSCALL_AUDIT, SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH, + SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP, }; #define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP) @@ -51,6 +52,7 @@ enum syscall_work_bit { #define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU) #define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT) #define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH) +#define SYSCALL_WORK_SYSCALL_EXIT_TRAP BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP) #endif #include <asm/thread_info.h> diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 034dccf93955..659a0a810fa1 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -45,6 +45,8 @@ enum tb_cfg_pkg_type { * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected * Thunderbolt dock (and Display Port). All PCIe * links downstream of the dock are removed. + * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the + * PCIe tunneling is disabled from the BIOS. 
*/ enum tb_security_level { TB_SECURITY_NONE, @@ -52,6 +54,7 @@ enum tb_security_level { TB_SECURITY_SECURE, TB_SECURITY_DPONLY, TB_SECURITY_USBONLY, + TB_SECURITY_NOPCIE, }; /** diff --git a/include/linux/timer.h b/include/linux/timer.h index fda13c9d1256..4118a97e62fb 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -192,6 +192,8 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) +extern bool timer_curr_running(struct timer_list *timer); + extern void init_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); diff --git a/include/linux/topology.h b/include/linux/topology.h index ad03df1cc266..7634cd737061 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -48,6 +48,7 @@ int arch_update_cpu_topology(void); /* Conform to ACPI 2.0 SLIT distance definitions */ #define LOCAL_DISTANCE 10 #define REMOTE_DISTANCE 20 +#define DISTANCE_BITS 8 #ifndef node_distance #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) #endif diff --git a/include/linux/torture.h b/include/linux/torture.h index 7f65bd1dd307..0910c5803f35 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -32,11 +32,27 @@ #define TOROUT_STRING(s) \ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s) #define VERBOSE_TOROUT_STRING(s) \ - do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0) +do { \ + if (verbose) { \ + verbose_torout_sleep(); \ + pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \ + } \ +} while (0) #define VERBOSE_TOROUT_ERRSTRING(s) \ - do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) +do { \ + if (verbose) { \ + verbose_torout_sleep(); \ + pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \ + } \ +} while (0) +void verbose_torout_sleep(void); /* Definitions for online/offline exerciser. */ +#ifdef CONFIG_HOTPLUG_CPU +int torture_num_online_cpus(void); +#else /* #ifdef CONFIG_HOTPLUG_CPU */ +static inline int torture_num_online_cpus(void) { return 1; } +#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); @@ -61,6 +77,13 @@ static inline void torture_random_init(struct torture_random_state *trsp) trsp->trs_count = 0; } +/* Definitions for high-resolution-timer sleeps. */ +int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp); +int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp); +int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp); +int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp); +int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp); + /* Task shuffler, which causes CPUs to occasionally go idle. 
*/ void torture_shuffle_task_register(struct task_struct *tp); int torture_shuffle_init(long shuffint); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 8f4ff39f51e7..543aa3b1dedc 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -31,6 +31,7 @@ struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; +/* if you add a new hash to this, increment TPM_MAX_HASHES below */ enum tpm_algorithms { TPM_ALG_ERROR = 0x0000, TPM_ALG_SHA1 = 0x0004, @@ -42,6 +43,12 @@ enum tpm_algorithms { TPM_ALG_SM3_256 = 0x0012, }; +/* + * maximum number of hashing algorithms a TPM can have. This is + * basically a count of every hash in tpm_algorithms above + */ +#define TPM_MAX_HASHES 5 + struct tpm_digest { u16 alg_id; u8 digest[TPM_MAX_DIGEST_SIZE]; @@ -146,7 +153,7 @@ struct tpm_chip { struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; - const struct attribute_group *groups[3]; + const struct attribute_group *groups[3 + TPM_MAX_HASHES]; unsigned int groups_cnt; u32 nr_allocated_banks; @@ -397,6 +404,10 @@ static inline u32 tpm2_rc_value(u32 rc) #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) extern int tpm_is_tpm2(struct tpm_chip *chip); +extern __must_check int tpm_try_get_ops(struct tpm_chip *chip); +extern void tpm_put_ops(struct tpm_chip *chip); +extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, + size_t min_rsp_body_length, const char *desc); extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digest); extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, @@ -410,7 +421,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip) { return -ENODEV; } - static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, struct tpm_digest *digest) { diff --git a/include/linux/trace.h b/include/linux/trace.h index 886a4ffd9d45..be1e130ed87c 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -34,8 +34,9 @@ int unregister_ftrace_export(struct trace_export *export); struct trace_array; void trace_printk_init_buffers(void); +__printf(3, 4) int trace_array_printk(struct trace_array *tr, unsigned long ip, - const char *fmt, ...); + const char *fmt, ...); int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index d321fe5ad1a1..28e7af1406f2 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -55,6 +55,8 @@ struct trace_event; int trace_raw_output_prep(struct trace_iterator *iter, struct trace_event *event); +extern __printf(2, 3) +void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); /* * The trace entry - the most basic unit of tracing. This is what @@ -87,6 +89,8 @@ struct trace_iterator { unsigned long iter_flags; void *temp; /* temp holder */ unsigned int temp_size; + char *fmt; /* modified format holder */ + unsigned int fmt_size; /* trace_seq for __print_flags() and __print_symbolic() etc. 
 */ struct trace_seq tmp_seq; @@ -148,17 +152,75 @@ enum print_line_t { enum print_line_t trace_handle_return(struct trace_seq *s); -void tracing_generic_entry_update(struct trace_entry *entry, - unsigned short type, - unsigned long flags, - int pc); +static inline void tracing_generic_entry_update(struct trace_entry *entry, + unsigned short type, + unsigned int trace_ctx) +{ + entry->preempt_count = trace_ctx & 0xff; + entry->pid = current->pid; + entry->type = type; + entry->flags = trace_ctx >> 16; +} + +unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_PREEMPT_RESCHED = 0x20, + TRACE_FLAG_NMI = 0x40, +}; + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) +{ + unsigned int irq_status = irqs_disabled_flags(irqflags) ? + TRACE_FLAG_IRQS_OFF : 0; + return tracing_gen_ctx_irq_test(irq_status); +} +static inline unsigned int tracing_gen_ctx(void) +{ + unsigned long irqflags; + + local_save_flags(irqflags); + return tracing_gen_ctx_flags(irqflags); +} +#else + +static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) +{ + return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); +} +static inline unsigned int tracing_gen_ctx(void) +{ + return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); +} +#endif + +static inline unsigned int tracing_gen_ctx_dec(void) +{ + unsigned int trace_ctx; + + trace_ctx = tracing_gen_ctx(); + /* + * Subtract one from the preemption counter if preemption is enabled, + * see trace_event_buffer_reserve() for details. + */ + if (IS_ENABLED(CONFIG_PREEMPTION)) + trace_ctx--; + return trace_ctx; +} + struct trace_event_file; struct ring_buffer_event * trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, - unsigned long flags, int pc); + unsigned int trace_ctx); #define TRACE_RECORD_CMDLINE BIT(0) #define TRACE_RECORD_TGID BIT(1) @@ -232,8 +294,7 @@ struct trace_event_buffer { struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; - unsigned long flags; - int pc; + unsigned int trace_ctx; struct pt_regs *regs; }; @@ -288,15 +349,8 @@ struct trace_event_call { struct event_filter *filter; void *mod; void *data; - /* - * bit 0: filter_active - * bit 1: allow trace by non root (cap any) - * bit 2: failed to apply filter - * bit 3: trace internal event (do not enable) - * bit 4: Event was enabled by module - * bit 5: use call filter rather than file filter - * bit 6: Event is a tracepoint - */ + + /* See the TRACE_EVENT_FL_* flags above */ int flags; /* static flags of different events */ #ifdef CONFIG_PERF_EVENTS diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 0f21617f1a66..9cfb099da58f 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -152,25 +152,28 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #ifdef TRACEPOINTS_ENABLED #ifdef CONFIG_HAVE_STATIC_CALL -#define __DO_TRACE_CALL(name) static_call(tp_func_##name) +#define __DO_TRACE_CALL(name, args) \ + do { \ + struct tracepoint_func *it_func_ptr; \ + void *__data; \ + it_func_ptr = \ + rcu_dereference_raw((&__tracepoint_##name)->funcs); \ + if (it_func_ptr) { \ + __data = (it_func_ptr)->data; \ + static_call(tp_func_##name)(__data, args); \ + 
} \ + } while (0) #else -#define __DO_TRACE_CALL(name) __traceiter_##name +#define __DO_TRACE_CALL(name, args) __traceiter_##name(NULL, args) #endif /* CONFIG_HAVE_STATIC_CALL */ /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. - * - * Note, the proto and args passed in includes "__data" as the first parameter. - * The reason for this is to handle the "void" prototype. If a tracepoint - * has a "void" prototype, then it is invalid to declare a function - * as "(void *, void)". */ -#define __DO_TRACE(name, proto, args, cond, rcuidle) \ +#define __DO_TRACE(name, args, cond, rcuidle) \ do { \ - struct tracepoint_func *it_func_ptr; \ int __maybe_unused __idx = 0; \ - void *__data; \ \ if (!(cond)) \ return; \ @@ -190,12 +193,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) rcu_irq_enter_irqson(); \ } \ \ - it_func_ptr = \ - rcu_dereference_raw((&__tracepoint_##name)->funcs); \ - if (it_func_ptr) { \ - __data = (it_func_ptr)->data; \ - __DO_TRACE_CALL(name)(args); \ - } \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ \ if (rcuidle) { \ rcu_irq_exit_irqson(); \ @@ -206,17 +204,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } while (0) #ifndef MODULE -#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ +#define __DECLARE_TRACE_RCU(name, proto, args, cond) \ static inline void trace_##name##_rcuidle(proto) \ { \ if (static_key_false(&__tracepoint_##name.key)) \ __DO_TRACE(name, \ - TP_PROTO(data_proto), \ - TP_ARGS(data_args), \ + TP_ARGS(args), \ TP_CONDITION(cond), 1); \ } #else -#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) +#define __DECLARE_TRACE_RCU(name, proto, args, cond) #endif /* @@ -231,7 +228,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * even when this tracepoint is off. This code has no purpose other than * poking RCU a bit. 
*/ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ extern int __traceiter_##name(data_proto); \ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ extern struct tracepoint __tracepoint_##name; \ @@ -239,8 +236,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) { \ if (static_key_false(&__tracepoint_##name.key)) \ __DO_TRACE(name, \ - TP_PROTO(data_proto), \ - TP_ARGS(data_args), \ + TP_ARGS(args), \ TP_CONDITION(cond), 0); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ rcu_read_lock_sched_notrace(); \ @@ -249,7 +245,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ - PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ + PARAMS(cond)) \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ @@ -307,11 +303,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) \ it_func_ptr = \ rcu_dereference_raw((&__tracepoint_##_name)->funcs); \ - do { \ - it_func = (it_func_ptr)->func; \ - __data = (it_func_ptr)->data; \ - ((void(*)(void *, proto))(it_func))(__data, args); \ - } while ((++it_func_ptr)->func); \ + if (it_func_ptr) { \ + do { \ + it_func = READ_ONCE((it_func_ptr)->func); \ + __data = (it_func_ptr)->data; \ + ((void(*)(void *, proto))(it_func))(__data, args); \ + } while ((++it_func_ptr)->func); \ + } \ return 0; \ } \ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); @@ -330,7 +328,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #else /* !TRACEPOINTS_ENABLED */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ static inline void trace_##name(proto) \ { } \ static inline void trace_##name##_rcuidle(proto) \ @@ -410,14 +408,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define DECLARE_TRACE(name, proto, args) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()), \ - PARAMS(void *__data, proto), \ - PARAMS(__data, args)) + PARAMS(void *__data, proto)) #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ - PARAMS(void *__data, proto), \ - PARAMS(__data, args)) + PARAMS(void *__data, proto)) #define TRACE_EVENT_FLAGS(event, flag) diff --git a/include/linux/tty.h b/include/linux/tty.h index c873f475f0a7..95fc2f100f12 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -240,8 +240,7 @@ struct tty_port { wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* User TTY flags ASYNC_ */ unsigned long iflags; /* Internal flags TTY_PORT_ */ - unsigned char console:1, /* port is a console */ - low_latency:1; /* optional: tune for latency */ + unsigned char console:1; /* port is a console */ struct mutex mutex; /* Locking */ struct mutex buf_mutex; /* Buffer alloc lock */ unsigned char *xmit_buf; /* Optional buffer */ @@ -416,11 +415,14 @@ extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); extern const char *tty_name(const struct tty_struct *tty); -extern struct tty_struct *tty_kopen(dev_t device); +extern struct tty_struct *tty_kopen_exclusive(dev_t device); +extern struct tty_struct *tty_kopen_shared(dev_t device); extern void 
tty_kclose(struct tty_struct *tty); extern int tty_dev_name_to_number(const char *name, dev_t *number); extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); extern void tty_ldisc_unlock(struct tty_struct *tty); +extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *); +extern struct file *tty_release_redirect(struct tty_struct *tty); #else static inline void tty_kref_put(struct tty_struct *tty) { } @@ -441,7 +443,7 @@ static inline int __init tty_init(void) { return 0; } static inline const char *tty_name(const struct tty_struct *tty) { return "(none)"; } -static inline struct tty_struct *tty_kopen(dev_t device) +static inline struct tty_struct *tty_kopen_exclusive(dev_t device) { return ERR_PTR(-ENODEV); } static inline void tty_kclose(struct tty_struct *tty) { } @@ -499,6 +501,8 @@ extern void tty_unthrottle(struct tty_struct *tty); extern int tty_throttle_safe(struct tty_struct *tty); extern int tty_unthrottle_safe(struct tty_struct *tty); extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); +extern int tty_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount); extern int is_current_pgrp_orphaned(void); extern void tty_hangup(struct tty_struct *tty); extern void tty_vhangup(struct tty_struct *tty); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index b1e6043e9917..572a07976116 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -185,7 +185,8 @@ struct tty_ldisc_ops { void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *tty); ssize_t (*read)(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t nr); + unsigned char *buf, size_t nr, + void **cookie, unsigned long offset); ssize_t (*write)(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr); int (*ioctl)(struct tty_struct *tty, struct file *file, diff --git a/include/linux/types.h b/include/linux/types.h index a147977602b5..ac825ad90e44 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -14,7 +14,7 @@ typedef u32 __kernel_dev_t; typedef __kernel_fd_set fd_set; typedef __kernel_dev_t dev_t; -typedef __kernel_ino_t ino_t; +typedef __kernel_ulong_t ino_t; typedef __kernel_mode_t mode_t; typedef unsigned short umode_t; typedef u32 nlink_t; @@ -189,7 +189,11 @@ struct hlist_node { struct ustat { __kernel_daddr_t f_tfree; - __kernel_ino_t f_tinode; +#ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE + unsigned int f_tinode; +#else + unsigned long f_tinode; +#endif char f_fname[6]; char f_fpack[6]; }; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index c6abb79501b3..e81856c0ba13 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -115,12 +115,13 @@ static inline void u64_stats_inc(u64_stats_t *p) } #endif +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) +#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) +#else static inline void u64_stats_init(struct u64_stats_sync *syncp) { -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - seqcount_init(&syncp->seq); -#endif } +#endif static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { diff --git a/include/linux/uio.h b/include/linux/uio.h index 72d88566694e..27ff8eb786dc 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct 
iov_iter *i); + +struct csum_state { + __wsum csum; + size_t off; +}; + +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, diff --git a/include/linux/units.h b/include/linux/units.h index 5c115c809507..dcc30a53fa93 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,6 +4,10 @@ #include <linux/math.h> +#define MILLIWATT_PER_WATT 1000L +#define MICROWATT_PER_MILLIWATT 1000L +#define MICROWATT_PER_WATT 1000000L + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/usb.h b/include/linux/usb.h index 7d72c4e0713c..d6a41841b93e 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -746,6 +746,8 @@ extern int usb_lock_device_for_reset(struct usb_device *udev, extern int usb_reset_device(struct usb_device *dev); extern void usb_queue_reset_device(struct usb_interface *dev); +extern struct device *usb_intf_get_dma_device(struct usb_interface *intf); + #ifdef CONFIG_ACPI extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable); diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 0ce4377545f8..f7cb3ddce7fb 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -98,6 +98,8 @@ struct cdc_ncm_ctx { struct hrtimer tx_timer; struct tasklet_struct bh; + struct usbnet *dev; + const struct usb_cdc_ncm_desc *func_desc; const struct usb_cdc_mbim_desc *mbim_desc; const struct usb_cdc_mbim_extended_desc *mbim_extended_desc; diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 604c6c514a50..abdd310c77f0 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -36,6 +36,15 @@ #include <linux/device.h> #include <uapi/linux/usb/ch9.h> +/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */ + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1, + USB_SSP_GEN_1x2, + USB_SSP_GEN_2x2, +}; + /** * usb_ep_type_string() - Returns human readable-name of the endpoint type. * @ep_type: The endpoint type to return human-readable name for. If it's not @@ -63,6 +72,17 @@ extern const char *usb_speed_string(enum usb_device_speed speed); extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); /** + * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count + * of a SuperSpeed Plus capable device. + * @dev: Pointer to the given USB controller device + * + * If the string from "maximum-speed" property is super-speed-plus-genXxY where + * 'X' is the generation number and 'Y' is the number of lanes, then this + * function returns the corresponding enum usb_ssp_rate. + */ +extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev); + +/** * usb_state_string - Returns human readable name for the state. * @state: The state to return a human-readable name for. 
If it's not * any of the states devices in usb_device_state_string enum, diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 025b41687ce9..edf3342507f1 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -88,6 +88,12 @@ struct ci_hdrc_platform_data { struct pinctrl_state *pins_default; struct pinctrl_state *pins_host; struct pinctrl_state *pins_device; + + /* platform-specific hooks */ + int (*hub_control)(struct ci_hdrc *ci, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength, + bool *done, unsigned long *flags); + void (*enter_lpm)(struct ci_hdrc *ci, bool enable); }; /* Default offset of capability registers */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index a2d229ab63ba..c71150f2c639 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -525,6 +525,8 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev, extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); extern void composite_disconnect(struct usb_gadget *gadget); +extern void composite_reset(struct usb_gadget *gadget); + extern int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl); extern void composite_suspend(struct usb_gadget *gadget); @@ -573,8 +575,8 @@ static inline u16 get_default_bcdDevice(void) { u16 bcdDevice; - bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8; - bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff)); + bcdDevice = bin2bcd(LINUX_VERSION_MAJOR) << 8; + bcdDevice |= bin2bcd(LINUX_VERSION_PATCHLEVEL); return bcdDevice; } diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index e7351d64f11f..ee04ef214ce8 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -323,6 +323,8 @@ struct usb_gadget_ops { struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); + void (*udc_set_ssp_rate)(struct usb_gadget *gadget, + enum usb_ssp_rate rate); struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); @@ -339,6 +341,10 @@ struct usb_gadget_ops { * @speed: Speed of current connection to USB host. * @max_speed: Maximal speed the UDC can handle. UDC must support this * and all slower speeds. + * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count. + * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC + * can handle. The UDC must support this and all slower speeds and lower + * number of lanes. * @state: the state we are now (attached, suspended, configured, etc) * @name: Identifies the controller hardware type. Used in diagnostics * and sometimes configuration. 
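As a quick illustration of the SuperSpeed Plus plumbing introduced by these usb/ch9.h and usb/gadget.h hunks (enum usb_ssp_rate, usb_get_maximum_ssp_rate(), the udc_set_ssp_rate() op, and the ssp_rate/max_ssp_rate fields added in the next hunk), here is a hedged sketch of how a UDC driver might feed the firmware-described maximum rate to the gadget core. It is not code from this series; the example_udc structure and its probe function are invented names used only for illustration.

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* Hypothetical UDC driver state; only the gadget member matters here. */
struct example_udc {
	struct usb_gadget gadget;
};

static int example_udc_probe(struct platform_device *pdev)
{
	struct example_udc *udc;

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	/* Existing helper: parses the "maximum-speed" firmware property. */
	udc->gadget.max_speed = usb_get_maximum_speed(&pdev->dev);

	/*
	 * New helper: when "maximum-speed" reads "super-speed-plus-genXxY",
	 * this returns the matching generation/lane-count value so the
	 * gadget core knows the fastest SSP rate the UDC supports.
	 */
	udc->gadget.max_ssp_rate = usb_get_maximum_ssp_rate(&pdev->dev);

	return 0;
}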
@@ -406,6 +412,11 @@ struct usb_gadget { struct list_head ep_list; /* of usb_ep */ enum usb_device_speed speed; enum usb_device_speed max_speed; + + /* USB SuperSpeed Plus only */ + enum usb_ssp_rate ssp_rate; + enum usb_ssp_rate max_ssp_rate; + enum usb_device_state state; const char *name; struct device dev; diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index bb9a782e1411..70d681918d01 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -225,6 +225,7 @@ enum pd_pdo_type { #define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */ #define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */ #define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */ +#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */ #define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */ #define PDO_FIXED_FRS_CURR_SHIFT 23 #define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ @@ -479,6 +480,8 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_NEWSRC 250 /* Maximum of 275ms */ #define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */ #define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */ +#define PD_T_SINK_TX 16 /* 16 - 20 ms */ +#define PD_T_CHUNK_NOT_SUPP 42 /* 40 - 50 ms */ #define PD_T_DRP_TRY 100 /* 75 - 150 ms */ #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */ diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index 8c08eeb9a74b..b057250704e8 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -21,22 +21,24 @@ * ---------- * <31:16> :: SVID * <15> :: VDM type ( 1b == structured, 0b == unstructured ) - * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently) + * <14:13> :: Structured VDM version * <12:11> :: reserved * <10:8> :: object position (1-7 valid ... used for enter/exit mode only) * <7:6> :: command type (SVDM only?) 
* <5> :: reserved (SVDM), command type (UVDM) * <4:0> :: command */ -#define VDO(vid, type, custom) \ +#define VDO(vid, type, ver, custom) \ (((vid) << 16) | \ ((type) << 15) | \ + ((ver) << 13) | \ ((custom) & 0x7FFF)) #define VDO_SVDM_TYPE (1 << 15) #define VDO_SVDM_VERS(x) ((x) << 13) #define VDO_OPOS(x) ((x) << 8) #define VDO_CMDT(x) ((x) << 6) +#define VDO_SVDM_VERS_MASK VDO_SVDM_VERS(0x3) #define VDO_OPOS_MASK VDO_OPOS(0x7) #define VDO_CMDT_MASK VDO_CMDT(0x3) @@ -74,6 +76,7 @@ #define PD_VDO_VID(vdo) ((vdo) >> 16) #define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1) +#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3) #define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7) #define PD_VDO_CMD(vdo) ((vdo) & 0x1f) #define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3) @@ -103,34 +106,50 @@ * -------------------- * <31> :: data capable as a USB host * <30> :: data capable as a USB device - * <29:27> :: product type (UFP / Cable) + * <29:27> :: product type (UFP / Cable / VPD) * <26> :: modal operation supported (1b == yes) - * <25:16> :: product type (DFP) + * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0) + * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0) + * <20:16> :: Reserved, Shall be set to zero * <15:0> :: USB-IF assigned VID for this cable vendor */ + +/* PD Rev2.0 definition */ #define IDH_PTYPE_UNDEF 0 + +/* SOP Product Type (UFP) */ +#define IDH_PTYPE_NOT_UFP 0 #define IDH_PTYPE_HUB 1 #define IDH_PTYPE_PERIPH 2 #define IDH_PTYPE_PSD 3 #define IDH_PTYPE_AMA 5 +/* SOP' Product Type (Cable Plug / VPD) */ +#define IDH_PTYPE_NOT_CABLE 0 #define IDH_PTYPE_PCABLE 3 #define IDH_PTYPE_ACABLE 4 +#define IDH_PTYPE_VPD 6 -#define IDH_PTYPE_DFP_UNDEF 0 +/* SOP Product Type (DFP) */ +#define IDH_PTYPE_NOT_DFP 0 #define IDH_PTYPE_DFP_HUB 1 #define IDH_PTYPE_DFP_HOST 2 #define IDH_PTYPE_DFP_PB 3 -#define IDH_PTYPE_DFP_AMC 4 -#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \ - ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \ - | (is_modal) << 26 | ((vid) & 0xffff)) +/* ID Header Mask */ +#define IDH_DFP_MASK GENMASK(25, 23) +#define IDH_CONN_MASK GENMASK(22, 21) + +#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \ + ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \ + | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \ + | ((vid) & 0xffff)) #define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7) #define PD_IDH_VID(vdo) ((vdo) & 0xffff) #define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26)) #define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7) +#define PD_IDH_CONN_TYPE(vdo) (((vdo) >> 21) & 0x3) /* * Cert Stat VDO @@ -138,6 +157,7 @@ * <31:0> : USB-IF assigned XID for this cable */ #define PD_CSTAT_XID(vdo) (vdo) +#define VDO_CERT(xid) ((xid) & 0xffffffff) /* * Product VDO @@ -149,79 +169,271 @@ #define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff) /* - * UFP VDO1 + * UFP VDO (PD Revision 3.0+ only) * -------- * <31:29> :: UFP VDO version * <28> :: Reserved * <27:24> :: Device capability - * <23:6> :: Reserved + * <23:22> :: Connector type (10b == receptacle, 11b == captive plug) + * <21:11> :: Reserved + * <10:8> :: Vconn power (AMA only) + * <7> :: Vconn required (AMA only, 0b == no, 1b == yes) + * <6> :: Vbus required (AMA only, 0b == yes, 1b == no) * <5:3> :: Alternate modes * <2:0> :: USB highest speed */ -#define PD_VDO1_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24) +#define PD_VDO_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24) + +/* UFP VDO Version */ +#define UFP_VDO_VER1_2 2 +/* Device Capability */ #define 
DEV_USB2_CAPABLE BIT(0) #define DEV_USB2_BILLBOARD BIT(1) #define DEV_USB3_CAPABLE BIT(2) #define DEV_USB4_CAPABLE BIT(3) +/* Connector Type */ +#define UFP_RECEPTACLE 2 +#define UFP_CAPTIVE 3 + +/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */ +#define AMA_VCONN_PWR_1W 0 +#define AMA_VCONN_PWR_1W5 1 +#define AMA_VCONN_PWR_2W 2 +#define AMA_VCONN_PWR_3W 3 +#define AMA_VCONN_PWR_4W 4 +#define AMA_VCONN_PWR_5W 5 +#define AMA_VCONN_PWR_6W 6 + +/* Vconn Required (AMA only) */ +#define AMA_VCONN_NOT_REQ 0 +#define AMA_VCONN_REQ 1 + +/* Vbus Required (AMA only) */ +#define AMA_VBUS_REQ 0 +#define AMA_VBUS_NOT_REQ 1 + +/* Alternate Modes */ +#define UFP_ALTMODE_NOT_SUPP 0 +#define UFP_ALTMODE_TBT3 BIT(0) +#define UFP_ALTMODE_RECFG BIT(1) +#define UFP_ALTMODE_NO_RECFG BIT(2) + +/* USB Highest Speed */ +#define UFP_USB2_ONLY 0 +#define UFP_USB32_GEN1 1 +#define UFP_USB32_4_GEN2 2 +#define UFP_USB4_GEN3 3 + +#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \ + (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \ + | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \ + | ((spd) & 0x7)) + /* - * DFP VDO + * DFP VDO (PD Revision 3.0+ only) * -------- * <31:29> :: DFP VDO version * <28:27> :: Reserved * <26:24> :: Host capability - * <23:5> :: Reserved + * <23:22> :: Connector type (10b == receptacle, 11b == captive plug) + * <21:5> :: Reserved * <4:0> :: Port number */ #define PD_VDO_DFP_HOSTCAP(vdo) (((vdo) & GENMASK(26, 24)) >> 24) +#define DFP_VDO_VER1_1 1 #define HOST_USB2_CAPABLE BIT(0) #define HOST_USB3_CAPABLE BIT(1) #define HOST_USB4_CAPABLE BIT(2) +#define DFP_RECEPTACLE 2 +#define DFP_CAPTIVE 3 + +#define VDO_DFP(ver, cap, conn, pnum) \ + (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \ + | ((pnum) & 0x1f)) /* - * Cable VDO + * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0) * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version * <23:20> :: Reserved, Shall be set to zero * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive) - * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle) + * <17> :: Reserved, Shall be set to zero * <16:13> :: cable latency (0001 == <10ns(~1m length)) * <12:11> :: cable termination type (11b == both ends active VCONN req) * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable) * <9> :: SSTX2 Directionality support * <8> :: SSRX1 Directionality support * <7> :: SSRX2 Directionality support - * <6:5> :: Vbus current handling capability + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) * <4> :: Vbus through cable (0b == no, 1b == yes) * <3> :: SOP" controller present? 
(0b == no, 1b == yes) * <2:0> :: USB SS Signaling support + * + * Passive Cable VDO (PD Rev3.0+) + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:21> :: VDO version + * <20> :: Reserved, Shall be set to zero + * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive) + * <17> :: Reserved, Shall be set to zero + * <16:13> :: cable latency (0001 == <10ns(~1m length)) + * <12:11> :: cable termination type (10b == Vconn not req, 01b == Vconn req) + * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <8:7> :: Reserved, Shall be set to zero + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) + * <4:3> :: Reserved, Shall be set to zero + * <2:0> :: USB highest speed + * + * Active Cable VDO 1 (PD Rev3.0+) + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:21> :: VDO version + * <20> :: Reserved, Shall be set to zero + * <19:18> :: Connector type (10b == C, 11b == Captive) + * <17> :: Reserved, Shall be set to zero + * <16:13> :: cable latency (0001 == <10ns(~1m length)) + * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req) + * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <8> :: SBU supported (0b == supported, 1b == not supported) + * <7> :: SBU type (0b == passive, 1b == active) + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) + * <4> :: Vbus through cable (0b == no, 1b == yes) + * <3> :: SOP" controller present? (0b == no, 1b == yes) + * <2:0> :: USB highest speed */ +/* Cable VDO Version */ +#define CABLE_VDO_VER1_0 0 +#define CABLE_VDO_VER1_3 3 + +/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */ #define CABLE_ATYPE 0 #define CABLE_BTYPE 1 #define CABLE_CTYPE 2 #define CABLE_CAPTIVE 3 -#define CABLE_PLUG 0 -#define CABLE_RECEPTACLE 1 -#define CABLE_CURR_1A5 0 + +/* Cable Latency */ +#define CABLE_LATENCY_1M 1 +#define CABLE_LATENCY_2M 2 +#define CABLE_LATENCY_3M 3 +#define CABLE_LATENCY_4M 4 +#define CABLE_LATENCY_5M 5 +#define CABLE_LATENCY_6M 6 +#define CABLE_LATENCY_7M 7 +#define CABLE_LATENCY_7M_PLUS 8 + +/* Cable Termination Type */ +#define PCABLE_VCONN_NOT_REQ 0 +#define PCABLE_VCONN_REQ 1 +#define ACABLE_ONE_END 2 +#define ACABLE_BOTH_END 3 + +/* Maximum Vbus Voltage */ +#define CABLE_MAX_VBUS_20V 0 +#define CABLE_MAX_VBUS_30V 1 +#define CABLE_MAX_VBUS_40V 2 +#define CABLE_MAX_VBUS_50V 3 + +/* Active Cable SBU Supported/Type */ +#define ACABLE_SBU_SUPP 0 +#define ACABLE_SBU_NOT_SUPP 1 +#define ACABLE_SBU_PASSIVE 0 +#define ACABLE_SBU_ACTIVE 1 + +/* Vbus Current Handling Capability */ +#define CABLE_CURR_DEF 0 #define CABLE_CURR_3A 1 #define CABLE_CURR_5A 2 + +/* USB SuperSpeed Signaling Support (PD Rev2.0) */ #define CABLE_USBSS_U2_ONLY 0 #define CABLE_USBSS_U31_GEN1 1 #define CABLE_USBSS_U31_GEN2 2 -#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\ - vps, sopp, usbss) \ - (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \ - | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \ - | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \ - | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \ - | ((usbss) & 0x7)) + +/* USB Highest Speed */ +#define CABLE_USB2_ONLY 0 +#define CABLE_USB32_GEN1 1 +#define CABLE_USB32_4_GEN2 2 +#define CABLE_USB4_GEN3 3 + +#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \ + (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) 
<< 18 \ + | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \ + | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \ + | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7)) +#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \ + | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7)) +#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \ + | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \ + | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7)) + #define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3) /* - * AMA VDO + * Active Cable VDO 2 + * --------- + * <31:24> :: Maximum operating temperature + * <23:16> :: Shutdown temperature + * <15> :: Reserved, Shall be set to zero + * <14:12> :: U3/CLd power + * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S) + * <10> :: Physical connection (0b == copper, 1b == optical) + * <9> :: Active element (0b == redriver, 1b == retimer) + * <8> :: USB4 supported (0b == yes, 1b == no) + * <7:6> :: USB2 hub hops consumed + * <5> :: USB2 supported (0b == yes, 1b == no) + * <4> :: USB3.2 supported (0b == yes, 1b == no) + * <3> :: USB lanes supported (0b == one lane, 1b == two lanes) + * <2> :: Optically isolated active cable (0b == no, 1b == yes) + * <1> :: Reserved, Shall be set to zero + * <0> :: USB gen (0b == gen1, 1b == gen2+) + */ +/* U3/CLd Power*/ +#define ACAB2_U3_CLD_10MW_PLUS 0 +#define ACAB2_U3_CLD_10MW 1 +#define ACAB2_U3_CLD_5MW 2 +#define ACAB2_U3_CLD_1MW 3 +#define ACAB2_U3_CLD_500UW 4 +#define ACAB2_U3_CLD_200UW 5 +#define ACAB2_U3_CLD_50UW 6 + +/* Other Active Cable VDO 2 Fields */ +#define ACAB2_U3U0_DIRECT 0 +#define ACAB2_U3U0_U3S 1 +#define ACAB2_PHY_COPPER 0 +#define ACAB2_PHY_OPTICAL 1 +#define ACAB2_REDRIVER 0 +#define ACAB2_RETIMER 1 +#define ACAB2_USB4_SUPP 0 +#define ACAB2_USB4_NOT_SUPP 1 +#define ACAB2_USB2_SUPP 0 +#define ACAB2_USB2_NOT_SUPP 1 +#define ACAB2_USB32_SUPP 0 +#define ACAB2_USB32_NOT_SUPP 1 +#define ACAB2_LANES_ONE 0 +#define ACAB2_LANES_TWO 1 +#define ACAB2_OPT_ISO_NO 0 +#define ACAB2_OPT_ISO_YES 1 +#define ACAB2_GEN_1 0 +#define ACAB2_GEN_2_PLUS 1 + +#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \ + (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \ + | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \ + | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \ + | (iso) << 2 | (gen)) + +/* + * AMA VDO (PD Rev2.0) * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version @@ -244,19 +456,41 @@ #define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1) #define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1) -#define AMA_VCONN_PWR_1W 0 -#define AMA_VCONN_PWR_1W5 1 -#define AMA_VCONN_PWR_2W 2 -#define AMA_VCONN_PWR_3W 3 -#define AMA_VCONN_PWR_4W 4 -#define AMA_VCONN_PWR_5W 5 -#define AMA_VCONN_PWR_6W 6 #define AMA_USBSS_U2_ONLY 0 #define AMA_USBSS_U31_GEN1 1 #define AMA_USBSS_U31_GEN2 2 #define AMA_USBSS_BBONLY 3 /* + * VPD VDO + * --------- + * <31:28> :: HW version + * <27:24> :: FW version + * <23:21> :: VDO version + * <20:17> :: Reserved, Shall be set to zero + * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V) + * <14> :: Charge through 
current support (0b == 3A, 1b == 5A) + * <13> :: Reserved, Shall be set to zero + * <12:7> :: Vbus impedance + * <6:1> :: Ground impedance + * <0> :: Charge through support (0b == no, 1b == yes) + */ +#define VPD_VDO_VER1_0 0 +#define VPD_MAX_VBUS_20V 0 +#define VPD_MAX_VBUS_30V 1 +#define VPD_MAX_VBUS_40V 2 +#define VPD_MAX_VBUS_50V 3 +#define VPDCT_CURR_3A 0 +#define VPDCT_CURR_5A 1 +#define VPDCT_NOT_SUPP 0 +#define VPDCT_SUPP 1 + +#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \ + (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ + | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \ + | ((gi) & 0x3f) << 1 | (ct)) + +/* * SVDM Discover SVIDs request -> response * * Request is properly formatted VDM Header with discover SVIDs command. diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 1c09b922f8b0..952272002e48 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -260,7 +260,7 @@ struct usb_serial_driver { void (*release)(struct usb_serial *serial); int (*port_probe)(struct usb_serial_port *port); - int (*port_remove)(struct usb_serial_port *port); + void (*port_remove)(struct usb_serial_port *port); int (*suspend)(struct usb_serial *serial, pm_message_t message); int (*resume)(struct usb_serial *serial); diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index f4a18427f5c4..42fcfbe10590 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -19,6 +19,10 @@ enum typec_cc_status { TYPEC_CC_RP_3_0, }; +/* Collision Avoidance */ +#define SINK_TX_NG TYPEC_CC_RP_1_5 +#define SINK_TX_OK TYPEC_CC_RP_3_0 + enum typec_cc_polarity { TYPEC_POLARITY_CC1, TYPEC_POLARITY_CC2, @@ -104,6 +108,10 @@ enum tcpm_transmit_type { * is supported by TCPC, set this callback for TCPM to query * whether vbus is at VSAFE0V when needed. * Returns true when vbus is at VSAFE0V, false otherwise. + * @set_partner_usb_comm_capable: + * Optional; The USB Communications Capable bit indicates if port + * partner is capable of communication over the USB data lines + * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit. 
*/ struct tcpc_dev { struct fwnode_handle *fwnode; @@ -135,6 +143,7 @@ struct tcpc_dev { int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode, bool pps_active, u32 requested_vbus_voltage); bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev); + void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable); }; struct tcpm_port; diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index c29d1b4c9381..fd1c9f6a4e37 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -79,6 +79,8 @@ struct tegra_usb_phy { bool is_ulpi_phy; struct gpio_desc *reset_gpio; struct reset_control *pad_rst; + bool wakeup_enabled; + bool pad_wakeup; bool powered_on; }; diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 54475323f83b..91b4303ca305 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -126,6 +126,7 @@ struct typec_altmode_desc { enum typec_port_data roles; }; +void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision); int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes); struct typec_altmode *typec_partner_register_altmode(struct typec_partner *partner, @@ -164,6 +165,7 @@ struct typec_plug_desc { * @type: The plug type from USB PD Cable VDO * @active: Is the cable active or passive * @identity: Result of Discover Identity command + * @pd_revision: USB Power Delivery Specification revision if supported * * Represents USB Type-C Cable attached to USB Type-C port. */ @@ -171,6 +173,8 @@ struct typec_cable_desc { enum typec_plug_type type; unsigned int active:1; struct usb_pd_identity *identity; + u16 pd_revision; /* 0300H = "3.0" */ + }; /* @@ -178,15 +182,22 @@ struct typec_cable_desc { * @usb_pd: USB Power Delivery support * @accessory: Audio, Debug or none. * @identity: Discover Identity command data + * @pd_revision: USB Power Delivery Specification Revision if supported * * Details about a partner that is attached to USB Type-C port. If @identity * member exists when partner is registered, a directory named "identity" is * created to sysfs for the partner device. + * + * @pd_revision is based on the setting of the "Specification Revision" field + * in the message header on the initial "Source Capabilities" message received + * from the partner, or a "Request" message received from the partner, depending + * on whether our port is a Sink or a Source. */ struct typec_partner_desc { unsigned int usb_pd:1; enum typec_accessory accessory; struct usb_pd_identity *identity; + u16 pd_revision; /* 0300H = "3.0" */ }; /** @@ -206,12 +217,19 @@ struct typec_operations { enum typec_port_type type); }; +enum usb_pd_svdm_ver { + SVDM_VER_1_0 = 0, + SVDM_VER_2_0 = 1, + SVDM_VER_MAX = SVDM_VER_2_0, +}; + /* * struct typec_capability - USB Type-C Port Capabilities * @type: Supported power role of the port * @data: Supported data role of the port * @revision: USB Type-C Specification release. Binary coded decimal * @pd_revision: USB Power Delivery Specification revision if supported + * @svdm_version: USB PD Structured VDM version if supported * @prefer_role: Initial role preference (DRP ports). 
* @accessory: Supported Accessory Modes * @fwnode: Optional fwnode of the port @@ -225,6 +243,7 @@ struct typec_capability { enum typec_port_data data; u16 revision; /* 0120H = "1.2" */ u16 pd_revision; /* 0300H = "3.0" */ + enum usb_pd_svdm_ver svdm_version; int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; unsigned int orientation_aware:1; @@ -275,4 +294,8 @@ int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); int typec_find_power_role(const char *name); int typec_find_port_data_role(const char *name); + +void typec_partner_set_svdm_version(struct typec_partner *partner, + enum usb_pd_svdm_ver svdm_version); +int typec_get_negotiated_svdm_version(struct typec_port *port); #endif /* __LINUX_USB_TYPEC_H */ diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index 5e0a7b7647c3..65933cbe9129 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -133,6 +133,16 @@ typec_altmode_get_orientation(struct typec_altmode *altmode) } /** + * typec_altmode_get_svdm_version - Get negotiated SVDM version + * @altmode: Handle to the alternate mode + */ +static inline int +typec_altmode_get_svdm_version(struct typec_altmode *altmode) +{ + return typec_get_negotiated_svdm_version(typec_altmode2port(altmode)); +} + +/** * struct typec_altmode_driver - USB Type-C alternate mode device driver * @id_table: Null terminated array of SVIDs * @probe: Callback for device binding diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 0fefeb976877..4ab5494503a8 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -35,6 +35,8 @@ struct vdpa_vq_state { u16 avail_index; }; +struct vdpa_mgmt_dev; + /** * vDPA device - representation of a vDPA device * @dev: underlying device @@ -43,6 +45,8 @@ struct vdpa_vq_state { * @index: device index * @features_valid: were features initialized? for legacy guests * @nvqs: maximum number of supported virtqueues + * @mdev: management device pointer; caller must setup when registering device as part + * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). 
*/ struct vdpa_device { struct device dev; @@ -51,6 +55,7 @@ struct vdpa_device { unsigned int index; bool features_valid; int nvqs; + struct vdpa_mgmt_dev *mdev; }; /** @@ -245,20 +250,22 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - int nvqs, - size_t size); + int nvqs, size_t size, const char *name); -#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \ container_of(__vdpa_alloc_device( \ parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ - dev_struct, member))), \ + dev_struct, member)), name), \ dev_struct, member) int vdpa_register_device(struct vdpa_device *vdev); void vdpa_unregister_device(struct vdpa_device *vdev); +int _vdpa_register_device(struct vdpa_device *vdev); +void _vdpa_unregister_device(struct vdpa_device *vdev); + /** * vdpa_driver - operations for a vDPA driver * @driver: underlying device driver @@ -336,4 +343,33 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, ops->get_config(vdev, offset, buf, len); } +/** + * vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver need to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. + * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver need to remove the specified device by calling + * _vdpa_unregister_device(). + */ +struct vdpa_mgmtdev_ops { + int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); + void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); +}; + +struct vdpa_mgmt_dev { + struct device *device; + const struct vdpa_mgmtdev_ops *ops; + const struct virtio_device_id *id_table; /* supported ids */ + struct list_head list; +}; + +int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev); +void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev); + #endif /* _LINUX_VDPA_H */ diff --git a/include/linux/verification.h b/include/linux/verification.h index 911ab7c2b1ab..a655923335ae 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -8,6 +8,8 @@ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H +#include <linux/types.h> + /* * Indicate that both builtin trusted keys and secondary trusted keys * should be used. 
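The vdpa.h hunk above adds the management-device interface: struct vdpa_mgmt_dev, the vdpa_mgmtdev_ops dev_add()/dev_del() callbacks, vdpa_mgmtdev_register()/vdpa_mgmtdev_unregister(), and the _vdpa_register_device()/_vdpa_unregister_device() variants meant for devices created through dev_add(). Below is a minimal, hedged sketch of how a parent driver might plug into that interface; the example_* names and the stubbed-out dev_add() body are assumptions for illustration, not taken from any in-tree driver.

#include <linux/module.h>
#include <linux/vdpa.h>
#include <linux/virtio_ids.h>

static struct virtio_device_id example_id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static int example_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
	/*
	 * A real driver would allocate the device with
	 * vdpa_alloc_device(..., name), point vdev->mdev at @mdev and call
	 * _vdpa_register_device() once the device is fully initialized.
	 * Those details are omitted from this sketch.
	 */
	return -EOPNOTSUPP;
}

static void example_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	_vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops example_mgmtdev_ops = {
	.dev_add = example_dev_add,
	.dev_del = example_dev_del,
};

static struct vdpa_mgmt_dev example_mgmt = {
	.ops = &example_mgmtdev_ops,
	.id_table = example_id_table,
	/* .device would normally point at the parent struct device. */
};

static int __init example_init(void)
{
	return vdpa_mgmtdev_register(&example_mgmt);
}

static void __exit example_exit(void)
{
	vdpa_mgmtdev_unregister(&example_mgmt);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");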
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index f45940b38a02..b7e18bde5aa8 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -57,6 +57,11 @@ extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); extern void *vfio_device_data(struct vfio_device *device); +/* events for the backend driver notify callback */ +enum vfio_iommu_notify_type { + VFIO_IOMMU_CONTAINER_CLOSE = 0, +}; + /** * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks */ @@ -92,6 +97,8 @@ struct vfio_iommu_driver_ops { void *data, size_t count, bool write); struct iommu_domain *(*group_iommu_domain)(void *iommu_data, struct iommu_group *group); + void (*notify)(void *iommu_data, + enum vfio_iommu_notify_type event); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index e8a924eeea3d..6b5fcfa1e555 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -79,8 +79,13 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type && skb->network_header) { struct flow_keys_basic keys; - if (!skb->protocol) + if (!skb->protocol) { + __be16 protocol = dev_parse_header_protocol(skb); + virtio_net_hdr_set_proto(skb, hdr); + if (protocol && protocol != skb->protocol) + return -EINVAL; + } retry: if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, NULL, 0, 0, 0, diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h new file mode 100644 index 000000000000..f26acbeec965 --- /dev/null +++ b/include/linux/virtio_pci_modern.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_PCI_MODERN_H +#define _LINUX_VIRTIO_PCI_MODERN_H + +#include <linux/pci.h> +#include <linux/virtio_pci.h> + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + + struct virtio_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + void __iomem *device; + /* Base of vq notifications (non-legacy mode). */ + void __iomem *notify_base; + /* Where to read and clear interrupt */ + u8 __iomem *isr; + + /* So we can sanity-check accesses. */ + size_t notify_len; + size_t device_len; + + /* Capability for when we need to map notifications per-vq. */ + int notify_map_cap; + + /* Multiply queue_notify_off by this value. (non-legacy mode). */ + u32 notify_offset_multiplier; + + int modern_bars; + + struct virtio_device_id id; +}; + +/* + * Type-safe wrappers for io accesses. + * Use these to enforce at compile time the following spec requirement: + * + * The driver MUST access each field using the “natural” access + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses + * for 16-bit fields and 8-bit accesses for 8-bit fields. 
+ */ +static inline u8 vp_ioread8(const u8 __iomem *addr) +{ + return ioread8(addr); +} +static inline u16 vp_ioread16 (const __le16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 vp_ioread32(const __le32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void vp_iowrite8(u8 value, u8 __iomem *addr) +{ + iowrite8(value, addr); +} + +static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) +{ + iowrite16(value, addr); +} + +static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) +{ + iowrite32(value, addr); +} + +static inline void vp_iowrite64_twopart(u64 val, + __le32 __iomem *lo, + __le32 __iomem *hi) +{ + vp_iowrite32((u32)val, lo); + vp_iowrite32(val >> 32, hi); +} + +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); +void vp_modern_set_features(struct virtio_pci_modern_device *mdev, + u64 features); +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev); +void vp_modern_set_status(struct virtio_pci_modern_device *mdev, + u8 status); +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, + u16 idx, u16 vector); +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, + u16 vector); +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, + u16 index, u64 desc_addr, u64 driver_addr, + u64 device_addr); +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, + u16 idx, bool enable); +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, + u16 idx); +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, + u16 idx, u16 size); +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, + u16 idx); +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, + u16 idx); +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len); +int vp_modern_probe(struct virtio_pci_modern_device *mdev); +void vp_modern_remove(struct virtio_pci_modern_device *mdev); +#endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 80c0181c411d..df92211cf771 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -24,7 +24,8 @@ struct notifier_block; /* in notifier.h */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ -#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */ +#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ +#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ /* * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. @@ -37,12 +38,6 @@ struct notifier_block; /* in notifier.h */ * determine which allocations need the module shadow freed. */ -/* - * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with - * vfree_atomic(). 
- */ -#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ - /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -246,4 +241,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); +#ifdef CONFIG_MMU +bool vmalloc_dump_obj(void *object); +#else +static inline bool vmalloc_dump_obj(void *object) { return false; } +#endif + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vme.h b/include/linux/vme.h index 7e82bf500f01..b204a9b4be1b 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h @@ -122,7 +122,7 @@ struct vme_driver { const char *name; int (*match)(struct vme_dev *); int (*probe)(struct vme_dev *); - int (*remove)(struct vme_dev *); + void (*remove)(struct vme_dev *); struct device_driver driver; struct list_head devices; }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 773135fc6e19..506d625163a1 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -313,6 +313,12 @@ static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { if (vmstat_item_in_bytes(item)) { + /* + * Only cgroups use subpage accounting right now; at + * the global level, these items still change in + * multiples of whole pages. Store them as pages + * internally to keep the per-cpu counters compact. + */ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); delta >>= PAGE_SHIFT; } diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index be0afe6f379b..e36cb114c188 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -66,7 +66,7 @@ enum { * consists of at least two pages, the memory limit also dictates the * number of queue pairs a guest can create. */ -#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) +#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024)) #define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2) /* @@ -80,7 +80,7 @@ enum { * too much kernel memory (especially on vmkernel). We limit a queuepair to * 32 KB, or 16 KB per queue for symmetrical pairs. */ -#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024) +#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024)) /* * We have a fixed set of resource IDs available in the VMX. diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 349e39c3ab60..94e7a315479c 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -16,18 +16,6 @@ #include <linux/consolemap.h> #include <linux/notifier.h> -/* - * Presently, a lot of graphics programs do not restore the contents of - * the higher font pages. Defining this flag will avoid use of them, but - * will lose support for PIO_FONTRESET. Note that many font operations are - * not likely to work with these programs anyway; they need to be - * fixed. The linux/Documentation directory includes a code snippet - * to save and restore the text font. 
- */ -#ifdef CONFIG_VGA_CONSOLE -#define BROKEN_GRAPHICS_PROGRAMS 1 -#endif - void kd_mksound(unsigned int hz, unsigned int ticks); int kbd_rate(struct kbd_repeat *rep); diff --git a/include/linux/w1.h b/include/linux/w1.h index 949d3b10e531..9a2a0ef39018 100644 --- a/include/linux/w1.h +++ b/include/linux/w1.h @@ -280,7 +280,7 @@ int w1_register_family(struct w1_family *family); void w1_unregister_family(struct w1_family *family); /** - * module_w1_driver() - Helper macro for registering a 1-Wire families + * module_w1_family() - Helper macro for registering a 1-Wire families * @__w1_family: w1_family struct * * Helper macro for 1-Wire families which do not do anything special in module diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h index 58e082dadc68..462854f4f286 100644 --- a/include/linux/wm97xx.h +++ b/include/linux/wm97xx.h @@ -294,7 +294,6 @@ struct wm97xx { struct wm97xx_batt_pdata { int batt_aux; int temp_aux; - int charge_gpio; int min_voltage; int max_voltage; int batt_div; diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 26de0cae2a0a..d15a7730ee18 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -311,7 +311,7 @@ enum { WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ - WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */ + WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */ /* * Per-cpu workqueues are generally preferred because they tend to diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 10b4dc2709f0..4c379d23ec6e 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/mm.h> +#include <linux/user_namespace.h> #include <uapi/linux/xattr.h> struct inode; @@ -34,7 +35,8 @@ struct xattr_handler { int (*get)(const struct xattr_handler *, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); - int (*set)(const struct xattr_handler *, struct dentry *dentry, + int (*set)(const struct xattr_handler *, + struct user_namespace *mnt_userns, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); }; @@ -48,18 +50,26 @@ struct xattr { }; ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t); -ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); +ssize_t vfs_getxattr(struct user_namespace *, struct dentry *, const char *, + void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); -int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); -int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); -int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); -int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); -int __vfs_removexattr(struct dentry *, const char *); -int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); -int vfs_removexattr(struct dentry *, const char *); +int __vfs_setxattr(struct user_namespace *, struct dentry *, struct inode *, + const char *, const void *, size_t, int); +int __vfs_setxattr_noperm(struct user_namespace *, struct dentry *, + const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct user_namespace *, struct dentry *, + 
const char *, const void *, size_t, int, + struct inode **); +int vfs_setxattr(struct user_namespace *, struct dentry *, const char *, + const void *, size_t, int); +int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *); +int __vfs_removexattr_locked(struct user_namespace *, struct dentry *, + const char *, struct inode **); +int vfs_removexattr(struct user_namespace *, struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, +ssize_t vfs_getxattr_alloc(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); int xattr_supported_namespace(struct inode *inode, const char *prefix); diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h index eaba53ff387c..9e8be7a7cd25 100644 --- a/include/linux/z2_battery.h +++ b/include/linux/z2_battery.h @@ -6,7 +6,6 @@ struct z2_battery_info { int batt_I2C_bus; int batt_I2C_addr; int batt_I2C_reg; - int charge_gpio; int min_voltage; int max_voltage; int batt_div; diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 51bf43076165..e8997010612a 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -73,6 +73,7 @@ u64 zpool_get_total_size(struct zpool *pool); * @malloc: allocate mem from a pool. * @free: free mem from a pool. * @shrink: shrink the pool. + * @sleep_mapped: whether zpool driver can sleep during map. * @map: map a handle. * @unmap: unmap a handle. * @total_size: get total size of a pool. @@ -100,6 +101,7 @@ struct zpool_driver { int (*shrink)(void *pool, unsigned int pages, unsigned int *reclaimed); + bool sleep_mapped; void *(*map)(void *pool, unsigned long handle, enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); @@ -112,5 +114,6 @@ void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); bool zpool_evictable(struct zpool *pool); +bool zpool_can_sleep_mapped(struct zpool *pool); #endif diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 4807ca4d52e0..2a430e713ce5 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -35,7 +35,7 @@ enum zs_mapmode { struct zs_pool_stats { /* How many pages were migrated (freed) */ - unsigned long pages_compacted; + atomic_long_t pages_compacted; }; struct zs_pool; diff --git a/include/linux/zstd.h b/include/linux/zstd.h index 249575e2485f..e87f78c9b19c 100644 --- a/include/linux/zstd.h +++ b/include/linux/zstd.h @@ -791,11 +791,11 @@ size_t ZSTD_DStreamOutSize(void); /* for static allocation */ #define ZSTD_FRAMEHEADERSIZE_MAX 18 #define ZSTD_FRAMEHEADERSIZE_MIN 6 -static const size_t ZSTD_frameHeaderSize_prefix = 5; -static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; -static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; +#define ZSTD_frameHeaderSize_prefix 5 +#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN +#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX /* magic number + skippable frame length */ -static const size_t ZSTD_skippableHeaderSize = 8; +#define ZSTD_skippableHeaderSize 8 /*-************************************* diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h index 8439e46fb993..d03e5c54347a 100644 --- a/include/media/davinci/vpif_types.h +++ b/include/media/davinci/vpif_types.h @@ -48,8 +48,6 @@ struct vpif_display_config { int 
i2c_adapter_id; struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS]; const char *card_name; - struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ - int *asd_sizes; /* 0-terminated array of asd group sizes */ }; struct vpif_input { diff --git a/include/media/frame_vector.h b/include/media/frame_vector.h new file mode 100644 index 000000000000..bfed1710dc24 --- /dev/null +++ b/include/media/frame_vector.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _MEDIA_FRAME_VECTOR_H +#define _MEDIA_FRAME_VECTOR_H + +/* Container for pinned pfns / pages in frame_vector.c */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[]; /* Array of pinned pfns / pages. Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +#endif /* _MEDIA_FRAME_VECTOR_H */ diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 999b750bc6b8..30f138ebab6f 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -175,6 +175,13 @@ struct rc_map_list { struct rc_map map; }; +#ifdef CONFIG_MEDIA_CEC_RC +/* + * rc_map_list from rc-cec.c + */ +extern struct rc_map_list cec_map; +#endif + /* Routines from rc-map.c */ /** diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 0e04b5b2ebb0..f572e1279182 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -11,6 +11,7 @@ #include <linux/list.h> #include <linux/mutex.h> +struct dentry; struct device; struct device_node; struct v4l2_device; @@ -21,9 +22,6 @@ struct v4l2_async_notifier; * enum v4l2_async_match_type - type of asynchronous subdevice logic to be used * in order to identify a match * - * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct - * v4l2_async_subdev.match ops - * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node * @@ -31,8 +29,6 @@ struct v4l2_async_notifier; * algorithm that will be used to match an asynchronous device. */ enum v4l2_async_match_type { - V4L2_ASYNC_MATCH_CUSTOM, - V4L2_ASYNC_MATCH_DEVNAME, V4L2_ASYNC_MATCH_I2C, V4L2_ASYNC_MATCH_FWNODE, }; @@ -45,9 +41,6 @@ enum v4l2_async_match_type { * @match.fwnode: * pointer to &struct fwnode_handle to be matched. * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE. 
- * @match.device_name: - * string containing the device name to be matched. - * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME. * @match.i2c: embedded struct with I2C parameters to be matched. * Both @match.i2c.adapter_id and @match.i2c.address * should be matched. @@ -58,15 +51,6 @@ enum v4l2_async_match_type { * @match.i2c.address: * I2C address to be matched. * Used if @match_type is %V4L2_ASYNC_MATCH_I2C. - * @match.custom: - * Driver-specific match criteria. - * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM. - * @match.custom.match: - * Driver-specific match function to be used if - * %V4L2_ASYNC_MATCH_CUSTOM. - * @match.custom.priv: - * Driver-specific private struct with match parameters - * to be used if %V4L2_ASYNC_MATCH_CUSTOM. * @asd_list: used to add struct v4l2_async_subdev objects to the * master notifier @asd_list * @list: used to link struct v4l2_async_subdev objects, waiting to be @@ -80,16 +64,10 @@ struct v4l2_async_subdev { enum v4l2_async_match_type match_type; union { struct fwnode_handle *fwnode; - const char *device_name; struct { int adapter_id; unsigned short address; } i2c; - struct { - bool (*match)(struct device *dev, - struct v4l2_async_subdev *sd); - void *priv; - } custom; } match; /* v4l2-async core private: not to be used by drivers */ @@ -138,60 +116,84 @@ struct v4l2_async_notifier { }; /** + * v4l2_async_debug_init - Initialize debugging tools. + * + * @debugfs_dir: pointer to the parent debugfs &struct dentry + */ +void v4l2_async_debug_init(struct dentry *debugfs_dir); + +/** * v4l2_async_notifier_init - Initialize a notifier. * * @notifier: pointer to &struct v4l2_async_notifier * * This function initializes the notifier @asd_list. It must be called - * before the first call to @v4l2_async_notifier_add_subdev. + * before adding a subdevice to a notifier, using one of: + * @v4l2_async_notifier_add_fwnode_remote_subdev, + * @v4l2_async_notifier_add_fwnode_subdev, + * @v4l2_async_notifier_add_i2c_subdev, + * @__v4l2_async_notifier_add_subdev or + * @v4l2_async_notifier_parse_fwnode_endpoints. */ void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier); /** - * v4l2_async_notifier_add_subdev - Add an async subdev to the + * __v4l2_async_notifier_add_subdev - Add an async subdev to the * notifier's master asd list. * * @notifier: pointer to &struct v4l2_async_notifier * @asd: pointer to &struct v4l2_async_subdev * + * \warning: Drivers should avoid using this function and instead use one of: + * @v4l2_async_notifier_add_fwnode_subdev, + * @v4l2_async_notifier_add_fwnode_remote_subdev or + * @v4l2_async_notifier_add_i2c_subdev. + * * Call this function before registering a notifier to link the provided @asd to * the notifiers master @asd_list. The @asd must be allocated with k*alloc() as * it will be freed by the framework when the notifier is destroyed. */ -int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier, +int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier, struct v4l2_async_subdev *asd); +struct v4l2_async_subdev * +__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier, + struct fwnode_handle *fwnode, + unsigned int asd_struct_size); /** * v4l2_async_notifier_add_fwnode_subdev - Allocate and add a fwnode async * subdev to the notifier's master asd_list. 
* * @notifier: pointer to &struct v4l2_async_notifier - * @fwnode: fwnode handle of the sub-device to be matched - * @asd_struct_size: size of the driver's async sub-device struct, including - * sizeof(struct v4l2_async_subdev). The &struct - * v4l2_async_subdev shall be the first member of - * the driver's async sub-device struct, i.e. both - * begin at the same memory address. + * @fwnode: fwnode handle of the sub-device to be matched, pointer to + * &struct fwnode_handle + * @type: Type of the driver's async sub-device struct. The &struct + * v4l2_async_subdev shall be the first member of the driver's async + * sub-device struct, i.e. both begin at the same memory address. * * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the * notifiers @asd_list. The function also gets a reference of the fwnode which * is released later at notifier cleanup time. */ -struct v4l2_async_subdev * -v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier, - struct fwnode_handle *fwnode, - unsigned int asd_struct_size); +#define v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, type) \ + ((type *)__v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, \ + sizeof(type))) +struct v4l2_async_subdev * +__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + unsigned int asd_struct_size); /** * v4l2_async_notifier_add_fwnode_remote_subdev - Allocate and add a fwnode * remote async subdev to the * notifier's master asd_list. * - * @notif: pointer to &struct v4l2_async_notifier - * @endpoint: local endpoint pointing to the remote sub-device to be matched - * @asd: Async sub-device struct allocated by the caller. The &struct - * v4l2_async_subdev shall be the first member of the driver's async - * sub-device struct, i.e. both begin at the same memory address. + * @notifier: pointer to &struct v4l2_async_notifier + * @ep: local endpoint pointing to the remote sub-device to be matched, + * pointer to &struct fwnode_handle + * @type: Type of the driver's async sub-device struct. The &struct + * v4l2_async_subdev shall be the first member of the driver's async + * sub-device struct, i.e. both begin at the same memory address. * * Gets the remote endpoint of a given local endpoint, set it up for fwnode * matching and adds the async sub-device to the notifier's @asd_list. The @@ -199,52 +201,34 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier, * notifier cleanup time. * * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the - * exception that the fwnode refers to a local endpoint, not the remote one, and - * the function relies on the caller to allocate the async sub-device struct. + * exception that the fwnode refers to a local endpoint, not the remote one. */ -int -v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif, - struct fwnode_handle *endpoint, - struct v4l2_async_subdev *asd); +#define v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep, type) \ + ((type *) \ + __v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep, \ + sizeof(type))) +struct v4l2_async_subdev * +__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier, + int adapter_id, unsigned short address, + unsigned int asd_struct_size); /** * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async * subdev to the notifier's master asd_list. 
* * @notifier: pointer to &struct v4l2_async_notifier - * @adapter_id: I2C adapter ID to be matched + * @adapter: I2C adapter ID to be matched * @address: I2C address of sub-device to be matched - * @asd_struct_size: size of the driver's async sub-device struct, including - * sizeof(struct v4l2_async_subdev). The &struct - * v4l2_async_subdev shall be the first member of - * the driver's async sub-device struct, i.e. both - * begin at the same memory address. + * @type: Type of the driver's async sub-device struct. The &struct + * v4l2_async_subdev shall be the first member of the driver's async + * sub-device struct, i.e. both begin at the same memory address. * - * Same as above but for I2C matched sub-devices. + * Same as v4l2_async_notifier_add_fwnode_subdev() but for I2C matched + * sub-devices. */ -struct v4l2_async_subdev * -v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier, - int adapter_id, unsigned short address, - unsigned int asd_struct_size); - -/** - * v4l2_async_notifier_add_devname_subdev - Allocate and add a device-name - * async subdev to the notifier's master asd_list. - * - * @notifier: pointer to &struct v4l2_async_notifier - * @device_name: device name string to be matched - * @asd_struct_size: size of the driver's async sub-device struct, including - * sizeof(struct v4l2_async_subdev). The &struct - * v4l2_async_subdev shall be the first member of - * the driver's async sub-device struct, i.e. both - * begin at the same memory address. - * - * Same as above but for device-name matched sub-devices. - */ -struct v4l2_async_subdev * -v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier, - const char *device_name, - unsigned int asd_struct_size); +#define v4l2_async_notifier_add_i2c_subdev(notifier, adapter, address, type) \ + ((type *)__v4l2_async_notifier_add_i2c_subdev(notifier, adapter, \ + address, sizeof(type))) /** * v4l2_async_notifier_register - registers a subdevice asynchronous notifier @@ -281,9 +265,11 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier); * sub-devices allocated for the purposes of the notifier but not the notifier * itself. The user is responsible for calling this function to clean up the * notifier after calling - * @v4l2_async_notifier_add_subdev, - * @v4l2_async_notifier_parse_fwnode_endpoints or - * @v4l2_fwnode_reference_parse_sensor_common. + * @v4l2_async_notifier_add_fwnode_remote_subdev, + * @v4l2_async_notifier_add_fwnode_subdev, + * @v4l2_async_notifier_add_i2c_subdev, + * @__v4l2_async_notifier_add_subdev or + * @v4l2_async_notifier_parse_fwnode_endpoints. * * There is no harm from calling v4l2_async_notifier_cleanup in other * cases as long as its memory has been zeroed after it has been diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h deleted file mode 100644 index d9d21a43a834..000000000000 --- a/include/media/v4l2-clk.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * V4L2 clock service - * - * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * ATTENTION: This is a temporary API and it shall be replaced by the generic - * clock API, when the latter becomes widely available. 
- */ - -#ifndef MEDIA_V4L2_CLK_H -#define MEDIA_V4L2_CLK_H - -#include <linux/atomic.h> -#include <linux/export.h> -#include <linux/list.h> -#include <linux/mutex.h> - -struct module; -struct device; - -struct clk; -struct v4l2_clk { - struct list_head list; - const struct v4l2_clk_ops *ops; - const char *dev_id; - int enable; - struct mutex lock; /* Protect the enable count */ - atomic_t use_count; - struct clk *clk; - void *priv; -}; - -struct v4l2_clk_ops { - struct module *owner; - int (*enable)(struct v4l2_clk *clk); - void (*disable)(struct v4l2_clk *clk); - unsigned long (*get_rate)(struct v4l2_clk *clk); - int (*set_rate)(struct v4l2_clk *clk, unsigned long); -}; - -struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops, - const char *dev_name, - void *priv); -void v4l2_clk_unregister(struct v4l2_clk *clk); -struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id); -void v4l2_clk_put(struct v4l2_clk *clk); -int v4l2_clk_enable(struct v4l2_clk *clk); -void v4l2_clk_disable(struct v4l2_clk *clk); -unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk); -int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate); - -struct module; - -struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id, - unsigned long rate, struct module *owner); -void v4l2_clk_unregister_fixed(struct v4l2_clk *clk); - -static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id, - unsigned long rate) -{ - return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE); -} - -#define V4L2_CLK_NAME_SIZE 64 - -#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \ - "%d-%04x", adap, client) - -#define v4l2_clk_name_of(name, size, node) snprintf(name, size, \ - "of-%pOF", node) - -#endif diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index be36cbdcc1bd..3eb202259e8c 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -520,7 +520,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, u32 width, u32 height); /** - * v4l2_get_link_rate - Get link rate from transmitter + * v4l2_get_link_freq - Get link rate from transmitter * * @handler: The transmitter's control handler * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on @@ -537,7 +537,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, * -ENOENT: Link frequency or pixel rate control not found * -EINVAL: Invalid link frequency value */ -s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul, +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, unsigned int div); static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf) diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h index 3f0281d06ec7..4ffa914ade3a 100644 --- a/include/media/v4l2-event.h +++ b/include/media/v4l2-event.h @@ -101,7 +101,7 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, * * .. note:: * The driver's only responsibility is to fill in the type and the data - * fields.The other fields will be filled in by V4L2. + * fields. The other fields will be filled in by V4L2. */ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev); @@ -116,11 +116,20 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev); * * .. note:: * The driver's only responsibility is to fill in the type and the data - * fields.The other fields will be filled in by V4L2. + * fields. 
The other fields will be filled in by V4L2. */ void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev); /** + * v4l2_event_wake_all - Wake all filehandles. + * + * Used when unregistering a video device. + * + * @vdev: pointer to &struct video_device + */ +void v4l2_event_wake_all(struct video_device *vdev); + +/** * v4l2_event_pending - Check if an event is available * * @fh: pointer to &struct v4l2_fh diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index 4365430eea6f..80d21ad8d603 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -25,7 +25,7 @@ struct fwnode_handle; struct v4l2_async_notifier; struct v4l2_async_subdev; -#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 4 +#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 8 /** * struct v4l2_fwnode_bus_mipi_csi2 - MIPI CSI-2 bus data structure @@ -214,6 +214,28 @@ struct v4l2_fwnode_connector { }; /** + * enum v4l2_fwnode_bus_type - Video bus types defined by firmware properties + * @V4L2_FWNODE_BUS_TYPE_GUESS: Default value if no bus-type fwnode property + * @V4L2_FWNODE_BUS_TYPE_CSI2_CPHY: MIPI CSI-2 bus, C-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_CSI1: MIPI CSI-1 bus + * @V4L2_FWNODE_BUS_TYPE_CCP2: SMIA Compact Camera Port 2 bus + * @V4L2_FWNODE_BUS_TYPE_CSI2_DPHY: MIPI CSI-2 bus, D-PHY physical layer + * @V4L2_FWNODE_BUS_TYPE_PARALLEL: Camera Parallel Interface bus + * @V4L2_FWNODE_BUS_TYPE_BT656: BT.656 video format bus-type + * @NR_OF_V4L2_FWNODE_BUS_TYPE: Number of bus-types + */ +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + V4L2_FWNODE_BUS_TYPE_CSI2_DPHY, + V4L2_FWNODE_BUS_TYPE_PARALLEL, + V4L2_FWNODE_BUS_TYPE_BT656, + NR_OF_V4L2_FWNODE_BUS_TYPE +}; + +/** * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties * @fwnode: pointer to the endpoint's fwnode handle * @vep: pointer to the V4L2 fwnode data structure @@ -453,6 +475,10 @@ typedef int (*parse_endpoint_func)(struct device *dev, * @parse_endpoint: Driver's callback function called on each V4L2 fwnode * endpoint. Optional. * + * DEPRECATED! This function is deprecated. Don't use it in new drivers. + * Instead see an example in cio2_parse_firmware() function in + * drivers/media/pci/intel/ipu3/ipu3-cio2.c . + * * Parse the fwnode endpoints of the @dev device and populate the async sub- * devices list in the notifier. 
The @parse_endpoint callback function is * called for each endpoint with the corresponding async sub-device pointer to diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 61969402a0e3..799ba61b5b6f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -18,6 +18,7 @@ #include <linux/dma-buf.h> #include <linux/bitops.h> #include <media/media-request.h> +#include <media/frame_vector.h> #define VB2_MAX_FRAME (32) #define VB2_MAX_PLANES (8) diff --git a/include/net/act_api.h b/include/net/act_api.h index 55dab604861f..2bf3092ae7ec 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -166,6 +166,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, u32 flags); +void tcf_idr_insert_many(struct tc_action *actions[]); void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, struct tc_action **a, int bind); @@ -186,10 +187,13 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, struct tc_action *actions[], size_t *attr_size, bool rtnl_held, struct netlink_ext_ack *extack); +struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla, + bool rtnl_held, + struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - bool rtnl_held, + struct tc_action_ops *ops, bool rtnl_held, struct netlink_ext_ack *extack); int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref, bool terse); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index c1504aa3d9cf..ba2f439bc04d 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -238,6 +238,14 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, + + /* + * When this quirk is set, then the hci_suspend_notifier is not + * registered. This is intended for devices which drop completely + * from the bus on system-suspend and which will show up as a new + * HCI after resume. 
+ */ + HCI_QUIRK_NO_SUSPEND_NOTIFIER, }; /* HCI device flags */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 677a8c50b2ad..ebdd4afe30d2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -105,6 +105,8 @@ enum suspend_tasks { SUSPEND_POWERING_DOWN, SUSPEND_PREPARE_NOTIFIER, + + SUSPEND_SET_ADV_FILTER, __SUSPEND_NUM_TASKS }; @@ -250,15 +252,31 @@ struct adv_pattern { __u8 value[HCI_MAX_AD_LENGTH]; }; +struct adv_rssi_thresholds { + __s8 low_threshold; + __s8 high_threshold; + __u16 low_threshold_timeout; + __u16 high_threshold_timeout; + __u8 sampling_period; +}; + struct adv_monitor { struct list_head patterns; - bool active; + struct adv_rssi_thresholds rssi; __u16 handle; + + enum { + ADV_MONITOR_STATE_NOT_REGISTERED, + ADV_MONITOR_STATE_REGISTERED, + ADV_MONITOR_STATE_OFFLOADED + } state; }; #define HCI_MIN_ADV_MONITOR_HANDLE 1 -#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 +#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 +#define HCI_ADV_MONITOR_EXT_NONE 1 +#define HCI_ADV_MONITOR_EXT_MSFT 2 #define HCI_MAX_SHORT_NAME_LENGTH 10 @@ -1316,10 +1334,15 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); -void hci_free_adv_monitor(struct adv_monitor *monitor); -int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); -int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); +void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); +int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); +bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, + int *err); +bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err); +bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err); bool hci_is_adv_monitoring(struct hci_dev *hdev); +int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); @@ -1342,6 +1365,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) +#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) @@ -1794,7 +1818,10 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); +void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); +int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); +int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 1d1232917de7..61800a7b6192 100644 --- a/include/net/bluetooth/l2cap.h +++ 
b/include/net/bluetooth/l2cap.h @@ -207,6 +207,7 @@ struct l2cap_hdr { __le16 len; __le16 cid; } __packed; +#define L2CAP_LEN_SIZE 2 #define L2CAP_HDR_SIZE 4 #define L2CAP_ENH_HDR_SIZE 6 #define L2CAP_EXT_HDR_SIZE 8 diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index f9a6638e20b3..839a2028009e 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -821,6 +821,22 @@ struct mgmt_rp_add_ext_adv_data { __u8 instance; } __packed; +struct mgmt_adv_rssi_thresholds { + __s8 high_threshold; + __le16 high_threshold_timeout; + __s8 low_threshold; + __le16 low_threshold_timeout; + __u8 sampling_period; +} __packed; + +#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056 +struct mgmt_cp_add_adv_patterns_monitor_rssi { + struct mgmt_adv_rssi_thresholds rssi; + __u8 pattern_count; + struct mgmt_adv_pattern patterns[]; +} __packed; +#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bonding.h b/include/net/bonding.h index adc3da776970..019e998d944a 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -89,6 +89,8 @@ #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ NETIF_F_GSO_ESP) +#define BOND_TLS_FEATURES (NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX) + #ifdef CONFIG_NET_POLL_CONTROLLER extern atomic_t netpoll_block_tx; @@ -265,6 +267,8 @@ struct bond_vlan_tag { unsigned short vlan_id; }; +bool bond_sk_check(struct bonding *bond); + /** * Returns NULL if the net_device does not belong to any of the bond's slaves * diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0d6f7ec86061..911fae42b0c0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1460,6 +1460,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_DMG: 60GHz MCS * @RATE_INFO_FLAGS_HE_MCS: HE MCS information * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode + * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), @@ -1468,6 +1469,7 @@ enum rate_info_flags { RATE_INFO_FLAGS_DMG = BIT(3), RATE_INFO_FLAGS_HE_MCS = BIT(4), RATE_INFO_FLAGS_EDMG = BIT(5), + RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6), }; /** @@ -2581,12 +2583,14 @@ struct cfg80211_auth_request { * authentication capability. Drivers can offload authentication to * userspace if this flag is set. Only applicable for cfg80211_connect() * request (connect callback). + * @ASSOC_REQ_DISABLE_HE: Disable HE */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), ASSOC_REQ_DISABLE_VHT = BIT(1), ASSOC_REQ_USE_RRM = BIT(2), CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), + ASSOC_REQ_DISABLE_HE = BIT(4), }; /** @@ -3630,9 +3634,10 @@ struct mgmt_frame_regs { * All callbacks except where otherwise noted should return 0 * on success or a negative error code. * - * All operations are currently invoked under rtnl for consistency with the - * wireless extensions but this is subject to reevaluation as soon as this - * code is used more widely and we have a first user without wext. + * All operations are invoked with the wiphy mutex held. The RTNL may be + * held in addition (due to wireless extensions) but this cannot be relied + * upon except in cases where documented below. Note that due to ordering, + * the RTNL also cannot be acquired in any handlers. * * @suspend: wiphy device needs to be suspended. 
The variable @wow will * be %NULL or contain the enabled Wake-on-Wireless triggers that are @@ -3647,11 +3652,14 @@ struct mgmt_frame_regs { * the new netdev in the wiphy's network namespace! Returns the struct * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must * also set the address member in the wdev. + * This additionally holds the RTNL to be able to do netdev changes. * * @del_virtual_intf: remove the virtual interface + * This additionally holds the RTNL to be able to do netdev changes. * * @change_virtual_intf: change type/configuration of virtual interface, * keep the struct wireless_dev's iftype updated. + * This additionally holds the RTNL to be able to do netdev changes. * * @add_key: add a key with the given parameters. @mac_addr will be %NULL * when adding a group key. @@ -4741,6 +4749,7 @@ struct wiphy_iftype_akm_suites { /** * struct wiphy - wireless hardware description + * @mtx: mutex for the data (structures) of this device * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() * the reg_notifier's request can be passed as NULL @@ -4934,6 +4943,8 @@ struct wiphy_iftype_akm_suites { * @sar_capa: SAR control capabilities */ struct wiphy { + struct mutex mtx; + /* assign these fields before you register the wiphy */ u8 perm_addr[ETH_ALEN]; @@ -5186,6 +5197,37 @@ static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, */ int wiphy_register(struct wiphy *wiphy); +/* this is a define for better error reporting (file/line) */ +#define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx) + +/** + * rcu_dereference_wiphy - rcu_dereference with debug checking + * @wiphy: the wiphy to check the locking on + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or RTNL. Note: Please prefer wiphy_dereference() or rcu_dereference(). + */ +#define rcu_dereference_wiphy(wiphy, p) \ + rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx)) + +/** + * wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx + * @wiphy: the wiphy to check the locking on + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit the + * READ_ONCE(), because caller holds the wiphy mutex used for updates. + */ +#define wiphy_dereference(wiphy, p) \ + rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx)) + +/** + * get_wiphy_regdom - get custom regdomain for the given wiphy + * @wiphy: the wiphy to get the regdomain from + */ +const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy); + /** * wiphy_unregister - deregister a wiphy from cfg80211 * @@ -5211,13 +5253,45 @@ struct cfg80211_cached_keys; struct cfg80211_cqm_config; /** + * wiphy_lock - lock the wiphy + * @wiphy: the wiphy to lock + * + * This is mostly exposed so it can be done around registering and + * unregistering netdevs that aren't created through cfg80211 calls, + * since that requires locking in cfg80211 when the notifiers is + * called, but that cannot differentiate which way it's called. + * + * When cfg80211 ops are called, the wiphy is already locked. 
+ */ +static inline void wiphy_lock(struct wiphy *wiphy) + __acquires(&wiphy->mtx) +{ + mutex_lock(&wiphy->mtx); + __acquire(&wiphy->mtx); +} + +/** + * wiphy_unlock - unlock the wiphy again + * @wiphy: the wiphy to unlock + */ +static inline void wiphy_unlock(struct wiphy *wiphy) + __releases(&wiphy->mtx) +{ + __release(&wiphy->mtx); + mutex_unlock(&wiphy->mtx); +} + +/** * struct wireless_dev - wireless device state * * For netdevs, this structure must be allocated by the driver * that uses the ieee80211_ptr field in struct net_device (this * is intentional so it can be allocated along with the netdev.) * It need not be registered then as netdev registration will - * be intercepted by cfg80211 to see the new wireless device. + * be intercepted by cfg80211 to see the new wireless device, + * however, drivers must lock the wiphy before registering or + * unregistering netdevs if they pre-create any netdevs (in ops + * called from cfg80211, the wiphy is already locked.) * * For non-netdev uses, it must also be allocated by the driver * in response to the cfg80211 callbacks that require it, as @@ -5226,6 +5300,9 @@ struct cfg80211_cqm_config; * * @wiphy: pointer to hardware description * @iftype: interface type + * @registered: is this wdev already registered with cfg80211 + * @registering: indicates we're doing registration under wiphy lock + * for the notifier * @list: (private) Used to collect the interfaces * @netdev: (private) Used to reference back to the netdev, may be %NULL * @identifier: (private) Identifier used in nl80211 to identify this @@ -5309,7 +5386,7 @@ struct wireless_dev { struct mutex mtx; - bool use_4addr, is_running; + bool use_4addr, is_running, registered, registering; u8 address[ETH_ALEN] __aligned(sizeof(u16)); @@ -5978,18 +6055,18 @@ int regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd); /** - * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers + * regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers * @wiphy: the wireless device we want to process the regulatory domain on * @rd: the regulatory domain information to use for this wiphy * - * This functions requires the RTNL to be held and applies the new regdomain - * synchronously to this wiphy. For more details see - * regulatory_set_wiphy_regd(). + * This functions requires the RTNL and the wiphy mutex to be held and + * applies the new regdomain synchronously to this wiphy. For more details + * see regulatory_set_wiphy_regd(). * * Return: 0 on success. -EINVAL, -EPERM */ -int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy, - struct ieee80211_regdomain *rd); +int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, + struct ieee80211_regdomain *rd); /** * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain @@ -6107,7 +6184,7 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid); void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid); /** - * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped + * cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped * @reqid: identifier for the related scheduled scan request @@ -6115,9 +6192,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid); * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. 
The driver * is then called back via the sched_scan_stop operation when done. - * This function should be called with rtnl locked. + * This function should be called with the wiphy mutex held. */ -void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid); +void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid); /** * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame @@ -7554,7 +7631,7 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, * also checks if IR-relaxation conditions apply, to allow beaconing under * more permissive conditions. * - * Requires the RTNL to be held. + * Requires the wiphy mutex to be held. */ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, @@ -7652,19 +7729,46 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate); * cfg80211_unregister_wdev - remove the given wdev * @wdev: struct wireless_dev to remove * - * Call this function only for wdevs that have no netdev assigned, - * e.g. P2P Devices. It removes the device from the list so that - * it can no longer be used. It is necessary to call this function - * even when cfg80211 requests the removal of the interface by - * calling the del_virtual_intf() callback. The function must also - * be called when the driver wishes to unregister the wdev, e.g. - * when the device is unbound from the driver. + * This function removes the device so it can no longer be used. It is necessary + * to call this function even when cfg80211 requests the removal of the device + * by calling the del_virtual_intf() callback. The function must also be called + * when the driver wishes to unregister the wdev, e.g. when the hardware device + * is unbound from the driver. * - * Requires the RTNL to be held. + * Requires the RTNL and wiphy mutex to be held. */ void cfg80211_unregister_wdev(struct wireless_dev *wdev); /** + * cfg80211_register_netdevice - register the given netdev + * @dev: the netdev to register + * + * Note: In contexts coming from cfg80211 callbacks, you must call this rather + * than register_netdevice(), unregister_netdev() is impossible as the RTNL is + * held. Otherwise, both register_netdevice() and register_netdev() are usable + * instead as well. + * + * Requires the RTNL and wiphy mutex to be held. + */ +int cfg80211_register_netdevice(struct net_device *dev); + +/** + * cfg80211_unregister_netdevice - unregister the given netdev + * @dev: the netdev to register + * + * Note: In contexts coming from cfg80211 callbacks, you must call this rather + * than unregister_netdevice(), unregister_netdev() is impossible as the RTNL + * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are + * usable instead as well. + * + * Requires the RTNL and wiphy mutex to be held. + */ +static inline void cfg80211_unregister_netdevice(struct net_device *dev) +{ + cfg80211_unregister_wdev(dev->ieee80211_ptr); +} + +/** * struct cfg80211_ft_event_params - FT Information Elements * @ies: FT IEs * @ies_len: length of the FT IE in bytes diff --git a/include/net/devlink.h b/include/net/devlink.h index f466819cc477..853420db5d32 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -94,6 +94,18 @@ struct devlink_port_pci_vf_attrs { }; /** + * struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes + * @controller: Associated controller number + * @sf: Associated PCI SF for of the PCI PF for this port. + * @pf: Associated PCI PF number for this port. 
+ */ +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; +}; + +/** * struct devlink_port_attrs - devlink port object * @flavour: flavour of the port * @split: indicates if this is split port @@ -103,6 +115,7 @@ struct devlink_port_pci_vf_attrs { * @phys: physical port attributes * @pci_pf: PCI PF port attributes * @pci_vf: PCI VF port attributes + * @pci_sf: PCI SF port attributes */ struct devlink_port_attrs { u8 split:1, @@ -114,6 +127,7 @@ struct devlink_port_attrs { struct devlink_port_phys_attrs phys; struct devlink_port_pci_pf_attrs pci_pf; struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; }; }; @@ -138,6 +152,17 @@ struct devlink_port { struct mutex reporters_lock; /* Protects reporter_list */ }; +struct devlink_port_new_attrs { + enum devlink_port_flavour flavour; + unsigned int port_index; + u32 controller; + u32 sfnum; + u16 pfnum; + u8 port_index_valid:1, + controller_valid:1, + sfnum_valid:1; +}; + struct devlink_sb_pool_info { enum devlink_sb_pool_type pool_type; u32 size; @@ -380,6 +405,8 @@ struct devlink_resource { #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 +#define DEVLINK_RESOURCE_GENERIC_NAME_PORTS "physical_ports" + #define __DEVLINK_PARAM_MAX_STRING_VALUE 32 enum devlink_param_type { DEVLINK_PARAM_TYPE_U8, @@ -836,6 +863,7 @@ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_GTP_PARSING, DEVLINK_TRAP_GENERIC_ID_ESP_PARSING, DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP, + DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER, /* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@ -1061,6 +1089,8 @@ enum devlink_trap_group_generic_id { "esp_parsing" #define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_NEXTHOP \ "blackhole_nexthop" +#define DEVLINK_TRAP_GENERIC_NAME_DMAC_FILTER \ + "dmac_filter" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" @@ -1348,6 +1378,79 @@ struct devlink_ops { int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack); + /** + * port_new() - Add a new port function of a specified flavor + * @devlink: Devlink instance + * @attrs: attributes of the new port + * @extack: extack for reporting error messages + * @new_port_index: index of the new port + * + * Devlink core will call this device driver function upon user request + * to create a new port function of a specified flavor and optional + * attributes + * + * Notes: + * - Called without devlink instance lock being held. Drivers must + * implement own means of synchronization + * - On success, drivers must register a port with devlink core + * + * Return: 0 on success, negative value otherwise. + */ + int (*port_new)(struct devlink *devlink, + const struct devlink_port_new_attrs *attrs, + struct netlink_ext_ack *extack, + unsigned int *new_port_index); + /** + * port_del() - Delete a port function + * @devlink: Devlink instance + * @port_index: port function index to delete + * @extack: extack for reporting error messages + * + * Devlink core will call this device driver function upon user request + * to delete a previously created port function + * + * Notes: + * - Called without devlink instance lock being held. Drivers must + * implement own means of synchronization + * - On success, drivers must unregister the corresponding devlink + * port + * + * Return: 0 on success, negative value otherwise. 
+ */ + int (*port_del)(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack); + /** + * port_fn_state_get() - Get the state of a port function + * @devlink: Devlink instance + * @port: The devlink port + * @state: Admin configured state + * @opstate: Current operational state + * @extack: extack for reporting error messages + * + * Reports the admin and operational state of a devlink port function + * + * Return: 0 on success, negative value otherwise. + */ + int (*port_fn_state_get)(struct devlink *devlink, + struct devlink_port *port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack); + /** + * port_fn_state_set() - Set the admin state of a port function + * @devlink: Devlink instance + * @port: The devlink port + * @state: Admin state + * @extack: extack for reporting error messages + * + * Set the admin state of a devlink port function + * + * Return: 0 on success, negative value otherwise. + */ + int (*port_fn_state_set)(struct devlink *devlink, + struct devlink_port *port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) @@ -1404,6 +1507,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro u16 pf, bool external); void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u16 vf, bool external); +void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, + u32 controller, u16 pf, u32 sf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/net/dsa.h b/include/net/dsa.h index 4e60d2610f20..83a933e563fe 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -46,6 +46,9 @@ struct phylink_link_state; #define DSA_TAG_PROTO_AR9331_VALUE 16 #define DSA_TAG_PROTO_RTL4_A_VALUE 17 #define DSA_TAG_PROTO_HELLCREEK_VALUE 18 +#define DSA_TAG_PROTO_XRS700X_VALUE 19 +#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20 +#define DSA_TAG_PROTO_SEVILLE_VALUE 21 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -67,6 +70,9 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE, DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE, DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE, + DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE, + DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE, + DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, }; struct packet_type; @@ -138,6 +144,9 @@ struct dsa_switch_tree { /* Has this tree been applied to the hardware? */ bool setup; + /* Tagging protocol operations */ + const struct dsa_device_ops *tag_ops; + /* * Configuration data for the platform device that owns * this dsa switch tree instance. @@ -149,8 +158,45 @@ struct dsa_switch_tree { /* List of DSA links composing the routing table */ struct list_head rtable; + + /* Maps offloaded LAG netdevs to a zero-based linear ID for + * drivers that need it. 
+ */ + struct net_device **lags; + unsigned int lags_len; }; +#define dsa_lags_foreach_id(_id, _dst) \ + for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \ + if ((_dst)->lags[(_id)]) + +#define dsa_lag_foreach_port(_dp, _dst, _lag) \ + list_for_each_entry((_dp), &(_dst)->ports, list) \ + if ((_dp)->lag_dev == (_lag)) + +#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \ + list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ + if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr)) + +static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst, + unsigned int id) +{ + return dst->lags[id]; +} + +static inline int dsa_lag_id(struct dsa_switch_tree *dst, + struct net_device *lag) +{ + unsigned int id; + + dsa_lags_foreach_id(id, dst) { + if (dsa_lag_dev(dst, id) == lag) + return id; + } + + return -ENODEV; +} + /* TC matchall action types */ enum dsa_port_mall_action_type { DSA_PORT_MALL_MIRROR, @@ -190,7 +236,9 @@ struct dsa_port { struct net_device *slave; }; - /* CPU port tagging operations used by master or slave devices */ + /* Copy of the tagging protocol operations, for quicker access + * in the data path. Valid only for the CPU ports. + */ const struct dsa_device_ops *tag_ops; /* Copies for faster access in master receive hot path */ @@ -220,6 +268,9 @@ struct dsa_port { bool devlink_port_setup; struct phylink *pl; struct phylink_config pl_config; + struct net_device *lag_dev; + bool lag_tx_enabled; + struct net_device *hsr_dev; struct list_head list; @@ -319,6 +370,11 @@ struct dsa_switch { */ bool untag_bridge_pvid; + /* Let DSA manage the FDB entries towards the CPU, based on the + * software bridge database. + */ + bool assisted_learning_on_cpu_port; + /* In case vlan_filtering_is_global is set, the VLAN awareness state * should be retrieved from here and not from the per-port settings. */ @@ -335,6 +391,14 @@ struct dsa_switch { */ bool mtu_enforcement_ingress; + /* Drivers that benefit from having an ID associated with each + * offloaded LAG should set this to the maximum number of + * supported IDs. DSA will then maintain a mapping of _at + * least_ these many IDs, accessible to drivers via + * dsa_lag_id(). + */ + unsigned int num_lag_ids; + size_t num_ports; }; @@ -430,9 +494,18 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { + /* + * Tagging protocol helpers called for the CPU ports and DSA links. + * @get_tag_protocol retrieves the initial tagging protocol and is + * mandatory. Switches which can operate using multiple tagging + * protocols should implement @change_tag_protocol and report in + * @get_tag_protocol the tagger in current use. + */ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); + int (*change_tag_protocol)(struct dsa_switch *ds, int port, + enum dsa_tag_protocol proto); int (*setup)(struct dsa_switch *ds); void (*teardown)(struct dsa_switch *ds); @@ -477,7 +550,7 @@ struct dsa_switch_ops { void (*phylink_fixed_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); /* - * ethtool hardware statistics. + * Port statistics counters. 
*/ void (*get_strings)(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data); @@ -486,6 +559,8 @@ struct dsa_switch_ops { int (*get_sset_count)(struct dsa_switch *ds, int port, int sset); void (*get_ethtool_phy_stats)(struct dsa_switch *ds, int port, uint64_t *data); + void (*get_stats64)(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s); /* * ethtool Wake-on-LAN @@ -553,19 +628,24 @@ struct dsa_switch_ops { void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); void (*port_fast_age)(struct dsa_switch *ds, int port); - int (*port_egress_floods)(struct dsa_switch *ds, int port, - bool unicast, bool multicast); + int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); + int (*port_bridge_flags)(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); + int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack); /* * VLAN support */ int (*port_vlan_filtering)(struct dsa_switch *ds, int port, bool vlan_filtering, - struct switchdev_trans *trans); - int (*port_vlan_prepare)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan); - void (*port_vlan_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan); + struct netlink_ext_ack *extack); + int (*port_vlan_add)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); /* @@ -581,10 +661,8 @@ struct dsa_switch_ops { /* * Multicast database */ - int (*port_mdb_prepare)(struct dsa_switch *ds, int port, + int (*port_mdb_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); - void (*port_mdb_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); int (*port_mdb_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); /* @@ -624,6 +702,13 @@ struct dsa_switch_ops { void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct net_device *br); + int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index, + int port); + int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index, + int port, struct net_device *lag, + struct netdev_lag_upper_info *info); + int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index, + int port, struct net_device *lag); /* * PTP functionality @@ -645,6 +730,40 @@ struct dsa_switch_ops { int (*devlink_info_get)(struct dsa_switch *ds, struct devlink_info_req *req, struct netlink_ext_ack *extack); + int (*devlink_sb_pool_get)(struct dsa_switch *ds, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); + int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); + int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); + int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack); + int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); + 
int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); + int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds, + unsigned int sb_index); + int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds, + unsigned int sb_index); + int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); + int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); /* * MTU change functionality. Switches can also adjust their MRU through @@ -655,6 +774,36 @@ struct dsa_switch_ops { int (*port_change_mtu)(struct dsa_switch *ds, int port, int new_mtu); int (*port_max_mtu)(struct dsa_switch *ds, int port); + + /* + * LAG integration + */ + int (*port_lag_change)(struct dsa_switch *ds, int port); + int (*port_lag_join)(struct dsa_switch *ds, int port, + struct net_device *lag, + struct netdev_lag_upper_info *info); + int (*port_lag_leave)(struct dsa_switch *ds, int port, + struct net_device *lag); + + /* + * HSR integration + */ + int (*port_hsr_join)(struct dsa_switch *ds, int port, + struct net_device *hsr); + int (*port_hsr_leave)(struct dsa_switch *ds, int port, + struct net_device *hsr); + + /* + * MRP integration + */ + int (*port_mrp_add)(struct dsa_switch *ds, int port, + const struct switchdev_obj_mrp *mrp); + int (*port_mrp_del)(struct dsa_switch *ds, int port, + const struct switchdev_obj_mrp *mrp); + int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port, + const struct switchdev_obj_ring_role_mrp *mrp); + int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port, + const struct switchdev_obj_ring_role_mrp *mrp); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ @@ -828,57 +977,15 @@ static inline int dsa_switch_resume(struct dsa_switch *ds) } #endif /* CONFIG_PM_SLEEP */ -enum dsa_notifier_type { - DSA_PORT_REGISTER, - DSA_PORT_UNREGISTER, -}; - -struct dsa_notifier_info { - struct net_device *dev; -}; - -struct dsa_notifier_register_info { - struct dsa_notifier_info info; /* must be first */ - struct net_device *master; - unsigned int port_number; - unsigned int switch_number; -}; - -static inline struct net_device * -dsa_notifier_info_to_dev(const struct dsa_notifier_info *info) -{ - return info->dev; -} - #if IS_ENABLED(CONFIG_NET_DSA) -int register_dsa_notifier(struct notifier_block *nb); -int unregister_dsa_notifier(struct notifier_block *nb); -int call_dsa_notifiers(unsigned long val, struct net_device *dev, - struct dsa_notifier_info *info); +bool dsa_slave_dev_check(const struct net_device *dev); #else -static inline int register_dsa_notifier(struct notifier_block *nb) +static inline bool dsa_slave_dev_check(const struct net_device *dev) { - return 0; -} - -static inline int unregister_dsa_notifier(struct notifier_block *nb) -{ - return 0; -} - -static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev, - struct dsa_notifier_info *info) -{ - return NOTIFY_DONE; + return false; } #endif -/* Broadcom tag specific helpers to insert and extract queue/port number */ -#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q) -#define BRCM_TAG_GET_PORT(v) ((v) >> 8) -#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff) - - netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev); int 
dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data); int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data); diff --git a/include/net/dst.h b/include/net/dst.h index 10f0a8399867..26f134ad3a25 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -18,6 +18,7 @@ #include <linux/refcount.h> #include <net/neighbour.h> #include <asm/processor.h> +#include <linux/indirect_call_wrapper.h> struct sk_buff; @@ -193,9 +194,11 @@ dst_feature(const struct dst_entry *dst, u32 feature) return dst_metric(dst, RTAX_FEATURES) & feature; } +INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *)); +INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *)); static inline u32 dst_mtu(const struct dst_entry *dst) { - return dst->ops->mtu(dst); + return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst); } /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ @@ -435,22 +438,36 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout) dst->expires = expires; } +INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *, + struct sk_buff *)); /* Output packet to network from transport. */ static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - return skb_dst(skb)->output(net, sk, skb); + return INDIRECT_CALL_INET(skb_dst(skb)->output, + ip6_output, ip_output, + net, sk, skb); } +INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *)); /* Input packet from network to transport. */ static inline int dst_input(struct sk_buff *skb) { - return skb_dst(skb)->input(skb); + return INDIRECT_CALL_INET(skb_dst(skb)->input, + ip6_input, ip_local_deliver, skb); } +INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, + u32)); +INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, + u32)); static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) { if (dst->obsolete) - dst = dst->ops->check(dst, cookie); + dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, + ipv4_dst_check, dst, cookie); return dst; } diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 123b1e9ea304..e6bd8ebf9ac3 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -245,6 +245,7 @@ struct flow_action_entry { unsigned long cookie; u32 mark; u32 labels[4]; + bool orig_dir; } ct_metadata; struct { /* FLOW_ACTION_MPLS_PUSH */ u32 label; diff --git a/include/net/fq.h b/include/net/fq.h index e39f3f8d5f8a..2eccbbd2b559 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -19,8 +19,6 @@ struct fq_tin; * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++ * (deficit round robin) based round robin queuing similar to the one * found in net/sched/sch_fq_codel.c - * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of - * fat flows and efficient head-dropping if packet limit is reached * @queue: sk_buff queue to hold packets * @backlog: number of bytes pending in the queue. 
The number of packets can be * found in @queue.qlen @@ -29,7 +27,6 @@ struct fq_tin; struct fq_flow { struct fq_tin *tin; struct list_head flowchain; - struct list_head backlogchain; struct sk_buff_head queue; u32 backlog; int deficit; @@ -47,6 +44,8 @@ struct fq_flow { struct fq_tin { struct list_head new_flows; struct list_head old_flows; + struct list_head tin_list; + struct fq_flow default_flow; u32 backlog_bytes; u32 backlog_packets; u32 overlimit; @@ -59,14 +58,14 @@ struct fq_tin { /** * struct fq - main container for fair queuing purposes * - * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient - * head-dropping when @backlog reaches @limit * @limit: max number of packets that can be queued across all flows * @backlog: number of packets queued across all flows */ struct fq { struct fq_flow *flows; - struct list_head backlogs; + unsigned long *flows_bitmap; + + struct list_head tin_backlog; spinlock_t lock; u32 flows_cnt; u32 limit; diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index e73d74d2fabf..a5f67a2c0c73 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -11,35 +11,37 @@ /* functions that are embedded into includer */ -static void fq_adjust_removal(struct fq *fq, - struct fq_flow *flow, - struct sk_buff *skb) + +static void +__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, + unsigned int bytes, unsigned int truesize) { struct fq_tin *tin = flow->tin; + int idx; - tin->backlog_bytes -= skb->len; - tin->backlog_packets--; - flow->backlog -= skb->len; - fq->backlog--; - fq->memory_usage -= skb->truesize; -} + tin->backlog_bytes -= bytes; + tin->backlog_packets -= packets; + flow->backlog -= bytes; + fq->backlog -= packets; + fq->memory_usage -= truesize; -static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow) -{ - struct fq_flow *i; + if (flow->backlog) + return; - if (flow->backlog == 0) { - list_del_init(&flow->backlogchain); - } else { - i = flow; + if (flow == &tin->default_flow) { + list_del_init(&tin->tin_list); + return; + } - list_for_each_entry_continue(i, &fq->backlogs, backlogchain) - if (i->backlog < flow->backlog) - break; + idx = flow - fq->flows; + __clear_bit(idx, fq->flows_bitmap); +} - list_move_tail(&flow->backlogchain, - &i->backlogchain); - } +static void fq_adjust_removal(struct fq *fq, + struct fq_flow *flow, + struct sk_buff *skb) +{ + __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); } static struct sk_buff *fq_flow_dequeue(struct fq *fq, @@ -54,11 +56,37 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq, return NULL; fq_adjust_removal(fq, flow, skb); - fq_rejigger_backlog(fq, flow); return skb; } +static int fq_flow_drop(struct fq *fq, struct fq_flow *flow, + fq_skb_free_t free_func) +{ + unsigned int packets = 0, bytes = 0, truesize = 0; + struct fq_tin *tin = flow->tin; + struct sk_buff *skb; + int pending; + + lockdep_assert_held(&fq->lock); + + pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2); + do { + skb = __skb_dequeue(&flow->queue); + if (!skb) + break; + + packets++; + bytes += skb->len; + truesize += skb->truesize; + free_func(fq, tin, flow, skb); + } while (packets < pending); + + __fq_adjust_removal(fq, flow, packets, bytes, truesize); + + return packets; +} + static struct sk_buff *fq_tin_dequeue(struct fq *fq, struct fq_tin *tin, fq_tin_dequeue_t dequeue_func) @@ -115,8 +143,7 @@ static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) static struct fq_flow *fq_flow_classify(struct fq *fq, struct fq_tin *tin, u32 idx, - 
struct sk_buff *skb, - fq_flow_get_default_t get_default_func) + struct sk_buff *skb) { struct fq_flow *flow; @@ -124,7 +151,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq, flow = &fq->flows[idx]; if (flow->tin && flow->tin != tin) { - flow = get_default_func(fq, tin, idx, skb); + flow = &tin->default_flow; tin->collisions++; fq->collisions++; } @@ -135,36 +162,56 @@ static struct fq_flow *fq_flow_classify(struct fq *fq, return flow; } -static void fq_recalc_backlog(struct fq *fq, - struct fq_tin *tin, - struct fq_flow *flow) +static struct fq_flow *fq_find_fattest_flow(struct fq *fq) { - struct fq_flow *i; + struct fq_tin *tin; + struct fq_flow *flow = NULL; + u32 len = 0; + int i; - if (list_empty(&flow->backlogchain)) - list_add_tail(&flow->backlogchain, &fq->backlogs); + for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) { + struct fq_flow *cur = &fq->flows[i]; + unsigned int cur_len; - i = flow; - list_for_each_entry_continue_reverse(i, &fq->backlogs, - backlogchain) - if (i->backlog > flow->backlog) - break; + cur_len = cur->backlog; + if (cur_len <= len) + continue; + + flow = cur; + len = cur_len; + } + + list_for_each_entry(tin, &fq->tin_backlog, tin_list) { + unsigned int cur_len = tin->default_flow.backlog; - list_move(&flow->backlogchain, &i->backlogchain); + if (cur_len <= len) + continue; + + flow = &tin->default_flow; + len = cur_len; + } + + return flow; } static void fq_tin_enqueue(struct fq *fq, struct fq_tin *tin, u32 idx, struct sk_buff *skb, - fq_skb_free_t free_func, - fq_flow_get_default_t get_default_func) + fq_skb_free_t free_func) { struct fq_flow *flow; bool oom; lockdep_assert_held(&fq->lock); - flow = fq_flow_classify(fq, tin, idx, skb, get_default_func); + flow = fq_flow_classify(fq, tin, idx, skb); + + if (!flow->backlog) { + if (flow != &tin->default_flow) + __set_bit(idx, fq->flows_bitmap); + else if (list_empty(&tin->tin_list)) + list_add(&tin->tin_list, &fq->tin_backlog); + } flow->tin = tin; flow->backlog += skb->len; @@ -173,8 +220,6 @@ static void fq_tin_enqueue(struct fq *fq, fq->memory_usage += skb->truesize; fq->backlog++; - fq_recalc_backlog(fq, tin, flow); - if (list_empty(&flow->flowchain)) { flow->deficit = fq->quantum; list_add_tail(&flow->flowchain, @@ -184,18 +229,13 @@ static void fq_tin_enqueue(struct fq *fq, __skb_queue_tail(&flow->queue, skb); oom = (fq->memory_usage > fq->memory_limit); while (fq->backlog > fq->limit || oom) { - flow = list_first_entry_or_null(&fq->backlogs, - struct fq_flow, - backlogchain); + flow = fq_find_fattest_flow(fq); if (!flow) return; - skb = fq_flow_dequeue(fq, flow); - if (!skb) + if (!fq_flow_drop(fq, flow, free_func)) return; - free_func(fq, flow->tin, flow, skb); - flow->tin->overlimit++; fq->overlimit++; if (oom) { @@ -224,8 +264,6 @@ static void fq_flow_filter(struct fq *fq, fq_adjust_removal(fq, flow, skb); free_func(fq, tin, flow, skb); } - - fq_rejigger_backlog(fq, flow); } static void fq_tin_filter(struct fq *fq, @@ -248,16 +286,18 @@ static void fq_flow_reset(struct fq *fq, struct fq_flow *flow, fq_skb_free_t free_func) { + struct fq_tin *tin = flow->tin; struct sk_buff *skb; while ((skb = fq_flow_dequeue(fq, flow))) - free_func(fq, flow->tin, flow, skb); + free_func(fq, tin, flow, skb); - if (!list_empty(&flow->flowchain)) + if (!list_empty(&flow->flowchain)) { list_del_init(&flow->flowchain); - - if (!list_empty(&flow->backlogchain)) - list_del_init(&flow->backlogchain); + if (list_empty(&tin->new_flows) && + list_empty(&tin->old_flows)) + list_del_init(&tin->tin_list); + } 
flow->tin = NULL; @@ -283,6 +323,7 @@ static void fq_tin_reset(struct fq *fq, fq_flow_reset(fq, flow, free_func); } + WARN_ON_ONCE(!list_empty(&tin->tin_list)); WARN_ON_ONCE(tin->backlog_bytes); WARN_ON_ONCE(tin->backlog_packets); } @@ -290,7 +331,6 @@ static void fq_tin_reset(struct fq *fq, static void fq_flow_init(struct fq_flow *flow) { INIT_LIST_HEAD(&flow->flowchain); - INIT_LIST_HEAD(&flow->backlogchain); __skb_queue_head_init(&flow->queue); } @@ -298,6 +338,8 @@ static void fq_tin_init(struct fq_tin *tin) { INIT_LIST_HEAD(&tin->new_flows); INIT_LIST_HEAD(&tin->old_flows); + INIT_LIST_HEAD(&tin->tin_list); + fq_flow_init(&tin->default_flow); } static int fq_init(struct fq *fq, int flows_cnt) @@ -305,8 +347,8 @@ static int fq_init(struct fq *fq, int flows_cnt) int i; memset(fq, 0, sizeof(fq[0])); - INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); + INIT_LIST_HEAD(&fq->tin_backlog); fq->flows_cnt = max_t(u32, flows_cnt, 1); fq->quantum = 300; fq->limit = 8192; @@ -316,6 +358,14 @@ static int fq_init(struct fq *fq, int flows_cnt) if (!fq->flows) return -ENOMEM; + fq->flows_bitmap = kcalloc(BITS_TO_LONGS(fq->flows_cnt), sizeof(long), + GFP_KERNEL); + if (!fq->flows_bitmap) { + kvfree(fq->flows); + fq->flows = NULL; + return -ENOMEM; + } + for (i = 0; i < fq->flows_cnt; i++) fq_flow_init(&fq->flows[i]); @@ -332,6 +382,9 @@ static void fq_reset(struct fq *fq, kvfree(fq->flows); fq->flows = NULL; + + kfree(fq->flows_bitmap); + fq->flows_bitmap = NULL; } #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index e55ec1597ce7..7cb3fa8310ed 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -14,6 +14,7 @@ */ struct genl_multicast_group { char name[GENL_NAMSIZ]; + u8 flags; }; struct genl_ops; diff --git a/include/net/gre.h b/include/net/gre.h index b60f212c16c6..4e209708b754 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -106,17 +106,6 @@ static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) return flags; } -static inline __sum16 gre_checksum(struct sk_buff *skb) -{ - __wsum csum; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - csum = lco_csum(skb); - else - csum = skb_checksum(skb, 0, skb->len, 0); - return csum_fold(csum); -} - static inline void gre_build_header(struct sk_buff *skb, int hdr_len, __be16 flags, __be16 proto, __be32 key, __be32 seq) @@ -146,7 +135,13 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len, !(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { *ptr = 0; - *(__sum16 *)ptr = gre_checksum(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + *(__sum16 *)ptr = csum_fold(lco_csum(skb)); + } else { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = sizeof(*greh); + } } } } diff --git a/include/net/gro.h b/include/net/gro.h new file mode 100644 index 000000000000..8a6eb5303cc4 --- /dev/null +++ b/include/net/gro.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _NET_IPV6_GRO_H +#define _NET_IPV6_GRO_H + +INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); +#endif /* _NET_IPV6_GRO_H */ diff --git a/include/net/icmp.h b/include/net/icmp.h index 9ac2d2672a93..fd84adc47963 100644 --- 
a/include/net/icmp.h +++ b/include/net/icmp.h @@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 #if IS_ENABLED(CONFIG_NF_NAT) void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); #else -#define icmp_ndo_send icmp_send +static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct ip_options opts = { 0 }; + __icmp_send(skb_in, type, code, info, &opts); +} #endif int icmp_rcv(struct sk_buff *skb); diff --git a/include/net/inet_common.h b/include/net/inet_common.h index cb2818862919..cad2a611efde 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -41,6 +41,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); #define BIND_WITH_LOCK (1 << 1) /* Called from BPF program. */ #define BIND_FROM_BPF (1 << 2) +/* Skip CAP_NET_BIND_SERVICE check. */ +#define BIND_NO_CAP_NET_BIND_SERVICE (1 << 3) int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, u32 flags); int inet_getname(struct socket *sock, struct sockaddr *uaddr, diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 111d7771b208..10a625760de9 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -120,14 +120,14 @@ struct inet_connection_sock { __u16 rcv_mss; /* MSS used for delayed ACK decisions */ } icsk_ack; struct { - int enabled; - /* Range of MTUs to search */ int search_high; int search_low; /* Information on the current probe. */ - int probe_size; + u32 probe_size:31, + /* Is the MTUP feature enabled for this connection? */ + enabled:1; u32 probe_timestamp; } icsk_mtup; @@ -141,7 +141,6 @@ struct inet_connection_sock { #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ #define ICSK_TIME_DACK 2 /* Delayed ack timer */ #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ -#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */ #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ #define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ @@ -227,8 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, } if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || - what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE || - what == ICSK_TIME_REO_TIMEOUT) { + what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { icsk->icsk_pending = what; icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index ac5ff3c3afb1..15b7fbe6b15c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -195,7 +195,8 @@ struct fib6_info { fib6_destroying:1, offload:1, trap:1, - unused:2; + offload_failed:1, + unused:1; struct rcu_head rcu; struct nexthop *nh; @@ -336,13 +337,6 @@ static inline void fib6_info_release(struct fib6_info *f6i) call_rcu(&f6i->rcu, fib6_info_destroy_rcu); } -static inline void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload, - bool trap) -{ - f6i->offload = offload; - f6i->trap = trap; -} - enum fib6_walk_state { #ifdef CONFIG_IPV6_SUBTREES FWS_S, @@ -545,6 +539,8 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) { return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric)); } +void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i, + bool offload, bool trap, bool offload_failed); #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) struct 
bpf_iter__ipv6_route { diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2a5277758379..f51a118bfce8 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -174,7 +174,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net, struct net_device *dev); struct fib6_info *rt6_add_dflt_router(struct net *net, const struct in6_addr *gwaddr, - struct net_device *dev, unsigned int pref); + struct net_device *dev, unsigned int pref, + u32 defrtr_usr_metric); void rt6_purge_dflt_routers(struct net *net); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 2ec062aaa978..a914f33f3ed5 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -213,7 +213,8 @@ struct fib_rt_info { u8 type; u8 offload:1, trap:1, - unused:6; + offload_failed:1, + unused:5; }; struct fib_entry_notifier_info { diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d609e957a3ec..7cb5a1aace40 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1712,4 +1712,15 @@ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) atomic_read(&dest->inactconns); } +#ifdef CONFIG_IP_VS_PROTO_TCP +INDIRECT_CALLABLE_DECLARE(int + tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); +#endif + +#ifdef CONFIG_IP_VS_PROTO_UDP +INDIRECT_CALLABLE_DECLARE(int + udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); +#endif #endif /* _NET_IP_VS_H */ diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index 9259ce2b22f3..ff06246dbbb9 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -128,11 +128,12 @@ struct iucv_sock { u8 flags; u16 msglimit; u16 msglimit_peer; + atomic_t skbs_in_xmit; atomic_t msg_sent; atomic_t msg_recv; atomic_t pendings; int transport; - void (*sk_txnotify)(struct sk_buff *skb, + void (*sk_txnotify)(struct sock *sk, enum iucv_tx_notify n); }; diff --git a/include/net/lapb.h b/include/net/lapb.h index ccc3d1f020b0..eee73442a1ba 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -92,6 +92,7 @@ struct lapb_cb { unsigned short n2, n2count; unsigned short t1, t2; struct timer_list t1timer, t2timer; + bool t1timer_stop, t2timer_stop; /* Internal control information */ struct sk_buff_head write_queue; @@ -103,6 +104,7 @@ struct lapb_cb { struct lapb_frame frmr_data; unsigned char frmr_type; + spinlock_t lock; refcount_t refcnt; }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2bdbf62f4ecd..2d1d629e5d14 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1296,6 +1296,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * the "0-length PSDU" field included there. The value for it is * in &struct ieee80211_rx_status. Note that if this value isn't * known the frame shouldn't be reported. + * @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by + * hardware or driver) */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1328,6 +1330,7 @@ enum mac80211_rx_flags { RX_FLAG_RADIOTAP_HE_MU = BIT(27), RX_FLAG_RADIOTAP_LSIG = BIT(28), RX_FLAG_NO_PSDU = BIT(29), + RX_FLAG_8023 = BIT(30), }; /** @@ -1649,11 +1652,15 @@ enum ieee80211_vif_flags { * The driver supports sending frames passed as 802.3 frames by mac80211. * It must also support sending 802.11 packets for the same interface. 
* @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload + * @IEEE80211_OFFLOAD_DECAP_ENABLED: rx encapsulation offload is enabled + * The driver supports passing received 802.11 frames as 802.3 frames to + * mac80211. */ enum ieee80211_offload_flags { IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0), IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1), + IEEE80211_OFFLOAD_DECAP_ENABLED = BIT(2), }; /** @@ -2389,6 +2396,9 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation * offload * + * @IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD: Hardware supports rx decapsulation + * offload + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2442,6 +2452,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID, IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT, IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD, + IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -3881,6 +3892,8 @@ enum ieee80211_reconfig_type { * @sta_set_4addr: Called to notify the driver when a station starts/stops using * 4-address mode * @set_sar_specs: Update the SAR (TX power) settings. + * @sta_set_decap_offload: Called to notify the driver when a station is allowed + * to use rx decapsulation offload */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4198,6 +4211,9 @@ struct ieee80211_ops { struct ieee80211_sta *sta, bool enabled); int (*set_sar_specs)(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); + void (*sta_set_decap_offload)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enabled); }; /** @@ -5513,7 +5529,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, void *data); /** - * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces + * ieee80211_iterate_active_interfaces_mtx - iterate active interfaces * * This function iterates over the interfaces associated with a given * hardware that are currently active and calls the callback for them. 
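/*
 * Usage sketch (not part of the patch) for the renamed
 * ieee80211_iterate_active_interfaces_mtx() shown in the hunk above.
 * The iterator signature and IEEE80211_IFACE_ITER_NORMAL come from
 * mac80211; the "example_drv" context struct and callback names are
 * hypothetical, added only to illustrate the calling convention.
 */
#include <net/mac80211.h>

struct example_drv {				/* hypothetical driver private data */
	struct ieee80211_hw *hw;
	int n_active_vifs;
};

static void example_count_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct example_drv *drv = data;

	drv->n_active_vifs++;			/* invoked once per active interface */
}

static void example_refresh_vifs(struct example_drv *drv)
{
	drv->n_active_vifs = 0;
	/* May sleep; unlike the _atomic variant this runs under the wiphy mutex. */
	ieee80211_iterate_active_interfaces_mtx(drv->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						example_count_vif, drv);
}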
@@ -5524,12 +5540,12 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, * @iterator: the iterator function to call, cannot sleep * @data: first argument of the iterator function */ -void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw, - u32 iter_flags, - void (*iterator)(void *data, +void ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw, + u32 iter_flags, + void (*iterator)(void *data, u8 *mac, struct ieee80211_vif *vif), - void *data); + void *data); /** * ieee80211_iterate_stations_atomic - iterate stations diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 29567875f428..dcaee24a4d87 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -165,7 +165,7 @@ struct net { struct netns_xfrm xfrm; #endif - atomic64_t net_cookie; /* written once */ + u64 net_cookie; /* written once */ #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; @@ -224,8 +224,6 @@ extern struct list_head net_namespace_list; struct net *get_net_ns_by_pid(pid_t pid); struct net *get_net_ns_by_fd(int fd); -u64 __net_gen_cookie(struct net *net); - #ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); void ipx_unregister_sysctl(void); diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 16e8b2f8d006..54c4d5c908a5 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -107,6 +107,10 @@ struct flow_offload_tuple { u8 l3proto; u8 l4proto; + + /* All members above are keys for lookups, see flow_offload_hash(). */ + struct { } __hash; + u8 dir; u16 mtu; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index f4af8362d234..fdec57d862b7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -200,14 +200,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) } int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); -unsigned int nft_parse_register(const struct nlattr *attr); int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); -int nft_validate_register_load(enum nft_registers reg, unsigned int len); -int nft_validate_register_store(const struct nft_ctx *ctx, - enum nft_registers reg, - const struct nft_data *data, - enum nft_data_types type, unsigned int len); +int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len); +int nft_parse_register_store(const struct nft_ctx *ctx, + const struct nlattr *attr, u8 *dreg, + const struct nft_data *data, + enum nft_data_types type, unsigned int len); /** * struct nft_userdata - user defined data associated with an object @@ -721,6 +720,8 @@ void *nft_set_elem_init(const struct nft_set *set, const struct nft_set_ext_tmpl *tmpl, const u32 *key, const u32 *key_end, const u32 *data, u64 timeout, u64 expiration, gfp_t gfp); +int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_expr *expr_array[]); void nft_set_elem_destroy(const struct nft_set *set, void *elem, bool destroy_expr); @@ -1105,11 +1106,17 @@ struct nft_table { u16 family:6, flags:8, genmask:2; + u32 nlpid; char *name; u16 udlen; u8 *udata; }; +static inline bool nft_table_has_owner(const struct nft_table *table) +{ + return table->flags & NFT_TABLE_F_OWNER; +} + static inline bool nft_base_chain_netdev(int family, u32 hooknum) { return family == NFPROTO_NETDEV || diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h 
index 8657e6815b07..fd10a7862fdc 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -26,21 +26,21 @@ void nf_tables_core_module_exit(void); struct nft_bitwise_fast_expr { u32 mask; u32 xor; - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; }; struct nft_cmp_fast_expr { u32 data; u32 mask; - enum nft_registers sreg:8; + u8 sreg; u8 len; bool inv; }; struct nft_immediate_expr { struct nft_data data; - enum nft_registers dreg:8; + u8 dreg; u8 dlen; }; @@ -60,14 +60,14 @@ struct nft_payload { enum nft_payload_bases base:8; u8 offset; u8 len; - enum nft_registers dreg:8; + u8 dreg; }; struct nft_payload_set { enum nft_payload_bases base:8; u8 offset; u8 len; - enum nft_registers sreg:8; + u8 sreg; u8 csum_type; u8 csum_offset; u8 csum_flags; diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index 628b6fa579cd..237f3757637e 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -5,7 +5,7 @@ #include <net/netfilter/nf_tables.h> struct nft_fib { - enum nft_registers dreg:8; + u8 dreg; u8 result; u32 flags; }; diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 07e2fd507963..2dce55c736f4 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -7,8 +7,8 @@ struct nft_meta { enum nft_meta_keys key:8; union { - enum nft_registers dreg:8; - enum nft_registers sreg:8; + u8 dreg; + u8 sreg; }; }; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 8e4fcac4df72..70a2a085dd1a 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -188,6 +188,8 @@ struct netns_ipv4 { int sysctl_udp_wmem_min; int sysctl_udp_rmem_min; + int sysctl_fib_notify_on_flag_change; + #ifdef CONFIG_NET_L3_MASTER_DEV int sysctl_udp_l3mdev_accept; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5ec054473d81..21c0debbd39e 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -51,6 +51,7 @@ struct netns_sysctl_ipv6 { int max_hbh_opts_len; int seg6_flowlabel; bool skip_notify_on_dev_down; + int fib_notify_on_flag_change; }; struct netns_ipv6 { diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 226930d66b63..7bc057aee40b 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -66,7 +66,12 @@ struct nh_info { struct nh_grp_entry { struct nexthop *nh; u8 weight; - atomic_t upper_bound; + + union { + struct { + atomic_t upper_bound; + } mpath; + }; struct list_head nh_list; struct nexthop *nh_parent; /* nexthop of group with this entry */ @@ -109,6 +114,11 @@ enum nexthop_event_type { NEXTHOP_EVENT_REPLACE, }; +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE, + NH_NOTIFIER_INFO_TYPE_GRP, +}; + struct nh_notifier_single_info { struct net_device *dev; u8 gw_family; @@ -137,7 +147,7 @@ struct nh_notifier_info { struct net *net; struct netlink_ext_ack *extack; u32 id; - bool is_grp; + enum nh_notifier_info_type type; union { struct nh_notifier_single_info *nh; struct nh_notifier_grp_info *nh_grp; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f2a9c44171c..255e4f4b521f 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -783,6 +783,42 @@ struct tc_mq_qopt_offload { }; }; +enum tc_htb_command { + /* Root */ + TC_HTB_CREATE, /* Initialize HTB offload. */ + TC_HTB_DESTROY, /* Destroy HTB offload. */ + + /* Classes */ + /* Allocate qid and create leaf. 
*/ + TC_HTB_LEAF_ALLOC_QUEUE, + /* Convert leaf to inner, preserve and return qid, create new leaf. */ + TC_HTB_LEAF_TO_INNER, + /* Delete leaf, while siblings remain. */ + TC_HTB_LEAF_DEL, + /* Delete leaf, convert parent to leaf, preserving qid. */ + TC_HTB_LEAF_DEL_LAST, + /* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */ + TC_HTB_LEAF_DEL_LAST_FORCE, + /* Modify parameters of a node. */ + TC_HTB_NODE_MODIFY, + + /* Class qdisc */ + TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */ +}; + +struct tc_htb_qopt_offload { + struct netlink_ext_ack *extack; + enum tc_htb_command command; + u16 classid; + u32 parent_classid; + u16 qid; + u16 moved_qid; + u64 rate; + u64 ceil; +}; + +#define TC_HTB_CLASSID_ROOT U32_MAX + enum tc_red_command { TC_RED_REPLACE, TC_RED_DESTROY, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 639e465a108f..2d6eb60c58c8 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -210,7 +210,8 @@ struct Qdisc_class_ops { int (*change)(struct Qdisc *, u32, u32, struct nlattr **, unsigned long *, struct netlink_ext_ack *); - int (*delete)(struct Qdisc *, unsigned long); + int (*delete)(struct Qdisc *, unsigned long, + struct netlink_ext_ack *); void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ @@ -388,6 +389,7 @@ struct qdisc_skb_cb { #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; u16 mru; + bool post_ct; }; typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); @@ -551,14 +553,20 @@ static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc) return qdisc->dev_queue->dev; } -static inline void sch_tree_lock(const struct Qdisc *q) +static inline void sch_tree_lock(struct Qdisc *q) { - spin_lock_bh(qdisc_root_sleeping_lock(q)); + if (q->flags & TCQ_F_MQROOT) + spin_lock_bh(qdisc_lock(q)); + else + spin_lock_bh(qdisc_root_sleeping_lock(q)); } -static inline void sch_tree_unlock(const struct Qdisc *q) +static inline void sch_tree_unlock(struct Qdisc *q) { - spin_unlock_bh(qdisc_root_sleeping_lock(q)); + if (q->flags & TCQ_F_MQROOT) + spin_unlock_bh(qdisc_lock(q)); + else + spin_unlock_bh(qdisc_root_sleeping_lock(q)); } extern struct Qdisc noop_qdisc; @@ -1143,7 +1151,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, old = *pold; *pold = new; if (old != NULL) - qdisc_tree_flush_backlog(old); + qdisc_purge_queue(old); sch_tree_unlock(sch); return old; diff --git a/include/net/sock.h b/include/net/sock.h index 129d200bccb4..636810ddcd9b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -226,7 +226,7 @@ struct sock_common { struct hlist_nulls_node skc_nulls_node; }; unsigned short skc_tx_queue_mapping; -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING unsigned short skc_rx_queue_mapping; #endif union { @@ -356,7 +356,7 @@ struct sock { #define sk_nulls_node __sk_common.skc_nulls_node #define sk_refcnt __sk_common.skc_refcnt #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING #define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping #endif @@ -1174,6 +1174,8 @@ struct proto { int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); + bool (*bpf_bypass_getsockopt)(int level, + int optname); void (*release_cb)(struct sock *sk); @@ -1350,14 +1352,18 @@ sk_memory_allocated_sub(struct sock *sk, int amt) atomic_long_sub(amt, sk->sk_prot->memory_allocated); } +#define SK_ALLOC_PERCPU_COUNTER_BATCH 16 + static inline void 
sk_sockets_allocated_dec(struct sock *sk) { - percpu_counter_dec(sk->sk_prot->sockets_allocated); + percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1, + SK_ALLOC_PERCPU_COUNTER_BATCH); } static inline void sk_sockets_allocated_inc(struct sock *sk) { - percpu_counter_inc(sk->sk_prot->sockets_allocated); + percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1, + SK_ALLOC_PERCPU_COUNTER_BATCH); } static inline u64 @@ -1834,7 +1840,7 @@ static inline int sk_tx_queue_get(const struct sock *sk) static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) { -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); @@ -1848,20 +1854,20 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) static inline void sk_rx_queue_clear(struct sock *sk) { -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; #endif } -#ifdef CONFIG_XPS static inline int sk_rx_queue_get(const struct sock *sk) { +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) return sk->sk_rx_queue_mapping; +#endif return -1; } -#endif static inline void sk_set_socket(struct sock *sk, struct socket *sock) { diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 99cd538d6519..b7fc7d0f54e2 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -16,20 +16,6 @@ #define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1) #define SWITCHDEV_F_DEFER BIT(2) -struct switchdev_trans { - bool ph_prepare; -}; - -static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans) -{ - return trans && trans->ph_prepare; -} - -static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans) -{ - return trans && !trans->ph_prepare; -} - enum switchdev_attr_id { SWITCHDEV_ATTR_ID_UNDEFINED, SWITCHDEV_ATTR_ID_PORT_STP_STATE, @@ -41,10 +27,12 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, -#if IS_ENABLED(CONFIG_BRIDGE_MRP) - SWITCHDEV_ATTR_ID_MRP_PORT_STATE, SWITCHDEV_ATTR_ID_MRP_PORT_ROLE, -#endif +}; + +struct switchdev_brport_flags { + unsigned long val; + unsigned long mask; }; struct switchdev_attr { @@ -55,16 +43,13 @@ struct switchdev_attr { void (*complete)(struct net_device *dev, int err, void *priv); union { u8 stp_state; /* PORT_STP_STATE */ - unsigned long brport_flags; /* PORT_{PRE}_BRIDGE_FLAGS */ + struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */ bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */ bool mc_disabled; /* MC_DISABLED */ -#if IS_ENABLED(CONFIG_BRIDGE_MRP) - u8 mrp_port_state; /* MRP_PORT_STATE */ u8 mrp_port_role; /* MRP_PORT_ROLE */ -#endif } u; }; @@ -73,7 +58,6 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_PORT_VLAN, SWITCHDEV_OBJ_ID_PORT_MDB, SWITCHDEV_OBJ_ID_HOST_MDB, -#if IS_ENABLED(CONFIG_BRIDGE_MRP) SWITCHDEV_OBJ_ID_MRP, SWITCHDEV_OBJ_ID_RING_TEST_MRP, SWITCHDEV_OBJ_ID_RING_ROLE_MRP, @@ -81,8 +65,6 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_IN_TEST_MRP, SWITCHDEV_OBJ_ID_IN_ROLE_MRP, SWITCHDEV_OBJ_ID_IN_STATE_MRP, - -#endif }; struct switchdev_obj { @@ -97,8 +79,7 @@ struct switchdev_obj { struct switchdev_obj_port_vlan { struct switchdev_obj obj; u16 flags; - u16 vid_begin; - u16 vid_end; + u16 vid; }; #define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \ @@ -115,7 
+96,6 @@ struct switchdev_obj_port_mdb { container_of((OBJ), struct switchdev_obj_port_mdb, obj) -#if IS_ENABLED(CONFIG_BRIDGE_MRP) /* SWITCHDEV_OBJ_ID_MRP */ struct switchdev_obj_mrp { struct switchdev_obj obj; @@ -147,6 +127,7 @@ struct switchdev_obj_ring_role_mrp { struct switchdev_obj obj; u8 ring_role; u32 ring_id; + u8 sw_backup; }; #define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \ @@ -181,6 +162,7 @@ struct switchdev_obj_in_role_mrp { u32 ring_id; u16 in_id; u8 in_role; + u8 sw_backup; }; #define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \ @@ -195,8 +177,6 @@ struct switchdev_obj_in_state_mrp { #define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \ container_of((OBJ), struct switchdev_obj_in_state_mrp, obj) -#endif - typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); enum switchdev_notifier_type { @@ -234,14 +214,12 @@ struct switchdev_notifier_fdb_info { struct switchdev_notifier_port_obj_info { struct switchdev_notifier_info info; /* must be first */ const struct switchdev_obj *obj; - struct switchdev_trans *trans; bool handled; }; struct switchdev_notifier_port_attr_info { struct switchdev_notifier_info info; /* must be first */ const struct switchdev_attr *attr; - struct switchdev_trans *trans; bool handled; }; @@ -261,7 +239,8 @@ switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info) void switchdev_deferred_process(void); int switchdev_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr); + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack); int switchdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct netlink_ext_ack *extack); @@ -289,7 +268,6 @@ int switchdev_handle_port_obj_add(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), int (*add_cb)(struct net_device *dev, const struct switchdev_obj *obj, - struct switchdev_trans *trans, struct netlink_ext_ack *extack)); int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, @@ -302,7 +280,7 @@ int switchdev_handle_port_attr_set(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), int (*set_cb)(struct net_device *dev, const struct switchdev_attr *attr, - struct switchdev_trans *trans)); + struct netlink_ext_ack *extack)); #else static inline void switchdev_deferred_process(void) @@ -310,7 +288,8 @@ static inline void switchdev_deferred_process(void) } static inline int switchdev_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr) + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -373,7 +352,6 @@ switchdev_handle_port_obj_add(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), int (*add_cb)(struct net_device *dev, const struct switchdev_obj *obj, - struct switchdev_trans *trans, struct netlink_ext_ack *extack)) { return 0; @@ -395,7 +373,7 @@ switchdev_handle_port_attr_set(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), int (*set_cb)(struct net_device *dev, const struct switchdev_attr *attr, - struct switchdev_trans *trans)) + struct netlink_ext_ack *extack)) { return 0; } diff --git a/include/net/tcp.h b/include/net/tcp.h index 78d13c88720f..963cd86d12dd 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -403,6 +403,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); +bool 
tcp_bpf_bypass_getsockopt(int level, int optname); int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); @@ -630,6 +631,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk) unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); unsigned int tcp_current_mss(struct sock *sk); +u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when); /* Bound MSS / TSO packet size with the half of the window */ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) @@ -1430,12 +1432,29 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied); */ static inline bool tcp_rmem_pressure(const struct sock *sk) { - int rcvbuf = READ_ONCE(sk->sk_rcvbuf); - int threshold = rcvbuf - (rcvbuf >> 3); + int rcvbuf, threshold; + + if (tcp_under_memory_pressure(sk)) + return true; + + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + threshold = rcvbuf - (rcvbuf >> 3); return atomic_read(&sk->sk_rmem_alloc) > threshold; } +static inline bool tcp_epollin_ready(const struct sock *sk, int target) +{ + const struct tcp_sock *tp = tcp_sk(sk); + int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq); + + if (avail <= 0) + return false; + + return (avail >= target) || tcp_rmem_pressure(sk) || + (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss); +} + extern void tcp_openreq_init_rwin(struct request_sock *req, const struct sock *sk_listener, const struct dst_entry *dst); @@ -2060,7 +2079,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb); void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced); extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd); -extern void tcp_rack_mark_lost(struct sock *sk); +extern bool tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, u64 xmit_time); extern void tcp_rack_reo_timeout(struct sock *sk); diff --git a/include/net/udp.h b/include/net/udp.h index 877832bed471..a132a02b2f2c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -173,12 +173,15 @@ INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); +INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); + struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, - netdev_features_t features); + netdev_features_t features, bool is_ipv6); static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) { @@ -467,6 +470,7 @@ void udp_init(void); DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void); +void udp_encap_disable(void); #if IS_ENABLED(CONFIG_IPV6) DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 2ea453dac876..afc7ce713657 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -129,12 +129,16 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type); static inline void udp_tunnel_get_rx_info(struct net_device *dev) { ASSERT_RTNL(); + if (!(dev->features & 
NETIF_F_RX_UDP_TUNNEL_PORT)) + return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev); } static inline void udp_tunnel_drop_rx_info(struct net_device *dev) { ASSERT_RTNL(); + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev); } @@ -177,9 +181,8 @@ static inline void udp_tunnel_encap_enable(struct socket *sock) #if IS_ENABLED(CONFIG_IPV6) if (sock->sk->sk_family == PF_INET6) ipv6_stub->udpv6_encap_enable(); - else #endif - udp_encap_enable(); + udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4 @@ -323,6 +326,8 @@ udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, static inline void udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) { + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->add_port(dev, ti); } @@ -330,6 +335,8 @@ udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) static inline void udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) { + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->del_port(dev, ti); } diff --git a/include/net/xdp.h b/include/net/xdp.h index 600acb307db6..a5bc214a49d9 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -76,6 +76,25 @@ struct xdp_buff { u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/ }; +static __always_inline void +xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) +{ + xdp->frame_sz = frame_sz; + xdp->rxq = rxq; +} + +static __always_inline void +xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, + int headroom, int data_len, const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + + xdp->data_hard_start = hard_start; + xdp->data = data; + xdp->data_end = data + data_len; + xdp->data_meta = meta_valid ? data : data + 1; +} + /* Reserve memory area at end-of data area. * * This macro reserves tailroom in the XDP buffer by limiting the @@ -145,6 +164,12 @@ void xdp_warn(const char *msg, const char *func, const int line); #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); +struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct sk_buff *skb, + struct net_device *dev); +struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct net_device *dev); +int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp); static inline void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 693285e76f13..4c52c2fd22a1 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -547,10 +547,6 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client, void *context), void *context, struct ib_sa_query **sa_query); -bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client, - struct ib_device *device, - u8 port_num); - static inline bool sa_path_is_roce(struct sa_path_rec *rec) { return ((rec->rec_type == SA_PATH_REC_TYPE_ROCE_V1) || diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 7752211c9638..676c57f5ca80 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2007 Cisco Systems. All rights reserved. + * Copyright (c) 2020 Intel Corporation. All rights reserved. 
*/ #ifndef IB_UMEM_H @@ -13,6 +14,7 @@ struct ib_ucontext; struct ib_umem_odp; +struct dma_buf_attach_ops; struct ib_umem { struct ib_device *ibdev; @@ -22,12 +24,29 @@ struct ib_umem { unsigned long address; u32 writable : 1; u32 is_odp : 1; + u32 is_dmabuf : 1; struct work_struct work; struct sg_table sg_head; int nmap; unsigned int sg_nents; }; +struct ib_umem_dmabuf { + struct ib_umem umem; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct scatterlist *first_sg; + struct scatterlist *last_sg; + unsigned long first_sg_offset; + unsigned long last_sg_trim; + void *private; +}; + +static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem) +{ + return container_of(umem, struct ib_umem_dmabuf, umem); +} + /* Returns the offset of the umem start relative to the first page. */ static inline int ib_umem_offset(struct ib_umem *umem) { @@ -86,6 +105,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, unsigned long virt); + /** * ib_umem_find_best_pgoff - Find best HW page size * @@ -116,6 +136,14 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem, dma_addr & pgoff_bitmask); } +struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, + unsigned long offset, size_t size, + int fd, int access, + const struct dma_buf_attach_ops *ops); +int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf); +void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf); +void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf); + #else /* CONFIG_INFINIBAND_USER_MEM */ #include <linux/err.h> @@ -124,12 +152,12 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-EOPNOTSUPP); } static inline void ib_umem_release(struct ib_umem *umem) { } static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) { - return -EINVAL; + return -EOPNOTSUPP; } static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, @@ -143,7 +171,21 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem, { return 0; } +static inline +struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, + unsigned long offset, + size_t size, int fd, + int access, + struct dma_buf_attach_ops *ops) +{ + return ERR_PTR(-EOPNOTSUPP); +} +static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf) +{ + return -EOPNOTSUPP; +} +static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { } +static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { } #endif /* CONFIG_INFINIBAND_USER_MEM */ - #endif /* IB_UMEM_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9fed65bf9279..ca28fca5736b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2,7 +2,7 @@ /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 
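The ib_umem.h hunks above add dma-buf backed user memory regions. As a rough, illustrative sketch (not part of this patch), a verbs driver could consume the new helpers along these lines; the attach ops, the move_notify handler and the wrapper function are hypothetical placeholders, and the dma-buf reservation locking that real callers need around map/unmap is omitted for brevity.

	#include <linux/dma-buf.h>
	#include <rdma/ib_umem.h>

	/* Hypothetical move_notify: the exporter may relocate the buffer, so the
	 * HW mapping must be torn down; how the driver finds its umem/MR from
	 * @attach is device specific and not shown here. */
	static void my_dmabuf_move_notify(struct dma_buf_attachment *attach)
	{
		/* invalidate the device mapping, then remap later as needed */
	}

	static const struct dma_buf_attach_ops my_dmabuf_attach_ops = {
		.allow_peer2peer = true,
		.move_notify	 = my_dmabuf_move_notify,
	};

	/* Hypothetical helper built on the new API. */
	static struct ib_umem_dmabuf *my_reg_dmabuf(struct ib_device *ibdev,
						    u64 offset, u64 length,
						    int fd, int access)
	{
		struct ib_umem_dmabuf *umem_dmabuf;
		int err;

		umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, length, fd, access,
						 &my_dmabuf_attach_ops);
		if (IS_ERR(umem_dmabuf))
			return umem_dmabuf;

		/* In practice this must run under the dma-buf reservation lock. */
		err = ib_umem_dmabuf_map_pages(umem_dmabuf);
		if (err) {
			ib_umem_dmabuf_release(umem_dmabuf);
			return ERR_PTR(err);
		}

		return umem_dmabuf;
	}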
@@ -2434,6 +2434,10 @@ struct ib_device_ops { struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata); + struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset, + u64 length, u64 virt_addr, int fd, + int mr_access_flags, + struct ib_udata *udata); struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, @@ -4670,4 +4674,7 @@ static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn) return (u32)(v & IB_GRH_FLOWLABEL_MASK); } + +const struct ib_port_immutable* +ib_port_immutable_read(struct ib_device *dev, unsigned int port); #endif /* IB_VERBS_H */ diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h index eb99856e8b30..e75cf9742e04 100644 --- a/include/rdma/rdma_counter.h +++ b/include/rdma/rdma_counter.h @@ -46,7 +46,8 @@ struct rdma_counter { void rdma_counter_init(struct ib_device *dev); void rdma_counter_release(struct ib_device *dev); int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, - bool on, enum rdma_nl_counter_mask mask); + enum rdma_nl_counter_mask mask, + struct netlink_ext_ack *extack); int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port); int rdma_counter_unbind_qp(struct ib_qp *qp, bool force); diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index b3bbd10eb3f0..02f966e9358f 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -187,7 +187,7 @@ struct iscsi_conn { struct iscsi_task *task; /* xmit task in progress */ /* xmit */ - spinlock_t taskqueuelock; /* protects the next three lists */ + /* items must be added/deleted under frwd lock */ struct list_head mgmtqueue; /* mgmt (control) xmit queue */ struct list_head cmdqueue; /* data-path cmd queue */ struct list_head requeue; /* tasks needing another run */ @@ -332,7 +332,7 @@ struct iscsi_session { * cmdsn, queued_cmdsn * * session resources: * * - cmdpool kfifo_out , * - * - mgmtpool, */ + * - mgmtpool, queues */ spinlock_t back_lock; /* protects cmdsn_exp * * cmdsn_max, * * cmdpool kfifo_in */ @@ -395,6 +395,8 @@ extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, extern void iscsi_host_remove(struct Scsi_Host *shost); extern void iscsi_host_free(struct Scsi_Host *shost); extern int iscsi_target_alloc(struct scsi_target *starget); +extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, + uint16_t requested_cmds_max); /* * session management diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 4e2d61e8fb1e..9271d7a49b90 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -391,10 +391,6 @@ struct sas_ha_struct { int strict_wide_ports; /* both sas_addr and attached_sas_addr must match * their siblings when forming wide ports */ - /* LLDD calls these to notify the class of an event. 
*/ - int (*notify_port_event)(struct asd_sas_phy *, enum port_event); - int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); - void *lldd_ha; /* not touched by sas class code */ struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */ @@ -706,4 +702,9 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev); int sas_request_addr(struct Scsi_Host *shost, u8 *addr); +int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, + gfp_t gfp_flags); +int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, + gfp_t gfp_flags); + #endif /* _SASLIB_H_ */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 5339baadc082..e75cca25338a 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -116,6 +116,7 @@ static inline int scsi_is_wlun(u64 lun) #define CLEAR_TASK_SET 0x0e #define INITIATE_RECOVERY 0x0f /* SCSI-II only */ #define RELEASE_RECOVERY 0x10 /* SCSI-II only */ +#define TERMINATE_IO_PROC 0x11 /* SCSI-II only */ #define CLEAR_ACA 0x16 #define LOGICAL_UNIT_RESET 0x17 #define SIMPLE_QUEUE_TAG 0x20 @@ -159,6 +160,7 @@ static inline int scsi_is_wlun(u64 lun) * paths might yield different results */ #define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */ #define DID_MEDIUM_ERROR 0x13 /* Medium error */ +#define DID_TRANSPORT_MARGINAL 0x14 /* Transport marginal errors */ #define DRIVER_OK 0x00 /* Driver status */ /* diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 69ade4fb71aa..ace15b5dc956 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -308,6 +308,11 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd) #define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \ for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i) +static inline void set_status_byte(struct scsi_cmnd *cmd, char status) +{ + cmd->result = (cmd->result & 0xffffff00) | status; +} + static inline void set_msg_byte(struct scsi_cmnd *cmd, char status) { cmd->result = (cmd->result & 0xffff00ff) | (status << 8); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 701f178b20ae..e30fd963b97d 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -314,6 +314,12 @@ struct scsi_host_template { * Status: OPTIONAL */ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); + /* + * Optional routine that allows the transport to decide if a cmd + * is retryable. Return true if the transport is in a state the + * cmd should be retried on. + */ + bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd); /* This is an optional routine that allows transport to initiate * LLD adapter or firmware reset using sysfs attribute. diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index c759b29e46c7..14214ee121ad 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -67,6 +67,7 @@ enum fc_port_state { FC_PORTSTATE_ERROR, FC_PORTSTATE_LOOPBACK, FC_PORTSTATE_DELETED, + FC_PORTSTATE_MARGINAL, }; @@ -742,7 +743,6 @@ struct fc_function_template { unsigned long disable_target_scan:1; }; - /** * fc_remote_port_chkready - called to validate the remote port state * prior to initiating io to the port. 
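With the libsas notifier prototypes above now taking an explicit gfp_t, event reporting no longer assumes a sleepable context. A minimal, hypothetical LLDD sketch (not taken from this patch; the IRQ handler and per-phy cookie are assumptions) might look like this:

	#include <linux/interrupt.h>
	#include <scsi/libsas.h>

	/* Hard-IRQ path: pass GFP_ATOMIC because sleeping is not allowed. */
	static irqreturn_t my_sas_phy_irq(int irq, void *data)
	{
		struct asd_sas_phy *phy = data;	/* hypothetical per-phy cookie */

		if (sas_notify_phy_event(phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC))
			pr_warn("phy event could not be queued\n");

		return IRQ_HANDLED;
	}

	/* Sleepable probe/reset paths can keep using GFP_KERNEL. */
	static void my_sas_report_bytes_dmaed(struct asd_sas_phy *phy)
	{
		sas_notify_port_event(phy, PORTE_BYTES_DMAED, GFP_KERNEL);
	}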
@@ -758,6 +758,7 @@ fc_remote_port_chkready(struct fc_rport *rport) switch (rport->port_state) { case FC_PORTSTATE_ONLINE: + case FC_PORTSTATE_MARGINAL: if (rport->roles & FC_PORT_ROLE_FCP_TARGET) result = 0; else if (rport->flags & FC_RPORT_DEVLOSS_PENDING) @@ -839,6 +840,7 @@ int fc_vport_terminate(struct fc_vport *vport); int fc_block_rport(struct fc_rport *rport); int fc_block_scsi_eh(struct scsi_cmnd *cmnd); enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd); +bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd); static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job) { diff --git a/include/soc/brcmstb/common.h b/include/soc/brcmstb/common.h deleted file mode 100644 index e4fe76856de9..000000000000 --- a/include/soc/brcmstb/common.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright © 2014 NVIDIA Corporation - * Copyright © 2015 Broadcom Corporation - */ - -#ifndef __SOC_BRCMSTB_COMMON_H__ -#define __SOC_BRCMSTB_COMMON_H__ - -bool soc_is_brcmstb(void); - -#endif /* __SOC_BRCMSTB_COMMON_H__ */ diff --git a/include/soc/canaan/k210-sysctl.h b/include/soc/canaan/k210-sysctl.h new file mode 100644 index 000000000000..0c2b2c2dabca --- /dev/null +++ b/include/soc/canaan/k210-sysctl.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + */ +#ifndef K210_SYSCTL_H +#define K210_SYSCTL_H + +/* + * Kendryte K210 SoC system controller registers offsets. + * Taken from Kendryte SDK (kendryte-standalone-sdk). + */ +#define K210_SYSCTL_GIT_ID 0x00 /* Git short commit id */ +#define K210_SYSCTL_UART_BAUD 0x04 /* Default UARTHS baud rate */ +#define K210_SYSCTL_PLL0 0x08 /* PLL0 controller */ +#define K210_SYSCTL_PLL1 0x0C /* PLL1 controller */ +#define K210_SYSCTL_PLL2 0x10 /* PLL2 controller */ +#define K210_SYSCTL_PLL_LOCK 0x18 /* PLL lock tester */ +#define K210_SYSCTL_ROM_ERROR 0x1C /* AXI ROM detector */ +#define K210_SYSCTL_SEL0 0x20 /* Clock select controller 0 */ +#define K210_SYSCTL_SEL1 0x24 /* Clock select controller 1 */ +#define K210_SYSCTL_EN_CENT 0x28 /* Central clock enable */ +#define K210_SYSCTL_EN_PERI 0x2C /* Peripheral clock enable */ +#define K210_SYSCTL_SOFT_RESET 0x30 /* Soft reset ctrl */ +#define K210_SYSCTL_PERI_RESET 0x34 /* Peripheral reset controller */ +#define K210_SYSCTL_THR0 0x38 /* Clock threshold controller 0 */ +#define K210_SYSCTL_THR1 0x3C /* Clock threshold controller 1 */ +#define K210_SYSCTL_THR2 0x40 /* Clock threshold controller 2 */ +#define K210_SYSCTL_THR3 0x44 /* Clock threshold controller 3 */ +#define K210_SYSCTL_THR4 0x48 /* Clock threshold controller 4 */ +#define K210_SYSCTL_THR5 0x4C /* Clock threshold controller 5 */ +#define K210_SYSCTL_THR6 0x50 /* Clock threshold controller 6 */ +#define K210_SYSCTL_MISC 0x54 /* Miscellaneous controller */ +#define K210_SYSCTL_PERI 0x58 /* Peripheral controller */ +#define K210_SYSCTL_SPI_SLEEP 0x5C /* SPI sleep controller */ +#define K210_SYSCTL_RESET_STAT 0x60 /* Reset source status */ +#define K210_SYSCTL_DMA_SEL0 0x64 /* DMA handshake selector 0 */ +#define K210_SYSCTL_DMA_SEL1 0x68 /* DMA handshake selector 1 */ +#define K210_SYSCTL_POWER_SEL 0x6C /* IO Power Mode Select controller */ + +void k210_clk_early_init(void __iomem *regs); + +#endif diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 3feddfec9f87..4925a1b59dc9 100644 --- a/include/soc/fsl/qe/qe.h +++ 
b/include/soc/fsl/qe/qe.h @@ -27,12 +27,6 @@ #define QE_NUM_OF_BRGS 16 #define QE_NUM_OF_PORTS 1024 -/* Memory partitions -*/ -#define MEM_PART_SYSTEM 0 -#define MEM_PART_SECONDARY 1 -#define MEM_PART_MURAM 2 - /* Clocks and BRGs */ enum qe_clock { QE_CLK_NONE = 0, @@ -102,8 +96,9 @@ s32 cpm_muram_alloc(unsigned long size, unsigned long align); void cpm_muram_free(s32 offset); s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); void __iomem *cpm_muram_addr(unsigned long offset); -unsigned long cpm_muram_offset(void __iomem *addr); +unsigned long cpm_muram_offset(const void __iomem *addr); dma_addr_t cpm_muram_dma(void __iomem *addr); +void cpm_muram_free_addr(const void __iomem *addr); #else static inline s32 cpm_muram_alloc(unsigned long size, unsigned long align) @@ -126,7 +121,7 @@ static inline void __iomem *cpm_muram_addr(unsigned long offset) return NULL; } -static inline unsigned long cpm_muram_offset(void __iomem *addr) +static inline unsigned long cpm_muram_offset(const void __iomem *addr) { return -ENOSYS; } @@ -135,6 +130,9 @@ static inline dma_addr_t cpm_muram_dma(void __iomem *addr) { return 0; } +static inline void cpm_muram_free_addr(const void __iomem *addr) +{ +} #endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */ /* QE PIO */ @@ -239,6 +237,7 @@ static inline int qe_alive_during_sleep(void) #define qe_muram_addr cpm_muram_addr #define qe_muram_offset cpm_muram_offset #define qe_muram_dma cpm_muram_dma +#define qe_muram_free_addr cpm_muram_free_addr #ifdef CONFIG_PPC32 #define qe_iowrite8(val, addr) out_8(addr, val) diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h index dc4e79468094..9696a5b9b5d1 100644 --- a/include/soc/fsl/qe/ucc_fast.h +++ b/include/soc/fsl/qe/ucc_fast.h @@ -146,7 +146,6 @@ struct ucc_fast_info { resource_size_t regs; int irq; u32 uccm_mask; - int bd_mem_part; int brkpt_support; int grant_support; int tsa; diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h index 5a34b87d89e3..15e3397cec58 100644 --- a/include/soc/mediatek/smi.h +++ b/include/soc/mediatek/smi.h @@ -9,15 +9,14 @@ #include <linux/bitops.h> #include <linux/device.h> -#ifdef CONFIG_MTK_SMI - -#define MTK_LARB_NR_MAX 16 +#if IS_ENABLED(CONFIG_MTK_SMI) #define MTK_SMI_MMU_EN(port) BIT(port) struct mtk_smi_larb_iommu { struct device *dev; unsigned int mmu; + unsigned char bank[32]; }; /* diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 2f4cd3288bcc..425ff29d9389 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -54,16 +54,17 @@ * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses * of the switch port net devices, towards the CPU port module. * PGID_UC: the flooding destinations for unknown unicast traffic. - * PGID_MC: the flooding destinations for broadcast and non-IP multicast - * traffic. + * PGID_MC: the flooding destinations for non-IP multicast traffic. * PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic. * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic. + * PGID_BC: the flooding destinations for broadcast traffic. 
*/ -#define PGID_CPU 59 -#define PGID_UC 60 -#define PGID_MC 61 -#define PGID_MCIPV4 62 -#define PGID_MCIPV6 63 +#define PGID_CPU 58 +#define PGID_UC 59 +#define PGID_MC 60 +#define PGID_MCIPV4 61 +#define PGID_MCIPV6 62 +#define PGID_BC 63 #define for_each_unicast_dest_pgid(ocelot, pgid) \ for ((pgid) = 0; \ @@ -86,9 +87,6 @@ /* Source PGIDs, one per physical port */ #define PGID_SRC 80 -#define IFH_INJ_BYPASS BIT(31) -#define IFH_INJ_POP_CNT_DISABLE (3 << 28) - #define IFH_TAG_TYPE_C 0 #define IFH_TAG_TYPE_S 1 @@ -98,10 +96,7 @@ #define IFH_REW_OP_TWO_STEP_PTP 0x3 #define IFH_REW_OP_ORIGIN_PTP 0x5 -#define OCELOT_TAG_LEN 16 -#define OCELOT_SHORT_PREFIX_LEN 4 -#define OCELOT_LONG_PREFIX_LEN 16 -#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN) +#define OCELOT_NUM_TC 8 #define OCELOT_SPEED_2500 0 #define OCELOT_SPEED_1000 1 @@ -117,6 +112,8 @@ #define REG_RESERVED_ADDR 0xffffffff #define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR) +#define OCELOT_MRP_CPUQ 7 + enum ocelot_target { ANA = 1, QS, @@ -563,6 +560,8 @@ struct ocelot_ops { int (*netdev_to_port)(struct net_device *dev); int (*reset)(struct ocelot *ocelot); u16 (*wm_enc)(u16 value); + u16 (*wm_dec)(u16 value); + void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse); }; struct ocelot_vcap_block { @@ -576,6 +575,18 @@ struct ocelot_vlan { u16 vid; }; +enum ocelot_sb { + OCELOT_SB_BUF, + OCELOT_SB_REF, + OCELOT_SB_NUM, +}; + +enum ocelot_sb_pool { + OCELOT_SB_POOL_ING, + OCELOT_SB_POOL_EGR, + OCELOT_SB_POOL_NUM, +}; + struct ocelot_port { struct ocelot *ocelot; @@ -595,10 +606,17 @@ struct ocelot_port { phy_interface_t phy_mode; u8 *xmit_template; + bool is_dsa_8021q_cpu; + bool learn_ena; + + struct net_device *bond; + bool lag_tx_active; }; struct ocelot { struct device *dev; + struct devlink *devlink; + struct devlink_port *devlink_ports; const struct ocelot_ops *ops; struct regmap *targets[TARGET_MAX]; @@ -607,7 +625,9 @@ struct ocelot { const struct ocelot_stat_layout *stats_layout; unsigned int num_stats; - int shared_queue_sz; + u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM]; + int packet_buffer_size; + int num_frame_refs; int num_mact_rows; struct net_device *hw_bridge_dev; @@ -632,10 +652,8 @@ struct ocelot { int npi; - enum ocelot_tag_prefix inj_prefix; - enum ocelot_tag_prefix xtr_prefix; - - u32 *lags; + enum ocelot_tag_prefix npi_inj_prefix; + enum ocelot_tag_prefix npi_xtr_prefix; struct list_head multicast; struct list_head pgids; @@ -661,6 +679,12 @@ struct ocelot { /* Protects the PTP clock */ spinlock_t ptp_clock_lock; struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM]; + +#if IS_ENABLED(CONFIG_BRIDGE_MRP) + u16 mrp_ring_id; + struct net_device *mrp_p_port; + struct net_device *mrp_s_port; +#endif }; struct ocelot_policer { @@ -709,6 +733,7 @@ struct ocelot_policer { /* I/O */ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); +void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg); u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, @@ -718,6 +743,40 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, u32 val, u32 reg, u32 offset); +/* Packet I/O */ +#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB) + +bool ocelot_can_inject(struct ocelot 
*ocelot, int grp); +void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, + u32 rew_op, struct sk_buff *skb); +int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb); +void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp); + +#else + +static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp) +{ + return false; +} + +static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port, + int grp, u32 rew_op, + struct sk_buff *skb) +{ +} + +static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, + struct sk_buff **skb) +{ + return -EIO; +} + +static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) +{ +} + +#endif + /* Hardware initialization */ int ocelot_regfields_init(struct ocelot *ocelot, const struct reg_field *const regfields); @@ -737,11 +796,16 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset); int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct ethtool_ts_info *info); void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs); +int ocelot_port_flush(struct ocelot *ocelot, int port); void ocelot_adjust_link(struct ocelot *ocelot, int port, struct phy_device *phydev); -int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled, - struct switchdev_trans *trans); +int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state); +void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot); +int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags val); +void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags val); int ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge); int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, @@ -777,5 +841,82 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb); int ocelot_port_mdb_del(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb); +int ocelot_port_lag_join(struct ocelot *ocelot, int port, + struct net_device *bond, + struct netdev_lag_upper_info *info); +void ocelot_port_lag_leave(struct ocelot *ocelot, int port, + struct net_device *bond); +void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active); + +int ocelot_devlink_sb_register(struct ocelot *ocelot); +void ocelot_devlink_sb_unregister(struct ocelot *ocelot); +int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); +int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); +int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack); +int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); +int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); +int 
ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index); +int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index); +int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); +int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); + +#if IS_ENABLED(CONFIG_BRIDGE_MRP) +int ocelot_mrp_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp); +int ocelot_mrp_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp); +int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp); +int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp); +#else +static inline int ocelot_mrp_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + return -EOPNOTSUPP; +} + +static inline int ocelot_mrp_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + return -EOPNOTSUPP; +} + +static inline int +ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + return -EOPNOTSUPP; +} + +static inline int +ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + return -EOPNOTSUPP; +} +#endif #endif diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h index a814bc2017d8..9731895be643 100644 --- a/include/soc/mscc/ocelot_qsys.h +++ b/include/soc/mscc/ocelot_qsys.h @@ -71,11 +71,8 @@ #define QSYS_RES_STAT_GSZ 0x8 -#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12)) -#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12) -#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12) -#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0)) -#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0) +#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(x) ((x) & GENMASK(15, 0)) +#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT_M GENMASK(15, 0) #define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2)) #define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2) diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index 96300adf3648..25fd525aaf92 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -400,4 +400,301 @@ enum vcap_es0_action_field { VCAP_ES0_ACT_HIT_STICKY, }; +struct ocelot_ipv4 { + u8 addr[4]; +}; + +enum ocelot_vcap_bit { + OCELOT_VCAP_BIT_ANY, + OCELOT_VCAP_BIT_0, + OCELOT_VCAP_BIT_1 +}; + +struct ocelot_vcap_u8 { + u8 value[1]; + u8 mask[1]; +}; + +struct ocelot_vcap_u16 { + u8 value[2]; + u8 mask[2]; +}; + +struct ocelot_vcap_u24 { + u8 value[3]; + u8 mask[3]; +}; + +struct ocelot_vcap_u32 { + u8 value[4]; + u8 mask[4]; +}; + +struct ocelot_vcap_u40 { + u8 value[5]; + u8 mask[5]; +}; + +struct ocelot_vcap_u48 { + u8 value[6]; + u8 mask[6]; +}; + +struct ocelot_vcap_u64 { + u8 value[8]; + u8 mask[8]; +}; + +struct ocelot_vcap_u128 { + u8 value[16]; + u8 mask[16]; +}; + +struct ocelot_vcap_vid { + u16 value; + u16 mask; +}; + +struct ocelot_vcap_ipv4 { + struct ocelot_ipv4 value; + struct ocelot_ipv4 mask; +}; + +struct ocelot_vcap_udp_tcp { + u16 value; + u16 mask; +}; + +struct ocelot_vcap_port { + u8 value; + u8 mask; +}; + +enum ocelot_vcap_key_type { + OCELOT_VCAP_KEY_ANY, + OCELOT_VCAP_KEY_ETYPE, + OCELOT_VCAP_KEY_LLC, + OCELOT_VCAP_KEY_SNAP, + 
OCELOT_VCAP_KEY_ARP, + OCELOT_VCAP_KEY_IPV4, + OCELOT_VCAP_KEY_IPV6 +}; + +struct ocelot_vcap_key_vlan { + struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */ + struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ + enum ocelot_vcap_bit dei; /* DEI */ + enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ +}; + +struct ocelot_vcap_key_etype { + struct ocelot_vcap_u48 dmac; + struct ocelot_vcap_u48 smac; + struct ocelot_vcap_u16 etype; + struct ocelot_vcap_u16 data; /* MAC data */ +}; + +struct ocelot_vcap_key_llc { + struct ocelot_vcap_u48 dmac; + struct ocelot_vcap_u48 smac; + + /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */ + struct ocelot_vcap_u32 llc; +}; + +struct ocelot_vcap_key_snap { + struct ocelot_vcap_u48 dmac; + struct ocelot_vcap_u48 smac; + + /* SNAP header: Organization Code at byte 0, Type at byte 3 */ + struct ocelot_vcap_u40 snap; +}; + +struct ocelot_vcap_key_arp { + struct ocelot_vcap_u48 smac; + enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */ + enum ocelot_vcap_bit req; /* Opcode request/reply */ + enum ocelot_vcap_bit unknown; /* Opcode unknown */ + enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */ + enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */ + + /**< Protocol addr. length 4, hardware length 6 */ + enum ocelot_vcap_bit length; + + enum ocelot_vcap_bit ip; /* Protocol address type IP */ + enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */ + struct ocelot_vcap_ipv4 sip; /* Sender IP address */ + struct ocelot_vcap_ipv4 dip; /* Target IP address */ +}; + +struct ocelot_vcap_key_ipv4 { + enum ocelot_vcap_bit ttl; /* TTL zero */ + enum ocelot_vcap_bit fragment; /* Fragment */ + enum ocelot_vcap_bit options; /* Header options */ + struct ocelot_vcap_u8 ds; + struct ocelot_vcap_u8 proto; /* Protocol */ + struct ocelot_vcap_ipv4 sip; /* Source IP address */ + struct ocelot_vcap_ipv4 dip; /* Destination IP address */ + struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */ + struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */ + struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */ + enum ocelot_vcap_bit tcp_fin; + enum ocelot_vcap_bit tcp_syn; + enum ocelot_vcap_bit tcp_rst; + enum ocelot_vcap_bit tcp_psh; + enum ocelot_vcap_bit tcp_ack; + enum ocelot_vcap_bit tcp_urg; + enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */ + enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */ + enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ +}; + +struct ocelot_vcap_key_ipv6 { + struct ocelot_vcap_u8 proto; /* IPv6 protocol */ + struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */ + struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */ + enum ocelot_vcap_bit ttl; /* TTL zero */ + struct ocelot_vcap_u8 ds; + struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */ + struct ocelot_vcap_udp_tcp sport; + struct ocelot_vcap_udp_tcp dport; + enum ocelot_vcap_bit tcp_fin; + enum ocelot_vcap_bit tcp_syn; + enum ocelot_vcap_bit tcp_rst; + enum ocelot_vcap_bit tcp_psh; + enum ocelot_vcap_bit tcp_ack; + enum ocelot_vcap_bit tcp_urg; + enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */ + enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */ + enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ +}; + +enum ocelot_mask_mode { + OCELOT_MASK_MODE_NONE, + OCELOT_MASK_MODE_PERMIT_DENY, + OCELOT_MASK_MODE_POLICY, + OCELOT_MASK_MODE_REDIRECT, +}; + +enum ocelot_es0_tag { + OCELOT_NO_ES0_TAG, + OCELOT_ES0_TAG, + OCELOT_FORCE_PORT_TAG, + 
OCELOT_FORCE_UNTAG, +}; + +enum ocelot_tag_tpid_sel { + OCELOT_TAG_TPID_SEL_8021Q, + OCELOT_TAG_TPID_SEL_8021AD, +}; + +struct ocelot_vcap_action { + union { + /* VCAP ES0 */ + struct { + enum ocelot_es0_tag push_outer_tag; + enum ocelot_es0_tag push_inner_tag; + enum ocelot_tag_tpid_sel tag_a_tpid_sel; + int tag_a_vid_sel; + int tag_a_pcp_sel; + u16 vid_a_val; + u8 pcp_a_val; + u8 dei_a_val; + enum ocelot_tag_tpid_sel tag_b_tpid_sel; + int tag_b_vid_sel; + int tag_b_pcp_sel; + u16 vid_b_val; + u8 pcp_b_val; + u8 dei_b_val; + }; + + /* VCAP IS1 */ + struct { + bool vid_replace_ena; + u16 vid; + bool vlan_pop_cnt_ena; + int vlan_pop_cnt; + bool pcp_dei_ena; + u8 pcp; + u8 dei; + bool qos_ena; + u8 qos_val; + u8 pag_override_mask; + u8 pag_val; + }; + + /* VCAP IS2 */ + struct { + bool cpu_copy_ena; + u8 cpu_qu_num; + enum ocelot_mask_mode mask_mode; + unsigned long port_mask; + bool police_ena; + struct ocelot_policer pol; + u32 pol_ix; + }; + }; +}; + +struct ocelot_vcap_stats { + u64 bytes; + u64 pkts; + u64 used; +}; + +enum ocelot_vcap_filter_type { + OCELOT_VCAP_FILTER_DUMMY, + OCELOT_VCAP_FILTER_PAG, + OCELOT_VCAP_FILTER_OFFLOAD, +}; + +struct ocelot_vcap_id { + unsigned long cookie; + bool tc_offload; +}; + +struct ocelot_vcap_filter { + struct list_head list; + + enum ocelot_vcap_filter_type type; + int block_id; + int goto_target; + int lookup; + u8 pag; + u16 prio; + struct ocelot_vcap_id id; + + struct ocelot_vcap_action action; + struct ocelot_vcap_stats stats; + /* For VCAP IS1 and IS2 */ + unsigned long ingress_port_mask; + /* For VCAP ES0 */ + struct ocelot_vcap_port ingress_port; + struct ocelot_vcap_port egress_port; + + enum ocelot_vcap_bit dmac_mc; + enum ocelot_vcap_bit dmac_bc; + struct ocelot_vcap_key_vlan vlan; + + enum ocelot_vcap_key_type key_type; + union { + /* OCELOT_VCAP_KEY_ANY: No specific fields */ + struct ocelot_vcap_key_etype etype; + struct ocelot_vcap_key_llc llc; + struct ocelot_vcap_key_snap snap; + struct ocelot_vcap_key_arp arp; + struct ocelot_vcap_key_ipv4 ipv4; + struct ocelot_vcap_key_ipv6 ipv6; + } key; +}; + +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule, + struct netlink_ext_ack *extack); +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); +struct ocelot_vcap_filter * +ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id, + bool tc_offload); + #endif /* _OCELOT_VCAP_H_ */ diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h index 7a2a055ba6b0..3acca067c72b 100644 --- a/include/soc/qcom/tcs.h +++ b/include/soc/qcom/tcs.h @@ -30,7 +30,13 @@ enum rpmh_state { * * @addr: the address of the resource slv_id:18:16 | offset:0:15 * @data: the resource state request - * @wait: wait for this request to be complete before sending the next + * @wait: ensure that this command is complete before returning. + * Setting "wait" here only makes sense during rpmh_write_batch() for + * active-only transfers, this is because: + * rpmh_write() - Always waits. + * (DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl) + * rpmh_write_async() - Never waits. + * (There's no request completion callback) */ struct tcs_cmd { u32 addr; @@ -43,6 +49,7 @@ struct tcs_cmd { * * @state: state for the request. 
* @wait_for_compl: wait until we get a response from the h/w accelerator + * (same as setting cmd->wait for all commands in the request) * @num_cmds: the number of @cmds in this request * @cmds: an array of tcs_cmds */ diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h deleted file mode 100644 index 05199a97ccf4..000000000000 --- a/include/soc/tegra/emc.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2014 NVIDIA Corporation. All rights reserved. - */ - -#ifndef __SOC_TEGRA_EMC_H__ -#define __SOC_TEGRA_EMC_H__ - -struct tegra_emc; - -int tegra_emc_prepare_timing_change(struct tegra_emc *emc, - unsigned long rate); -void tegra_emc_complete_timing_change(struct tegra_emc *emc, - unsigned long rate); - -#endif /* __SOC_TEGRA_EMC_H__ */ diff --git a/include/sound/core.h b/include/sound/core.h index 0462c577d7a3..2e24f194ef70 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -122,6 +122,9 @@ struct snd_card { size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */ struct mutex memory_mutex; /* protection for the above */ +#ifdef CONFIG_SND_DEBUG + struct dentry *debugfs_root; /* debugfs root for card */ +#endif #ifdef CONFIG_PM unsigned int power_state; /* power state */ @@ -180,6 +183,9 @@ static inline struct device *snd_card_get_device_link(struct snd_card *card) extern int snd_major; extern int snd_ecards_limit; extern struct class *sound_class; +#ifdef CONFIG_SND_DEBUG +extern struct dentry *sound_debugfs_root; +#endif void snd_request_card(int card); diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 8c5e38180fb0..96666efddb39 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -66,6 +66,9 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) * @chan_name: Custom channel name to use when requesting DMA channel. 
* @fifo_size: FIFO size of the DAI controller in bytes * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + * @peripheral_config: peripheral configuration for programming peripheral + * for dmaengine transfer + * @peripheral_size: peripheral configuration buffer size */ struct snd_dmaengine_dai_dma_data { dma_addr_t addr; @@ -76,6 +79,8 @@ struct snd_dmaengine_dai_dma_data { const char *chan_name; unsigned int fifo_size; unsigned int flags; + void *peripheral_config; + size_t peripheral_size; }; void snd_dmaengine_pcm_set_config_from_dai_data( diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h index bbb5a137855c..013784467bec 100644 --- a/include/sound/graph_card.h +++ b/include/sound/graph_card.h @@ -9,8 +9,10 @@ #include <sound/simple_card_utils.h> -int graph_card_probe(struct snd_soc_card *card); +int audio_graph_card_probe(struct snd_soc_card *card); -int graph_parse_of(struct asoc_simple_priv *priv, struct device *dev); +int audio_graph_parse_of(struct asoc_simple_priv *priv, struct device *dev); + +int audio_graph_remove(struct platform_device *pdev); #endif /* __GRAPH_CARD_H */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 6eed61e6cf8a..22af68b01426 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -241,6 +241,8 @@ struct hdac_bus_ops { /* get a response from the last command */ int (*get_response)(struct hdac_bus *bus, unsigned int addr, unsigned int *res); + /* notify of codec link power-up/down */ + void (*link_power)(struct hdac_device *hdev, bool enable); }; /* @@ -378,15 +380,8 @@ void snd_hdac_bus_exit(struct hdac_bus *bus); int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr, unsigned int cmd, unsigned int *res); -static inline void snd_hdac_codec_link_up(struct hdac_device *codec) -{ - set_bit(codec->addr, &codec->bus->codec_powered); -} - -static inline void snd_hdac_codec_link_down(struct hdac_device *codec) -{ - clear_bit(codec->addr, &codec->bus->codec_powered); -} +void snd_hdac_codec_link_up(struct hdac_device *codec); +void snd_hdac_codec_link_down(struct hdac_device *codec); int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val); int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr, @@ -400,6 +395,7 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset); +void snd_hdac_bus_link_power(struct hdac_device *hdev, bool enable); void snd_hdac_bus_update_rirb(struct hdac_bus *bus); int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 7abf74c1c474..a125e3814b58 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -131,6 +131,8 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link); int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link); +void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable); + /* update register macro */ #define snd_hdac_updatel(addr, reg, mask, val) \ writel(((readl(addr + reg) & ~(mask)) | (val)), \ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index b55970859a13..4b3a1d374b90 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -34,6 
+34,11 @@ struct hdmi_codec_daifmt { unsigned int frame_clk_inv:1; unsigned int bit_clk_master:1; unsigned int frame_clk_master:1; + /* bit_fmt could be standard PCM format or + * IEC958 encoded format. ALSA IEC958 plugin will pass + * IEC958_SUBFRAME format to the underneath driver. + */ + snd_pcm_format_t bit_fmt; }; /* diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h index 743c2f442280..d0574805865f 100644 --- a/include/sound/intel-nhlt.h +++ b/include/sound/intel-nhlt.h @@ -113,6 +113,11 @@ struct nhlt_vendor_dmic_array_config { } __packed; enum { + NHLT_CONFIG_TYPE_GENERIC = 0, + NHLT_CONFIG_TYPE_MIC_ARRAY = 1 +}; + +enum { NHLT_MIC_ARRAY_2CH_SMALL = 0xa, NHLT_MIC_ARRAY_2CH_BIG = 0xb, NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc, diff --git a/include/sound/jack.h b/include/sound/jack.h index 9eb2b5ec1ec4..1181f536557e 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -67,6 +67,7 @@ struct snd_jack { char name[100]; unsigned int key[6]; /* Keep in sync with definitions above */ #endif /* CONFIG_SND_JACK_INPUT_DEV */ + int hw_status_cache; void *private_data; void (*private_free)(struct snd_jack *); }; diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2336bf9243e1..2e1200d17d0c 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -229,7 +229,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule { unsigned int cond; int var; - int deps[4]; + int deps[5]; snd_pcm_hw_rule_func_t func; void *private; diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h index 39a77c7cea36..710c95be5509 100644 --- a/include/sound/rt5645.h +++ b/include/sound/rt5645.h @@ -22,6 +22,8 @@ struct rt5645_platform_data { bool level_trigger_irq; /* Invert JD1_1 status polarity */ bool inv_jd1_1; + /* Invert HP detect status polarity */ + bool inv_hp_pol; /* Value to asign to snd_soc_card.long_name */ const char *long_name; diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 9a43c44dcbbb..c45075024c30 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -174,7 +174,7 @@ struct snd_soc_acpi_codecs { static inline bool snd_soc_acpi_sof_parent(struct device *dev) { return dev->parent && dev->parent->driver && dev->parent->driver->name && - !strcmp(dev->parent->driver->name, "sof-audio-acpi"); + !strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi")); } #endif diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 0bce41fefd30..5b47768222b7 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -353,6 +353,12 @@ int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value); +unsigned int snd_soc_component_read_field(struct snd_soc_component *component, + unsigned int reg, unsigned int mask); +int snd_soc_component_write_field(struct snd_soc_component *component, + unsigned int reg, unsigned int mask, + unsigned int val); + /* component wide operations */ int snd_soc_component_set_sysclk(struct snd_soc_component *component, int clk_id, int source, diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 34d0dbf73ca9..1358a0ceb4d0 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -353,9 +353,9 @@ struct snd_soc_dai_driver { /* DAI capabilities */ struct snd_soc_pcm_stream capture; struct snd_soc_pcm_stream playback; - unsigned int symmetric_rates:1; + unsigned int symmetric_rate:1; unsigned int symmetric_channels:1; - 
unsigned int symmetric_samplebits:1; + unsigned int symmetric_sample_bits:1; /* probe ordering - for components with runtime dependencies */ int probe_order; diff --git a/include/sound/soc.h b/include/sound/soc.h index 3fa6c40a63b7..bd38015d6c6d 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -685,9 +685,9 @@ struct snd_soc_dai_link { unsigned int ignore_suspend:1; /* Symmetry requirements */ - unsigned int symmetric_rates:1; + unsigned int symmetric_rate:1; unsigned int symmetric_channels:1; - unsigned int symmetric_samplebits:1; + unsigned int symmetric_sample_bits:1; /* Do not create a PCM for this DAI link (Backend link) */ unsigned int no_pcm:1; diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h index 7abc4f0bd3ad..2a7e055584f9 100644 --- a/include/sound/sof/ext_manifest.h +++ b/include/sound/sof/ext_manifest.h @@ -58,9 +58,9 @@ struct sof_ext_man_header { /* Extended manifest elements types */ enum sof_ext_man_elem_type { SOF_EXT_MAN_ELEM_FW_VERSION = 0, - SOF_EXT_MAN_ELEM_WINDOW = SOF_IPC_EXT_WINDOW, - SOF_EXT_MAN_ELEM_CC_VERSION = SOF_IPC_EXT_CC_INFO, - SOF_EXT_MAN_ELEM_DBG_ABI = SOF_IPC_EXT_USER_ABI_INFO, + SOF_EXT_MAN_ELEM_WINDOW = 1, + SOF_EXT_MAN_ELEM_CC_VERSION = 2, + SOF_EXT_MAN_ELEM_DBG_ABI = 4, SOF_EXT_MAN_ELEM_CONFIG_DATA = 5, /**< ABI3.17 */ SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA = 6, }; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 6336780d83a7..ce2fba49c95d 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -72,6 +72,7 @@ int transport_backend_register(const struct target_backend_ops *); void target_backend_unregister(const struct target_backend_ops *); void target_complete_cmd(struct se_cmd *, u8); +void target_set_cmd_data_length(struct se_cmd *, int); void target_complete_cmd_with_length(struct se_cmd *, u8, int); void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 63dd12124139..54dcc0eb25fa 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -187,6 +187,7 @@ enum tcm_sense_reason_table { TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d), TCM_LUN_BUSY = R(0x1e), + TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f), #undef R }; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index cd74bffed5c6..a23be89119aa 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -55,8 +55,7 @@ /* tracepoints with more than 12 arguments will hit build error */ #define CAST_TO_U64(...) 
CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +#define __BPF_DECLARE_TRACE(call, proto, args) \ static notrace void \ __bpf_trace_##call(void *__data, proto) \ { \ @@ -64,6 +63,10 @@ __bpf_trace_##call(void *__data, proto) \ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \ } +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) + /* * This part is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the @@ -111,6 +114,11 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef DECLARE_TRACE +#define DECLARE_TRACE(call, proto, args) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ + __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef DEFINE_EVENT_WRITABLE diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index e41c611d6d3b..899fdacf57b9 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(bcache_request, __entry->sector = bio->bi_iter.bi_sector; __entry->orig_sector = bio->bi_iter.bi_sector - 16; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", @@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u", @@ -137,7 +137,7 @@ TRACE_EVENT(bcache_read, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); __entry->cache_hit = hit; __entry->bypass = bypass; ), @@ -168,7 +168,7 @@ TRACE_EVENT(bcache_write, __entry->inode = inode; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); __entry->writeback = writeback; __entry->bypass = bypass; ), @@ -238,7 +238,7 @@ TRACE_EVENT(bcache_journal_write, __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; __entry->nr_keys = keys; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u keys %u", diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 0d782663a005..cc5ab96a7471 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -89,7 +89,7 @@ TRACE_EVENT(block_rq_requeue, __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), @@ -133,7 +133,7 @@ TRACE_EVENT(block_rq_complete, 
__entry->nr_sector = nr_bytes >> 9; __entry->error = error; - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), @@ -166,7 +166,7 @@ DECLARE_EVENT_CLASS(block_rq, __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -196,7 +196,7 @@ DEFINE_EVENT(block_rq, block_rq_insert, /** * block_rq_issue - issue pending block IO request operation to device driver - * @rq: block IO operation operation request + * @rq: block IO operation request * * Called when block operation request @rq from queue @q is sent to a * device driver for processing. @@ -210,7 +210,7 @@ DEFINE_EVENT(block_rq, block_rq_issue, /** * block_rq_merge - merge request with another one in the elevator - * @rq: block IO operation operation request + * @rq: block IO operation request * * Called when block operation request @rq from queue @q is merged to another * request queued in the elevator. @@ -249,7 +249,7 @@ TRACE_EVENT(block_bio_complete, __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = blk_status_to_errno(bio->bi_status); - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u [%d]", @@ -276,7 +276,7 @@ DECLARE_EVENT_CLASS(block_bio, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -433,7 +433,7 @@ TRACE_EVENT(block_split, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -474,7 +474,7 @@ TRACE_EVENT(block_bio_remap, __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", @@ -518,7 +518,7 @@ TRACE_EVENT(block_rq_remap, __entry->old_dev = dev; __entry->old_sector = from; __entry->nr_bios = blk_rq_count_bios(rq); - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index ecd24c719de4..0551ea65374f 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -99,7 +99,8 @@ struct btrfs_space_info; EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ - EMe(COMMIT_TRANS, "COMMIT_TRANS") + EM( COMMIT_TRANS, "COMMIT_TRANS") \ + EMe(FORCE_COMMIT_TRANS, "FORCE_COMMIT_TRANS") /* * First define the enums in the above macros to be exported to userspace via @@ -499,12 +500,13 @@ DEFINE_EVENT( #define show_ordered_flags(flags) \ __print_flags(flags, "|", \ - { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \ - { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \ + { (1 << 
BTRFS_ORDERED_REGULAR), "REGULAR" }, \ { (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \ - { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \ { (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \ + { (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \ { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \ + { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \ + { (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \ { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \ { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" }) @@ -1111,15 +1113,16 @@ TRACE_EVENT(btrfs_trigger_flush, TRACE_EVENT(btrfs_flush_space, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 num_bytes, - int state, int ret), + int state, int ret, bool for_preempt), - TP_ARGS(fs_info, flags, num_bytes, state, ret), + TP_ARGS(fs_info, flags, num_bytes, state, ret, for_preempt), TP_STRUCT__entry_btrfs( __field( u64, flags ) __field( u64, num_bytes ) __field( int, state ) __field( int, ret ) + __field( bool, for_preempt ) ), TP_fast_assign_btrfs(fs_info, @@ -1127,15 +1130,16 @@ TRACE_EVENT(btrfs_flush_space, __entry->num_bytes = num_bytes; __entry->state = state; __entry->ret = ret; + __entry->for_preempt = for_preempt; ), - TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d", + TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d for_preempt=%d", __entry->state, __print_symbolic(__entry->state, FLUSH_STATES), __entry->flags, __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), - __entry->num_bytes, __entry->ret) + __entry->num_bytes, __entry->ret, __entry->for_preempt) ); DECLARE_EVENT_CLASS(btrfs__reserved_extent, @@ -2025,6 +2029,97 @@ TRACE_EVENT(btrfs_convert_extent_bit, __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS)) ); +DECLARE_EVENT_CLASS(btrfs_dump_space_info, + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo), + + TP_ARGS(fs_info, sinfo), + + TP_STRUCT__entry_btrfs( + __field( u64, flags ) + __field( u64, total_bytes ) + __field( u64, bytes_used ) + __field( u64, bytes_pinned ) + __field( u64, bytes_reserved ) + __field( u64, bytes_may_use ) + __field( u64, bytes_readonly ) + __field( u64, reclaim_size ) + __field( int, clamp ) + __field( u64, global_reserved ) + __field( u64, trans_reserved ) + __field( u64, delayed_refs_reserved ) + __field( u64, delayed_reserved ) + __field( u64, free_chunk_space ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->flags = sinfo->flags; + __entry->total_bytes = sinfo->total_bytes; + __entry->bytes_used = sinfo->bytes_used; + __entry->bytes_pinned = sinfo->bytes_pinned; + __entry->bytes_reserved = sinfo->bytes_reserved; + __entry->bytes_may_use = sinfo->bytes_may_use; + __entry->bytes_readonly = sinfo->bytes_readonly; + __entry->reclaim_size = sinfo->reclaim_size; + __entry->clamp = sinfo->clamp; + __entry->global_reserved = fs_info->global_block_rsv.reserved; + __entry->trans_reserved = fs_info->trans_block_rsv.reserved; + __entry->delayed_refs_reserved = fs_info->delayed_refs_rsv.reserved; + __entry->delayed_reserved = fs_info->delayed_block_rsv.reserved; + __entry->free_chunk_space = atomic64_read(&fs_info->free_chunk_space); + ), + + TP_printk_btrfs("flags=%s total_bytes=%llu bytes_used=%llu " + "bytes_pinned=%llu bytes_reserved=%llu " + "bytes_may_use=%llu bytes_readonly=%llu " + "reclaim_size=%llu clamp=%d global_reserved=%llu " + "trans_reserved=%llu delayed_refs_reserved=%llu " + "delayed_reserved=%llu chunk_free_space=%llu", + __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS), + __entry->total_bytes, 
__entry->bytes_used, + __entry->bytes_pinned, __entry->bytes_reserved, + __entry->bytes_may_use, __entry->bytes_readonly, + __entry->reclaim_size, __entry->clamp, + __entry->global_reserved, __entry->trans_reserved, + __entry->delayed_refs_reserved, + __entry->delayed_reserved, __entry->free_chunk_space) +); + +DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim, + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo), + TP_ARGS(fs_info, sinfo) +); + +TRACE_EVENT(btrfs_reserve_ticket, + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes, + u64 start_ns, int flush, int error), + + TP_ARGS(fs_info, flags, bytes, start_ns, flush, error), + + TP_STRUCT__entry_btrfs( + __field( u64, flags ) + __field( u64, bytes ) + __field( u64, start_ns ) + __field( int, flush ) + __field( int, error ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->flags = flags; + __entry->bytes = bytes; + __entry->start_ns = start_ns; + __entry->flush = flush; + __entry->error = error; + ), + + TP_printk_btrfs("flags=%s bytes=%llu start_ns=%llu flush=%s error=%d", + __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS), + __entry->bytes, __entry->start_ns, + __print_symbolic(__entry->flush, FLUSH_ACTIONS), + __entry->error) +); + DECLARE_EVENT_CLASS(btrfs_sleep_tree_lock, TP_PROTO(const struct extent_buffer *eb, u64 start_ns), diff --git a/include/trace/events/error_report.h b/include/trace/events/error_report.h new file mode 100644 index 000000000000..96f64bf218b2 --- /dev/null +++ b/include/trace/events/error_report.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Declarations for error reporting tracepoints. + * + * Copyright (C) 2021, Google LLC. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM error_report + +#if !defined(_TRACE_ERROR_REPORT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ERROR_REPORT_H + +#include <linux/tracepoint.h> + +#ifndef __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum error_detector { + ERROR_DETECTOR_KFENCE, + ERROR_DETECTOR_KASAN +}; + +#endif /* __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY */ + +#define error_detector_list \ + EM(ERROR_DETECTOR_KFENCE, "kfence") \ + EMe(ERROR_DETECTOR_KASAN, "kasan") +/* Always end the list with an EMe. */ + +#undef EM +#undef EMe + +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +error_detector_list + +#undef EM +#undef EMe + +#define EM(a, b) { a, b }, +#define EMe(a, b) { a, b } + +#define show_error_detector_list(val) \ + __print_symbolic(val, error_detector_list) + +DECLARE_EVENT_CLASS(error_report_template, + TP_PROTO(enum error_detector error_detector, unsigned long id), + TP_ARGS(error_detector, id), + TP_STRUCT__entry(__field(enum error_detector, error_detector) + __field(unsigned long, id)), + TP_fast_assign(__entry->error_detector = error_detector; + __entry->id = id;), + TP_printk("[%s] %lx", + show_error_detector_list(__entry->error_detector), + __entry->id)); + +/** + * error_report_end - called after printing the error report + * @error_detector: short string describing the error detection tool + * @id: pseudo-unique descriptor identifying the report + * (e.g. the memory access address) + * + * This event occurs right after a debugging tool finishes printing the error + * report. 
+ */ +DEFINE_EVENT(error_report_template, error_report_end, + TP_PROTO(enum error_detector error_detector, unsigned long id), + TP_ARGS(error_detector, id)); + +#endif /* _TRACE_ERROR_REPORT_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h index 112bd06487bf..e801f4910522 100644 --- a/include/trace/events/intel_iommu.h +++ b/include/trace/events/intel_iommu.h @@ -6,7 +6,6 @@ * * Author: Lu Baolu <baolu.lu@linux.intel.com> */ -#ifdef CONFIG_INTEL_IOMMU #undef TRACE_SYSTEM #define TRACE_SYSTEM intel_iommu @@ -135,8 +134,44 @@ DEFINE_EVENT(dma_map_sg, bounce_map_sg, struct scatterlist *sg), TP_ARGS(dev, index, total, sg) ); + +TRACE_EVENT(qi_submit, + TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3), + + TP_ARGS(iommu, qw0, qw1, qw2, qw3), + + TP_STRUCT__entry( + __field(u64, qw0) + __field(u64, qw1) + __field(u64, qw2) + __field(u64, qw3) + __string(iommu, iommu->name) + ), + + TP_fast_assign( + __assign_str(iommu, iommu->name); + __entry->qw0 = qw0; + __entry->qw1 = qw1; + __entry->qw2 = qw2; + __entry->qw3 = qw3; + ), + + TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx", + __print_symbolic(__entry->qw0 & 0xf, + { QI_CC_TYPE, "cc_inv" }, + { QI_IOTLB_TYPE, "iotlb_inv" }, + { QI_DIOTLB_TYPE, "dev_tlb_inv" }, + { QI_IEC_TYPE, "iec_inv" }, + { QI_IWD_TYPE, "inv_wait" }, + { QI_EIOTLB_TYPE, "p_iotlb_inv" }, + { QI_PC_TYPE, "pc_inv" }, + { QI_DEIOTLB_TYPE, "p_dev_tlb_inv" }, + { QI_PGRP_RESP_TYPE, "page_grp_resp" }), + __get_str(iommu), + __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3 + ) +); #endif /* _TRACE_INTEL_IOMMU_H */ /* This part must be outside protection */ #include <trace/define_trace.h> -#endif /* CONFIG_INTEL_IOMMU */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f65b1f6db22d..3a60b6b6db32 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -115,7 +115,7 @@ DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) ); -DECLARE_EVENT_CLASS(kmem_free, +TRACE_EVENT(kfree, TP_PROTO(unsigned long call_site, const void *ptr), @@ -135,18 +135,26 @@ DECLARE_EVENT_CLASS(kmem_free, (void *)__entry->call_site, __entry->ptr) ); -DEFINE_EVENT(kmem_free, kfree, +TRACE_EVENT(kmem_cache_free, - TP_PROTO(unsigned long call_site, const void *ptr), + TP_PROTO(unsigned long call_site, const void *ptr, const char *name), - TP_ARGS(call_site, ptr) -); + TP_ARGS(call_site, ptr, name), -DEFINE_EVENT(kmem_free, kmem_cache_free, + TP_STRUCT__entry( + __field( unsigned long, call_site ) + __field( const void *, ptr ) + __string( name, name ) + ), - TP_PROTO(unsigned long call_site, const void *ptr), + TP_fast_assign( + __entry->call_site = call_site; + __entry->ptr = ptr; + __assign_str(name, name); + ), - TP_ARGS(call_site, ptr) + TP_printk("call_site=%pS ptr=%p name=%s", + (void *)__entry->call_site, __entry->ptr, __get_str(name)) ); TRACE_EVENT(mm_page_free, diff --git a/include/trace/events/netlink.h b/include/trace/events/netlink.h new file mode 100644 index 000000000000..3b7be3b386a4 --- /dev/null +++ b/include/trace/events/netlink.h @@ -0,0 +1,29 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM netlink + +#if !defined(_TRACE_NETLINK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NETLINK_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(netlink_extack, + + TP_PROTO(const char *msg), + + TP_ARGS(msg), + + TP_STRUCT__entry( + __string( 
msg, msg ) + ), + + TP_fast_assign( + __assign_str(msg, msg); + ), + + TP_printk("msg=%s", __get_str(msg)) +); + +#endif /* _TRACE_NETLINK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h index 8fd1babae761..e1735fe7c76a 100644 --- a/include/trace/events/pagemap.h +++ b/include/trace/events/pagemap.h @@ -27,24 +27,21 @@ TRACE_EVENT(mm_lru_insertion, - TP_PROTO( - struct page *page, - int lru - ), + TP_PROTO(struct page *page), - TP_ARGS(page, lru), + TP_ARGS(page), TP_STRUCT__entry( __field(struct page *, page ) __field(unsigned long, pfn ) - __field(int, lru ) + __field(enum lru_list, lru ) __field(unsigned long, flags ) ), TP_fast_assign( __entry->page = page; __entry->pfn = page_to_pfn(page); - __entry->lru = lru; + __entry->lru = page_lru(page); __entry->flags = trace_pagemap_flags(page); ), diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 155b5cb43cfd..5fc29400e1a2 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -505,6 +505,32 @@ TRACE_EVENT_RCU(rcu_callback, __entry->qlen) ); +TRACE_EVENT_RCU(rcu_segcb_stats, + + TP_PROTO(struct rcu_segcblist *rs, const char *ctx), + + TP_ARGS(rs, ctx), + + TP_STRUCT__entry( + __field(const char *, ctx) + __array(unsigned long, gp_seq, RCU_CBLIST_NSEGS) + __array(long, seglen, RCU_CBLIST_NSEGS) + ), + + TP_fast_assign( + __entry->ctx = ctx; + memcpy(__entry->seglen, rs->seglen, RCU_CBLIST_NSEGS * sizeof(long)); + memcpy(__entry->gp_seq, rs->gp_seq, RCU_CBLIST_NSEGS * sizeof(unsigned long)); + + ), + + TP_printk("%s seglen: (DONE=%ld, WAIT=%ld, NEXT_READY=%ld, NEXT=%ld) " + "gp_seq: (DONE=%lu, WAIT=%lu, NEXT_READY=%lu, NEXT=%lu)", __entry->ctx, + __entry->seglen[0], __entry->seglen[1], __entry->seglen[2], __entry->seglen[3], + __entry->gp_seq[0], __entry->gp_seq[1], __entry->gp_seq[2], __entry->gp_seq[3]) + +); + /* * Tracepoint for the registration of a single RCU callback of the special * kvfree() form. 
The first argument is the RCU type, the second argument diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 76e85e16854b..c838e7ac1c2d 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,6 +60,51 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) +DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(u32, received) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) { + __entry->received = 0; + __entry->vendor_err = wc->vendor_err; + } else { + __entry->received = wc->byte_len; + __entry->vendor_err = 0; + } + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err, + __entry->received + ) +); + +#define DEFINE_RECEIVE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_receive_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep @@ -838,7 +883,8 @@ TRACE_EVENT(xprtrdma_post_linv_err, ** Completion events **/ -DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); +DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive); + DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); @@ -1790,7 +1836,7 @@ TRACE_EVENT(svcrdma_post_recv, ) ); -DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); +DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive); TRACE_EVENT(svcrdma_rq_post_err, TP_PROTO( diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 6f89c27265f5..036eb1f5c133 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1603,6 +1603,7 @@ TRACE_EVENT(svc_process, __field(u32, vers) __field(u32, proc) __string(service, name) + __string(procedure, rqst->rq_procinfo->pc_name) __string(addr, rqst->rq_xprt ? rqst->rq_xprt->xpt_remotebuf : "(null)") ), @@ -1612,13 +1613,16 @@ TRACE_EVENT(svc_process, __entry->vers = rqst->rq_vers; __entry->proc = rqst->rq_proc; __assign_str(service, name); + __assign_str(procedure, rqst->rq_procinfo->pc_name); __assign_str(addr, rqst->rq_xprt ? 
rqst->rq_xprt->xpt_remotebuf : "(null)"); ), - TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u", + TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s", __get_str(addr), __entry->xid, - __get_str(service), __entry->vers, __entry->proc) + __get_str(service), __entry->vers, + __get_str(procedure) + ) ); DECLARE_EVENT_CLASS(svc_rqst_event, @@ -1874,6 +1878,7 @@ TRACE_EVENT(svc_stats_latency, TP_STRUCT__entry( __field(u32, xid) __field(unsigned long, execute) + __string(procedure, rqst->rq_procinfo->pc_name) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), @@ -1881,11 +1886,13 @@ TRACE_EVENT(svc_stats_latency, __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->execute = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_stime)); + __assign_str(procedure, rqst->rq_procinfo->pc_name); __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%s xid=0x%08x execute-us=%lu", - __get_str(addr), __entry->xid, __entry->execute) + TP_printk("addr=%s xid=0x%08x proc=%s execute-us=%lu", + __get_str(addr), __entry->xid, __get_str(procedure), + __entry->execute) ); DECLARE_EVENT_CLASS(svc_deferred_event, diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index cf97f6339acb..ba94857eea11 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -59,6 +59,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, __field(int, state) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) @@ -75,6 +76,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; @@ -86,7 +88,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s", + TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s", + show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, show_tcp_state_name(__entry->state)) @@ -125,6 +128,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk, __field(const void *, skaddr) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) @@ -140,6 +144,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk, __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; @@ -153,7 +158,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk, __entry->sock_cookie = sock_gen_cookie(sk); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx", + TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx", + show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, @@ -192,6 +198,7 @@ TRACE_EVENT(tcp_retransmit_synack, __field(const void *, req) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) @@ -207,6 +214,7 @@ TRACE_EVENT(tcp_retransmit_synack, __entry->sport = ireq->ir_num; __entry->dport = ntohs(ireq->ir_rmt_port); + __entry->family = sk->sk_family; p32 = (__be32 *) 
__entry->saddr; *p32 = ireq->ir_loc_addr; @@ -218,7 +226,8 @@ TRACE_EVENT(tcp_retransmit_synack, ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", + TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", + show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6) @@ -238,6 +247,7 @@ TRACE_EVENT(tcp_probe, __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __field(__u32, mark) __field(__u16, data_len) __field(__u32, snd_nxt) @@ -264,6 +274,7 @@ TRACE_EVENT(tcp_probe, __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->mark = skb->mark; + __entry->family = sk->sk_family; __entry->data_len = skb->len - __tcp_hdrlen(th); __entry->snd_nxt = tp->snd_nxt; @@ -276,7 +287,8 @@ TRACE_EVENT(tcp_probe, __entry->sock_cookie = sock_gen_cookie(sk); ), - TP_printk("src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx", + TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx", + show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->mark, __entry->data_len, __entry->snd_nxt, __entry->snd_una, __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd, diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 0bd54a184391..e151477d645c 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -20,32 +20,51 @@ { SYNCHRONIZE_CACHE, "SYNC" }, \ { UNMAP, "UNMAP" }) -#define UFS_LINK_STATES \ - EM(UIC_LINK_OFF_STATE) \ - EM(UIC_LINK_ACTIVE_STATE) \ - EMe(UIC_LINK_HIBERN8_STATE) - -#define UFS_PWR_MODES \ - EM(UFS_ACTIVE_PWR_MODE) \ - EM(UFS_SLEEP_PWR_MODE) \ - EM(UFS_POWERDOWN_PWR_MODE) \ - EMe(UFS_DEEPSLEEP_PWR_MODE) - -#define UFSCHD_CLK_GATING_STATES \ - EM(CLKS_OFF) \ - EM(CLKS_ON) \ - EM(REQ_CLKS_OFF) \ - EMe(REQ_CLKS_ON) +#define UFS_LINK_STATES \ + EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \ + EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \ + EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE") + +#define UFS_PWR_MODES \ + EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \ + EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \ + EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \ + EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE") + +#define UFSCHD_CLK_GATING_STATES \ + EM(CLKS_OFF, "CLKS_OFF") \ + EM(CLKS_ON, "CLKS_ON") \ + EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \ + EMe(REQ_CLKS_ON, "REQ_CLKS_ON") + +#define UFS_CMD_TRACE_STRINGS \ + EM(UFS_CMD_SEND, "send_req") \ + EM(UFS_CMD_COMP, "complete_rsp") \ + EM(UFS_DEV_COMP, "dev_complete") \ + EM(UFS_QUERY_SEND, "query_send") \ + EM(UFS_QUERY_COMP, "query_complete") \ + EM(UFS_QUERY_ERR, "query_complete_err") \ + EM(UFS_TM_SEND, "tm_send") \ + EM(UFS_TM_COMP, "tm_complete") \ + EMe(UFS_TM_ERR, "tm_complete_err") + +#define UFS_CMD_TRACE_TSF_TYPES \ + EM(UFS_TSF_CDB, "CDB") \ + EM(UFS_TSF_OSF, "OSF") \ + EM(UFS_TSF_TM_INPUT, "TM_INPUT") \ + EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT") /* Enums require being exported to userspace, for user tool parsing */ #undef EM #undef EMe -#define EM(a) TRACE_DEFINE_ENUM(a); -#define EMe(a) TRACE_DEFINE_ENUM(a); +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) 
TRACE_DEFINE_ENUM(a); UFS_LINK_STATES; UFS_PWR_MODES; UFSCHD_CLK_GATING_STATES; +UFS_CMD_TRACE_STRINGS +UFS_CMD_TRACE_TSF_TYPES /* * Now redefine the EM() and EMe() macros to map the enums to the strings @@ -53,8 +72,13 @@ UFSCHD_CLK_GATING_STATES; */ #undef EM #undef EMe -#define EM(a) { a, #a }, -#define EMe(a) { a, #a } +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +#define show_ufs_cmd_trace_str(str_t) \ + __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS) +#define show_ufs_cmd_trace_tsf(tsf) \ + __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES) TRACE_EVENT(ufshcd_clk_gating, @@ -223,16 +247,16 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init, TP_ARGS(dev_name, err, usecs, dev_state, link_state)); TRACE_EVENT(ufshcd_command, - TP_PROTO(const char *dev_name, const char *str, unsigned int tag, - u32 doorbell, int transfer_len, u32 intr, u64 lba, - u8 opcode, u8 group_id), + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, + unsigned int tag, u32 doorbell, int transfer_len, u32 intr, + u64 lba, u8 opcode, u8 group_id), - TP_ARGS(dev_name, str, tag, doorbell, transfer_len, + TP_ARGS(dev_name, str_t, tag, doorbell, transfer_len, intr, lba, opcode, group_id), TP_STRUCT__entry( __string(dev_name, dev_name) - __string(str, str) + __field(enum ufs_trace_str_t, str_t) __field(unsigned int, tag) __field(u32, doorbell) __field(int, transfer_len) @@ -244,7 +268,7 @@ TRACE_EVENT(ufshcd_command, TP_fast_assign( __assign_str(dev_name, dev_name); - __assign_str(str, str); + __entry->str_t = str_t; __entry->tag = tag; __entry->doorbell = doorbell; __entry->transfer_len = transfer_len; @@ -256,22 +280,22 @@ TRACE_EVENT(ufshcd_command, TP_printk( "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x", - __get_str(str), __get_str(dev_name), __entry->tag, - __entry->doorbell, __entry->transfer_len, + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __entry->tag, __entry->doorbell, __entry->transfer_len, __entry->intr, __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode), (u32)__entry->group_id ) ); TRACE_EVENT(ufshcd_uic_command, - TP_PROTO(const char *dev_name, const char *str, u32 cmd, + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd, u32 arg1, u32 arg2, u32 arg3), - TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3), + TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3), TP_STRUCT__entry( __string(dev_name, dev_name) - __string(str, str) + __field(enum ufs_trace_str_t, str_t) __field(u32, cmd) __field(u32, arg1) __field(u32, arg2) @@ -280,7 +304,7 @@ TRACE_EVENT(ufshcd_uic_command, TP_fast_assign( __assign_str(dev_name, dev_name); - __assign_str(str, str); + __entry->str_t = str_t; __entry->cmd = cmd; __entry->arg1 = arg1; __entry->arg2 = arg2; @@ -289,34 +313,38 @@ TRACE_EVENT(ufshcd_uic_command, TP_printk( "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x", - __get_str(str), __get_str(dev_name), __entry->cmd, - __entry->arg1, __entry->arg2, __entry->arg3 + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3 ) ); TRACE_EVENT(ufshcd_upiu, - TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf), + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr, + void *tsf, enum ufs_trace_tsf_t tsf_t), - TP_ARGS(dev_name, str, hdr, tsf), + TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t), TP_STRUCT__entry( __string(dev_name, dev_name) - __string(str, str) + __field(enum ufs_trace_str_t, str_t) __array(unsigned char, hdr, 12) 
__array(unsigned char, tsf, 16) + __field(enum ufs_trace_tsf_t, tsf_t) ), TP_fast_assign( __assign_str(dev_name, dev_name); - __assign_str(str, str); + __entry->str_t = str_t; memcpy(__entry->hdr, hdr, sizeof(__entry->hdr)); memcpy(__entry->tsf, tsf, sizeof(__entry->tsf)); + __entry->tsf_t = tsf_t; ), TP_printk( - "%s: %s: HDR:%s, CDB:%s", - __get_str(str), __get_str(dev_name), + "%s: %s: HDR:%s, %s:%s", + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), __print_hex(__entry->hdr, sizeof(__entry->hdr)), + show_ufs_cmd_trace_tsf(__entry->tsf_t), __print_hex(__entry->tsf, sizeof(__entry->tsf)) ) ); diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 9b8ae961acc5..970cc2ea2850 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -30,7 +30,7 @@ TRACE_EVENT(workqueue_queue_work, TP_STRUCT__entry( __field( void *, work ) __field( void *, function) - __field( void *, workqueue) + __field( const char *, workqueue) __field( unsigned int, req_cpu ) __field( unsigned int, cpu ) ), @@ -38,12 +38,12 @@ TRACE_EVENT(workqueue_queue_work, TP_fast_assign( __entry->work = work; __entry->function = work->func; - __entry->workqueue = pwq->wq; + __entry->workqueue = pwq->wq->name; __entry->req_cpu = req_cpu; __entry->cpu = pwq->pool->cpu; ), - TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u", + TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u", __entry->work, __entry->function, __entry->workqueue, __entry->req_cpu, __entry->cpu) ); diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 7785961d82ba..8268bf747d6f 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -231,9 +231,11 @@ TRACE_MAKE_SYSTEM_STR(); * { * struct trace_seq *s = &iter->seq; * struct trace_event_raw_<call> *field; <-- defined in stage 1 - * struct trace_entry *entry; * struct trace_seq *p = &iter->tmp_seq; - * int ret; + * + * -------(for event)------- + * + * struct trace_entry *entry; * * entry = iter->ent; * @@ -245,14 +247,23 @@ TRACE_MAKE_SYSTEM_STR(); * field = (typeof(field))entry; * * trace_seq_init(p); - * ret = trace_seq_printf(s, "%s: ", <call>); - * if (ret) - * ret = trace_seq_printf(s, <TP_printk> "\n"); - * if (!ret) - * return TRACE_TYPE_PARTIAL_LINE; + * return trace_output_call(iter, <call>, <TP_printk> "\n"); * - * return TRACE_TYPE_HANDLED; - * } + * ------(or, for event class)------ + * + * int ret; + * + * field = (typeof(field))iter->ent; + * + * ret = trace_raw_output_prep(iter, trace_event); + * if (ret != TRACE_TYPE_HANDLED) + * return ret; + * + * trace_event_printf(iter, <TP_printk> "\n"); + * + * return trace_handle_return(s); + * ------- + * } * * This is the method used to print the raw event to the trace * output format. 
Note, this is not needed if the data is read @@ -364,7 +375,7 @@ trace_raw_output_##call(struct trace_iterator *iter, int flags, \ if (ret != TRACE_TYPE_HANDLED) \ return ret; \ \ - trace_seq_printf(s, print); \ + trace_event_printf(iter, print); \ \ return trace_handle_return(s); \ } \ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 728752917785..ce58cff99b66 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -861,9 +861,11 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2) __SYSCALL(__NR_process_madvise, sys_process_madvise) #define __NR_epoll_pwait2 441 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) +#define __NR_mount_setattr 442 +__SYSCALL(__NR_mount_setattr, sys_mount_setattr) #undef __NR_syscalls -#define __NR_syscalls 442 +#define __NR_syscalls 443 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 1c064627e6c3..d1a93d2a85f9 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -990,7 +990,7 @@ struct drm_format_modifier { }; /** - * struct drm_mode_create_blob - Create New block property + * struct drm_mode_create_blob - Create New blob property * * Create a new 'blob' data property, copying length bytes from data pointer, * and returning new blob ID. diff --git a/include/uapi/linux/acrn.h b/include/uapi/linux/acrn.h new file mode 100644 index 000000000000..353b2a2e4536 --- /dev/null +++ b/include/uapi/linux/acrn.h @@ -0,0 +1,580 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Userspace interface for /dev/acrn_hsm - ACRN Hypervisor Service Module + * + * This file can be used by applications that need to communicate with the HSM + * via the ioctl interface. + * + * Copyright (C) 2021 Intel Corporation. All rights reserved. 
+ */ + +#ifndef _UAPI_ACRN_H +#define _UAPI_ACRN_H + +#include <linux/types.h> +#include <linux/uuid.h> + +#define ACRN_IO_REQUEST_MAX 16 + +#define ACRN_IOREQ_STATE_PENDING 0 +#define ACRN_IOREQ_STATE_COMPLETE 1 +#define ACRN_IOREQ_STATE_PROCESSING 2 +#define ACRN_IOREQ_STATE_FREE 3 + +#define ACRN_IOREQ_TYPE_PORTIO 0 +#define ACRN_IOREQ_TYPE_MMIO 1 +#define ACRN_IOREQ_TYPE_PCICFG 2 + +#define ACRN_IOREQ_DIR_READ 0 +#define ACRN_IOREQ_DIR_WRITE 1 + +/** + * struct acrn_mmio_request - Info of a MMIO I/O request + * @direction: Access direction of this request (ACRN_IOREQ_DIR_*) + * @reserved: Reserved for alignment and should be 0 + * @address: Access address of this MMIO I/O request + * @size: Access size of this MMIO I/O request + * @value: Read/write value of this MMIO I/O request + */ +struct acrn_mmio_request { + __u32 direction; + __u32 reserved; + __u64 address; + __u64 size; + __u64 value; +}; + +/** + * struct acrn_pio_request - Info of a PIO I/O request + * @direction: Access direction of this request (ACRN_IOREQ_DIR_*) + * @reserved: Reserved for alignment and should be 0 + * @address: Access address of this PIO I/O request + * @size: Access size of this PIO I/O request + * @value: Read/write value of this PIO I/O request + */ +struct acrn_pio_request { + __u32 direction; + __u32 reserved; + __u64 address; + __u64 size; + __u32 value; +}; + +/** + * struct acrn_pci_request - Info of a PCI I/O request + * @direction: Access direction of this request (ACRN_IOREQ_DIR_*) + * @reserved: Reserved for alignment and should be 0 + * @size: Access size of this PCI I/O request + * @value: Read/write value of this PIO I/O request + * @bus: PCI bus value of this PCI I/O request + * @dev: PCI device value of this PCI I/O request + * @func: PCI function value of this PCI I/O request + * @reg: PCI config space offset of this PCI I/O request + * + * Need keep same header layout with &struct acrn_pio_request. + */ +struct acrn_pci_request { + __u32 direction; + __u32 reserved[3]; + __u64 size; + __u32 value; + __u32 bus; + __u32 dev; + __u32 func; + __u32 reg; +}; + +/** + * struct acrn_io_request - 256-byte ACRN I/O request + * @type: Type of this request (ACRN_IOREQ_TYPE_*). + * @completion_polling: Polling flag. Hypervisor will poll completion of the + * I/O request if this flag set. + * @reserved0: Reserved fields. + * @reqs: Union of different types of request. Byte offset: 64. + * @reqs.pio_request: PIO request data of the I/O request. + * @reqs.pci_request: PCI configuration space request data of the I/O request. + * @reqs.mmio_request: MMIO request data of the I/O request. + * @reqs.data: Raw data of the I/O request. + * @reserved1: Reserved fields. + * @kernel_handled: Flag indicates this request need be handled in kernel. + * @processed: The status of this request (ACRN_IOREQ_STATE_*). + * + * The state transitions of ACRN I/O request: + * + * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ... + * + * An I/O request in COMPLETE or FREE state is owned by the hypervisor. HSM and + * ACRN userspace are in charge of processing the others. + * + * On basis of the states illustrated above, a typical lifecycle of ACRN IO + * request would look like: + * + * Flow (assume the initial state is FREE) + * | + * | Service VM vCPU 0 Service VM vCPU x User vCPU y + * | + * | hypervisor: + * | fills in type, addr, etc. 
+ * | pauses the User VM vCPU y + * | sets the state to PENDING (a) + * | fires an upcall to Service VM + * | + * | HSM: + * | scans for PENDING requests + * | sets the states to PROCESSING (b) + * | assigns the requests to clients (c) + * V + * | client: + * | scans for the assigned requests + * | handles the requests (d) + * | HSM: + * | sets states to COMPLETE + * | notifies the hypervisor + * | + * | hypervisor: + * | resumes User VM vCPU y (e) + * | + * | hypervisor: + * | post handling (f) + * V sets states to FREE + * + * Note that the procedures (a) to (f) in the illustration above require to be + * strictly processed in the order. One vCPU cannot trigger another request of + * I/O emulation before completing the previous one. + * + * Atomic and barriers are required when HSM and hypervisor accessing the state + * of &struct acrn_io_request. + * + */ +struct acrn_io_request { + __u32 type; + __u32 completion_polling; + __u32 reserved0[14]; + union { + struct acrn_pio_request pio_request; + struct acrn_pci_request pci_request; + struct acrn_mmio_request mmio_request; + __u64 data[8]; + } reqs; + __u32 reserved1; + __u32 kernel_handled; + __u32 processed; +} __attribute__((aligned(256))); + +struct acrn_io_request_buffer { + union { + struct acrn_io_request req_slot[ACRN_IO_REQUEST_MAX]; + __u8 reserved[4096]; + }; +}; + +/** + * struct acrn_ioreq_notify - The structure of ioreq completion notification + * @vmid: User VM ID + * @reserved: Reserved and should be 0 + * @vcpu: vCPU ID + */ +struct acrn_ioreq_notify { + __u16 vmid; + __u16 reserved; + __u32 vcpu; +}; + +/** + * struct acrn_vm_creation - Info to create a User VM + * @vmid: User VM ID returned from the hypervisor + * @reserved0: Reserved and must be 0 + * @vcpu_num: Number of vCPU in the VM. Return from hypervisor. + * @reserved1: Reserved and must be 0 + * @uuid: UUID of the VM. Pass to hypervisor directly. + * @vm_flag: Flag of the VM creating. Pass to hypervisor directly. + * @ioreq_buf: Service VM GPA of I/O request buffer. Pass to + * hypervisor directly. + * @cpu_affinity: CPU affinity of the VM. Pass to hypervisor directly. + * It's a bitmap which indicates CPUs used by the VM. + */ +struct acrn_vm_creation { + __u16 vmid; + __u16 reserved0; + __u16 vcpu_num; + __u16 reserved1; + guid_t uuid; + __u64 vm_flag; + __u64 ioreq_buf; + __u64 cpu_affinity; +}; + +/** + * struct acrn_gp_regs - General registers of a User VM + * @rax: Value of register RAX + * @rcx: Value of register RCX + * @rdx: Value of register RDX + * @rbx: Value of register RBX + * @rsp: Value of register RSP + * @rbp: Value of register RBP + * @rsi: Value of register RSI + * @rdi: Value of register RDI + * @r8: Value of register R8 + * @r9: Value of register R9 + * @r10: Value of register R10 + * @r11: Value of register R11 + * @r12: Value of register R12 + * @r13: Value of register R13 + * @r14: Value of register R14 + * @r15: Value of register R15 + */ +struct acrn_gp_regs { + __le64 rax; + __le64 rcx; + __le64 rdx; + __le64 rbx; + __le64 rsp; + __le64 rbp; + __le64 rsi; + __le64 rdi; + __le64 r8; + __le64 r9; + __le64 r10; + __le64 r11; + __le64 r12; + __le64 r13; + __le64 r14; + __le64 r15; +}; + +/** + * struct acrn_descriptor_ptr - Segment descriptor table of a User VM. + * @limit: Limit field. + * @base: Base field. + * @reserved: Reserved and must be 0. 
+ */ +struct acrn_descriptor_ptr { + __le16 limit; + __le64 base; + __le16 reserved[3]; +} __attribute__ ((__packed__)); + +/** + * struct acrn_regs - Registers structure of a User VM + * @gprs: General registers + * @gdt: Global Descriptor Table + * @idt: Interrupt Descriptor Table + * @rip: Value of register RIP + * @cs_base: Base of code segment selector + * @cr0: Value of register CR0 + * @cr4: Value of register CR4 + * @cr3: Value of register CR3 + * @ia32_efer: Value of IA32_EFER MSR + * @rflags: Value of regsiter RFLAGS + * @reserved_64: Reserved and must be 0 + * @cs_ar: Attribute field of code segment selector + * @cs_limit: Limit field of code segment selector + * @reserved_32: Reserved and must be 0 + * @cs_sel: Value of code segment selector + * @ss_sel: Value of stack segment selector + * @ds_sel: Value of data segment selector + * @es_sel: Value of extra segment selector + * @fs_sel: Value of FS selector + * @gs_sel: Value of GS selector + * @ldt_sel: Value of LDT descriptor selector + * @tr_sel: Value of TSS descriptor selector + */ +struct acrn_regs { + struct acrn_gp_regs gprs; + struct acrn_descriptor_ptr gdt; + struct acrn_descriptor_ptr idt; + + __le64 rip; + __le64 cs_base; + __le64 cr0; + __le64 cr4; + __le64 cr3; + __le64 ia32_efer; + __le64 rflags; + __le64 reserved_64[4]; + + __le32 cs_ar; + __le32 cs_limit; + __le32 reserved_32[3]; + + __le16 cs_sel; + __le16 ss_sel; + __le16 ds_sel; + __le16 es_sel; + __le16 fs_sel; + __le16 gs_sel; + __le16 ldt_sel; + __le16 tr_sel; +}; + +/** + * struct acrn_vcpu_regs - Info of vCPU registers state + * @vcpu_id: vCPU ID + * @reserved: Reserved and must be 0 + * @vcpu_regs: vCPU registers state + * + * This structure will be passed to hypervisor directly. + */ +struct acrn_vcpu_regs { + __u16 vcpu_id; + __u16 reserved[3]; + struct acrn_regs vcpu_regs; +}; + +#define ACRN_MEM_ACCESS_RIGHT_MASK 0x00000007U +#define ACRN_MEM_ACCESS_READ 0x00000001U +#define ACRN_MEM_ACCESS_WRITE 0x00000002U +#define ACRN_MEM_ACCESS_EXEC 0x00000004U +#define ACRN_MEM_ACCESS_RWX (ACRN_MEM_ACCESS_READ | \ + ACRN_MEM_ACCESS_WRITE | \ + ACRN_MEM_ACCESS_EXEC) + +#define ACRN_MEM_TYPE_MASK 0x000007C0U +#define ACRN_MEM_TYPE_WB 0x00000040U +#define ACRN_MEM_TYPE_WT 0x00000080U +#define ACRN_MEM_TYPE_UC 0x00000100U +#define ACRN_MEM_TYPE_WC 0x00000200U +#define ACRN_MEM_TYPE_WP 0x00000400U + +/* Memory mapping types */ +#define ACRN_MEMMAP_RAM 0 +#define ACRN_MEMMAP_MMIO 1 + +/** + * struct acrn_vm_memmap - A EPT memory mapping info for a User VM. + * @type: Type of the memory mapping (ACRM_MEMMAP_*). + * Pass to hypervisor directly. + * @attr: Attribute of the memory mapping. + * Pass to hypervisor directly. + * @user_vm_pa: Physical address of User VM. + * Pass to hypervisor directly. + * @service_vm_pa: Physical address of Service VM. + * Pass to hypervisor directly. + * @vma_base: VMA address of Service VM. Pass to hypervisor directly. + * @len: Length of the memory mapping. + * Pass to hypervisor directly. + */ +struct acrn_vm_memmap { + __u32 type; + __u32 attr; + __u64 user_vm_pa; + union { + __u64 service_vm_pa; + __u64 vma_base; + }; + __u64 len; +}; + +/* Type of interrupt of a passthrough device */ +#define ACRN_PTDEV_IRQ_INTX 0 +#define ACRN_PTDEV_IRQ_MSI 1 +#define ACRN_PTDEV_IRQ_MSIX 2 +/** + * struct acrn_ptdev_irq - Interrupt data of a passthrough device. 
+ * @type: Type (ACRN_PTDEV_IRQ_*) + * @virt_bdf: Virtual Bus/Device/Function + * @phys_bdf: Physical Bus/Device/Function + * @intx: Info of interrupt + * @intx.virt_pin: Virtual IOAPIC pin + * @intx.phys_pin: Physical IOAPIC pin + * @intx.is_pic_pin: Is PIC pin or not + * + * This structure will be passed to hypervisor directly. + */ +struct acrn_ptdev_irq { + __u32 type; + __u16 virt_bdf; + __u16 phys_bdf; + + struct { + __u32 virt_pin; + __u32 phys_pin; + __u32 is_pic_pin; + } intx; +}; + +/* Type of PCI device assignment */ +#define ACRN_PTDEV_QUIRK_ASSIGN (1U << 0) + +#define ACRN_PCI_NUM_BARS 6 +/** + * struct acrn_pcidev - Info for assigning or de-assigning a PCI device + * @type: Type of the assignment + * @virt_bdf: Virtual Bus/Device/Function + * @phys_bdf: Physical Bus/Device/Function + * @intr_line: PCI interrupt line + * @intr_pin: PCI interrupt pin + * @bar: PCI BARs. + * + * This structure will be passed to hypervisor directly. + */ +struct acrn_pcidev { + __u32 type; + __u16 virt_bdf; + __u16 phys_bdf; + __u8 intr_line; + __u8 intr_pin; + __u32 bar[ACRN_PCI_NUM_BARS]; +}; + +/** + * struct acrn_msi_entry - Info for injecting a MSI interrupt to a VM + * @msi_addr: MSI addr[19:12] with dest vCPU ID + * @msi_data: MSI data[7:0] with vector + */ +struct acrn_msi_entry { + __u64 msi_addr; + __u64 msi_data; +}; + +struct acrn_acpi_generic_address { + __u8 space_id; + __u8 bit_width; + __u8 bit_offset; + __u8 access_size; + __u64 address; +} __attribute__ ((__packed__)); + +/** + * struct acrn_cstate_data - A C state package defined in ACPI + * @cx_reg: Register of the C state object + * @type: Type of the C state object + * @latency: The worst-case latency to enter and exit this C state + * @power: The average power consumption when in this C state + */ +struct acrn_cstate_data { + struct acrn_acpi_generic_address cx_reg; + __u8 type; + __u32 latency; + __u64 power; +}; + +/** + * struct acrn_pstate_data - A P state package defined in ACPI + * @core_frequency: CPU frequency (in MHz). + * @power: Power dissipation (in milliwatts). + * @transition_latency: The worst-case latency in microseconds that CPU is + * unavailable during a transition from any P state to + * this P state. + * @bus_master_latency: The worst-case latency in microseconds that Bus Masters + * are prevented from accessing memory during a transition + * from any P state to this P state. + * @control: The value to be written to Performance Control Register + * @status: Transition status. + */ +struct acrn_pstate_data { + __u64 core_frequency; + __u64 power; + __u64 transition_latency; + __u64 bus_master_latency; + __u64 control; + __u64 status; +}; + +#define PMCMD_TYPE_MASK 0x000000ff +enum acrn_pm_cmd_type { + ACRN_PMCMD_GET_PX_CNT, + ACRN_PMCMD_GET_PX_DATA, + ACRN_PMCMD_GET_CX_CNT, + ACRN_PMCMD_GET_CX_DATA, +}; + +#define ACRN_IOEVENTFD_FLAG_PIO 0x01 +#define ACRN_IOEVENTFD_FLAG_DATAMATCH 0x02 +#define ACRN_IOEVENTFD_FLAG_DEASSIGN 0x04 +/** + * struct acrn_ioeventfd - Data to operate a &struct hsm_ioeventfd + * @fd: The fd of eventfd associated with a hsm_ioeventfd + * @flags: Logical-OR of ACRN_IOEVENTFD_FLAG_* + * @addr: The start address of IO range of ioeventfd + * @len: The length of IO range of ioeventfd + * @reserved: Reserved and should be 0 + * @data: Data for data matching + * + * Without flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl ACRN_IOCTL_IOEVENTFD + * creates a &struct hsm_ioeventfd with properties originated from &struct + * acrn_ioeventfd. 
With flag ACRN_IOEVENTFD_FLAG_DEASSIGN, ioctl + * ACRN_IOCTL_IOEVENTFD destroys the &struct hsm_ioeventfd matching the fd. + */ +struct acrn_ioeventfd { + __u32 fd; + __u32 flags; + __u64 addr; + __u32 len; + __u32 reserved; + __u64 data; +}; + +#define ACRN_IRQFD_FLAG_DEASSIGN 0x01 +/** + * struct acrn_irqfd - Data to operate a &struct hsm_irqfd + * @fd: The fd of eventfd associated with a hsm_irqfd + * @flags: Logical-OR of ACRN_IRQFD_FLAG_* + * @msi: Info of MSI associated with the irqfd + */ +struct acrn_irqfd { + __s32 fd; + __u32 flags; + struct acrn_msi_entry msi; +}; + +/* The ioctl type, documented in ioctl-number.rst */ +#define ACRN_IOCTL_TYPE 0xA2 + +/* + * Common IOCTL IDs definition for ACRN userspace + */ +#define ACRN_IOCTL_CREATE_VM \ + _IOWR(ACRN_IOCTL_TYPE, 0x10, struct acrn_vm_creation) +#define ACRN_IOCTL_DESTROY_VM \ + _IO(ACRN_IOCTL_TYPE, 0x11) +#define ACRN_IOCTL_START_VM \ + _IO(ACRN_IOCTL_TYPE, 0x12) +#define ACRN_IOCTL_PAUSE_VM \ + _IO(ACRN_IOCTL_TYPE, 0x13) +#define ACRN_IOCTL_RESET_VM \ + _IO(ACRN_IOCTL_TYPE, 0x15) +#define ACRN_IOCTL_SET_VCPU_REGS \ + _IOW(ACRN_IOCTL_TYPE, 0x16, struct acrn_vcpu_regs) + +#define ACRN_IOCTL_INJECT_MSI \ + _IOW(ACRN_IOCTL_TYPE, 0x23, struct acrn_msi_entry) +#define ACRN_IOCTL_VM_INTR_MONITOR \ + _IOW(ACRN_IOCTL_TYPE, 0x24, unsigned long) +#define ACRN_IOCTL_SET_IRQLINE \ + _IOW(ACRN_IOCTL_TYPE, 0x25, __u64) + +#define ACRN_IOCTL_NOTIFY_REQUEST_FINISH \ + _IOW(ACRN_IOCTL_TYPE, 0x31, struct acrn_ioreq_notify) +#define ACRN_IOCTL_CREATE_IOREQ_CLIENT \ + _IO(ACRN_IOCTL_TYPE, 0x32) +#define ACRN_IOCTL_ATTACH_IOREQ_CLIENT \ + _IO(ACRN_IOCTL_TYPE, 0x33) +#define ACRN_IOCTL_DESTROY_IOREQ_CLIENT \ + _IO(ACRN_IOCTL_TYPE, 0x34) +#define ACRN_IOCTL_CLEAR_VM_IOREQ \ + _IO(ACRN_IOCTL_TYPE, 0x35) + +#define ACRN_IOCTL_SET_MEMSEG \ + _IOW(ACRN_IOCTL_TYPE, 0x41, struct acrn_vm_memmap) +#define ACRN_IOCTL_UNSET_MEMSEG \ + _IOW(ACRN_IOCTL_TYPE, 0x42, struct acrn_vm_memmap) + +#define ACRN_IOCTL_SET_PTDEV_INTR \ + _IOW(ACRN_IOCTL_TYPE, 0x53, struct acrn_ptdev_irq) +#define ACRN_IOCTL_RESET_PTDEV_INTR \ + _IOW(ACRN_IOCTL_TYPE, 0x54, struct acrn_ptdev_irq) +#define ACRN_IOCTL_ASSIGN_PCIDEV \ + _IOW(ACRN_IOCTL_TYPE, 0x55, struct acrn_pcidev) +#define ACRN_IOCTL_DEASSIGN_PCIDEV \ + _IOW(ACRN_IOCTL_TYPE, 0x56, struct acrn_pcidev) + +#define ACRN_IOCTL_PM_GET_CPU_STATE \ + _IOWR(ACRN_IOCTL_TYPE, 0x60, __u64) + +#define ACRN_IOCTL_IOEVENTFD \ + _IOW(ACRN_IOCTL_TYPE, 0x70, struct acrn_ioeventfd) +#define ACRN_IOCTL_IRQFD \ + _IOW(ACRN_IOCTL_TYPE, 0x71, struct acrn_irqfd) + +#endif /* _UAPI_ACRN_H */ diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 9c8604c5b5f6..ea4692c339ce 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */ -/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors: +/* Copyright (C) B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich */ diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index bdb317faa1dc..35dc016c9bb4 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: MIT */ -/* Copyright (C) 2016-2020 B.A.T.M.A.N. contributors: +/* Copyright (C) B.A.T.M.A.N. 
contributors: * * Matthias Schiffer */ diff --git a/include/uapi/linux/binfmts.h b/include/uapi/linux/binfmts.h index 689025d9c185..c6f9450efc12 100644 --- a/include/uapi/linux/binfmts.h +++ b/include/uapi/linux/binfmts.h @@ -18,4 +18,8 @@ struct pt_regs; /* sizeof(linux_binprm->buf) */ #define BINPRM_BUF_SIZE 256 +/* preserve argv0 for the interpreter */ +#define AT_FLAGS_PRESERVE_ARGV0_BIT 0 +#define AT_FLAGS_PRESERVE_ARGV0 (1 << AT_FLAGS_PRESERVE_ARGV0_BIT) + #endif /* _UAPI_LINUX_BINFMTS_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 77d7c1bb2923..79c893310492 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -19,7 +19,8 @@ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ -#define BPF_XADD 0xc0 /* exclusive add */ +#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ +#define BPF_XADD 0xc0 /* exclusive add - legacy name */ /* alu/jmp fields */ #define BPF_MOV 0xb0 /* mov reg to reg */ @@ -43,6 +44,11 @@ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ +/* atomic op type fields (stored in immediate) */ +#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ +#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ +#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ + /* Register numbers */ enum { BPF_REG_0 = 0, @@ -1650,22 +1656,30 @@ union bpf_attr { * networking traffic statistics as it provides a global socket * identifier that can be assumed unique. * Return - * A 8-byte long non-decreasing number on success, or 0 if the - * socket field is missing inside *skb*. + * A 8-byte long unique number on success, or 0 if the socket + * field is missing inside *skb*. * * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_addr** context. * Return - * A 8-byte long non-decreasing number. + * A 8-byte long unique number. * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return - * A 8-byte long non-decreasing number. + * A 8-byte long unique number. + * + * u64 bpf_get_socket_cookie(struct sock *sk) + * Description + * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts + * *sk*, but gets socket from a BTF **struct sock**. This helper + * also works for sleepable programs. + * Return + * A 8-byte long unique number or 0 if *sk* is NULL. * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Return @@ -2225,6 +2239,9 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * + * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU + * was exceeded and output params->mtu_result contains the MTU. + * * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. @@ -2448,7 +2465,7 @@ union bpf_attr { * running simultaneously. * * A user should care about the synchronization by himself. - * For example, by using the **BPF_STX_XADD** instruction to alter + * For example, by using the **BPF_ATOMIC** instructions to alter * the shared data. * Return * A pointer to the local storage area. 
@@ -2993,10 +3010,10 @@ union bpf_attr { * string length is larger than *size*, just *size*-1 bytes are * copied and the last byte is set to NUL. * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. See - * the following snippet: + * On success, returns the number of bytes that were written, + * including the terminal NUL. This makes this helper useful in + * tracing programs for reading strings, and more importantly to + * get its length at runtime. See the following snippet: * * :: * @@ -3024,7 +3041,7 @@ union bpf_attr { * **->mm->env_start**: using this helper and the return value, * one can quickly iterate at the right offset of the memory area. * Return - * On success, the strictly positive length of the string, + * On success, the strictly positive length of the output string, * including the trailing NUL character. On error, a negative * value. * @@ -3830,6 +3847,68 @@ union bpf_attr { * Return * A pointer to a struct socket on success or NULL if the file is * not a socket. + * + * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) + * Description + * Check ctx packet size against exceeding MTU of net device (based + * on *ifindex*). This helper will likely be used in combination + * with helpers that adjust/change the packet size. + * + * The argument *len_diff* can be used for querying with a planned + * size change. This allows to check MTU prior to changing packet + * ctx. Providing an *len_diff* adjustment that is larger than the + * actual packet size (resulting in negative packet size) will in + * principle not exceed the MTU, why it is not considered a + * failure. Other BPF-helpers are needed for performing the + * planned size change, why the responsability for catch a negative + * packet size belong in those helpers. + * + * Specifying *ifindex* zero means the MTU check is performed + * against the current net device. This is practical if this isn't + * used prior to redirect. + * + * The Linux kernel route table can configure MTUs on a more + * specific per route level, which is not provided by this helper. + * For route level MTU checks use the **bpf_fib_lookup**\ () + * helper. + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** for tc cls_act programs. + * + * The *flags* argument can be a combination of one or more of the + * following values: + * + * **BPF_MTU_CHK_SEGS** + * This flag will only works for *ctx* **struct sk_buff**. + * If packet context contains extra packet segment buffers + * (often knows as GSO skb), then MTU check is harder to + * check at this point, because in transmit path it is + * possible for the skb packet to get re-segmented + * (depending on net device features). This could still be + * a MTU violation, so this flag enables performing MTU + * check against segments, with a different violation + * return code to tell it apart. Check cannot use len_diff. + * + * On return *mtu_len* pointer contains the MTU value of the net + * device. Remember the net device configured MTU is the L3 size, + * which is returned here and XDP and TX length operate at L2. + * Helper take this into account for you, but remember when using + * MTU value in your BPF-code. On input *mtu_len* must be a valid + * pointer and be initialized (to zero), else verifier will reject + * BPF program. 
+ * + * Return + * * 0 on success, and populate MTU value in *mtu_len* pointer. + * + * * < 0 if any input argument is invalid (*mtu_len* not updated) + * + * MTU violations return positive values, but also populate MTU + * value in *mtu_len* pointer, as this can be needed for + * implementing PMTU handing: + * + * * **BPF_MTU_CHK_RET_FRAG_NEEDED** + * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** + * */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3995,6 +4074,7 @@ union bpf_attr { FN(ktime_get_coarse_ns), \ FN(ima_inode_hash), \ FN(sock_from_file), \ + FN(check_mtu), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -4495,6 +4575,7 @@ struct bpf_prog_info { __aligned_u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; + __u64 recursion_misses; } __attribute__((aligned(8))); struct bpf_map_info { @@ -4975,9 +5056,13 @@ struct bpf_fib_lookup { __be16 sport; __be16 dport; - /* total length of packet from network header - used for MTU check */ - __u16 tot_len; + union { /* used for MTU check */ + /* input to lookup */ + __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ + /* output: MTU value */ + __u16 mtu_result; + }; /* input: L3 device index for lookup * output: device index from FIB lookup */ @@ -5023,6 +5108,17 @@ struct bpf_redir_neigh { }; }; +/* bpf_check_mtu flags*/ +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = (1U << 0), +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ + BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ +}; + enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ diff --git a/include/uapi/linux/ccs.h b/include/uapi/linux/ccs.h new file mode 100644 index 000000000000..2896d3bfdb5e --- /dev/null +++ b/include/uapi/linux/ccs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* Copyright (C) 2020 Intel Corporation */ + +#ifndef __UAPI_CCS_H__ +#define __UAPI_CCS_H__ + +#include <linux/v4l2-controls.h> + +#define V4L2_CID_CCS_ANALOGUE_GAIN_M0 (V4L2_CID_USER_CCS_BASE + 1) +#define V4L2_CID_CCS_ANALOGUE_GAIN_C0 (V4L2_CID_USER_CCS_BASE + 2) +#define V4L2_CID_CCS_ANALOGUE_GAIN_M1 (V4L2_CID_USER_CCS_BASE + 3) +#define V4L2_CID_CCS_ANALOGUE_GAIN_C1 (V4L2_CID_USER_CCS_BASE + 4) +#define V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN (V4L2_CID_USER_CCS_BASE + 5) +#define V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN (V4L2_CID_USER_CCS_BASE + 6) +#define V4L2_CID_CCS_SHADING_CORRECTION (V4L2_CID_USER_CCS_BASE + 8) +#define V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL (V4L2_CID_USER_CCS_BASE + 9) + +#endif diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h new file mode 100644 index 000000000000..3155382dfc9b --- /dev/null +++ b/include/uapi/linux/cxl_mem.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * CXL IOCTLs for Memory Devices + */ + +#ifndef _UAPI_CXL_MEM_H_ +#define _UAPI_CXL_MEM_H_ + +#include <linux/types.h> + +/** + * DOC: UAPI + * + * Not all of all commands that the driver supports are always available for use + * by userspace. Userspace must check the results from the QUERY command in + * order to determine the live set of commands. 
+ */ + +#define CXL_MEM_QUERY_COMMANDS _IOR(0xCE, 1, struct cxl_mem_query_commands) +#define CXL_MEM_SEND_COMMAND _IOWR(0xCE, 2, struct cxl_send_command) + +#define CXL_CMDS \ + ___C(INVALID, "Invalid Command"), \ + ___C(IDENTIFY, "Identify Command"), \ + ___C(RAW, "Raw device command"), \ + ___C(GET_SUPPORTED_LOGS, "Get Supported Logs"), \ + ___C(GET_FW_INFO, "Get FW Info"), \ + ___C(GET_PARTITION_INFO, "Get Partition Information"), \ + ___C(GET_LSA, "Get Label Storage Area"), \ + ___C(GET_HEALTH_INFO, "Get Health Info"), \ + ___C(GET_LOG, "Get Log"), \ + ___C(MAX, "invalid / last command") + +#define ___C(a, b) CXL_MEM_COMMAND_ID_##a +enum { CXL_CMDS }; + +#undef ___C +#define ___C(a, b) { b } +static const struct { + const char *name; +} cxl_command_names[] = { CXL_CMDS }; + +/* + * Here's how this actually breaks out: + * cxl_command_names[] = { + * [CXL_MEM_COMMAND_ID_INVALID] = { "Invalid Command" }, + * [CXL_MEM_COMMAND_ID_IDENTIFY] = { "Identify Command" }, + * ... + * [CXL_MEM_COMMAND_ID_MAX] = { "invalid / last command" }, + * }; + */ + +#undef ___C + +/** + * struct cxl_command_info - Command information returned from a query. + * @id: ID number for the command. + * @flags: Flags that specify command behavior. + * @size_in: Expected input size, or -1 if variable length. + * @size_out: Expected output size, or -1 if variable length. + * + * Represents a single command that is supported by both the driver and the + * hardware. This is returned as part of an array from the query ioctl. The + * following would be a command that takes a variable length input and returns 0 + * bytes of output. + * + * - @id = 10 + * - @flags = 0 + * - @size_in = -1 + * - @size_out = 0 + * + * See struct cxl_mem_query_commands. + */ +struct cxl_command_info { + __u32 id; + + __u32 flags; +#define CXL_MEM_COMMAND_FLAG_MASK GENMASK(0, 0) + + __s32 size_in; + __s32 size_out; +}; + +/** + * struct cxl_mem_query_commands - Query supported commands. + * @n_commands: In/out parameter. When @n_commands is > 0, the driver will + * return min(num_support_commands, n_commands). When @n_commands + * is 0, driver will return the number of total supported commands. + * @rsvd: Reserved for future use. + * @commands: Output array of supported commands. This array must be allocated + * by userspace to be at least min(num_support_commands, @n_commands) + * + * Allow userspace to query the available commands supported by both the driver, + * and the hardware. Commands that aren't supported by either the driver, or the + * hardware are not returned in the query. + * + * Examples: + * + * - { .n_commands = 0 } // Get number of supported commands + * - { .n_commands = 15, .commands = buf } // Return first 15 (or less) + * supported commands + * + * See struct cxl_command_info. + */ +struct cxl_mem_query_commands { + /* + * Input: Number of commands to return (space allocated by user) + * Output: Number of commands supported by the driver/hardware + * + * If n_commands is 0, kernel will only return number of commands and + * not try to populate commands[], thus allowing userspace to know how + * much space to allocate + */ + __u32 n_commands; + __u32 rsvd; + + struct cxl_command_info __user commands[]; /* out: supported commands */ +}; + +/** + * struct cxl_send_command - Send a command to a memory device. + * @id: The command to send to the memory device. This must be one of the + * commands returned by the query command. + * @flags: Flags for the command (input). 
+ * @raw: Special fields for raw commands + * @raw.opcode: Opcode passed to hardware when using the RAW command. + * @raw.rsvd: Must be zero. + * @rsvd: Must be zero. + * @retval: Return value from the memory device (output). + * @in: Parameters associated with input payload. + * @in.size: Size of the payload to provide to the device (input). + * @in.rsvd: Must be zero. + * @in.payload: Pointer to memory for payload input, payload is little endian. + * @out: Parameters associated with output payload. + * @out.size: Size of the payload received from the device (input/output). This + * field is filled in by userspace to let the driver know how much + * space was allocated for output. It is populated by the driver to + * let userspace know how large the output payload actually was. + * @out.rsvd: Must be zero. + * @out.payload: Pointer to memory for payload output, payload is little endian. + * + * Mechanism for userspace to send a command to the hardware for processing. The + * driver will do basic validation on the command sizes. In some cases even the + * payload may be introspected. Userspace is required to allocate large enough + * buffers for size_out which can be variable length in certain situations. + */ +struct cxl_send_command { + __u32 id; + __u32 flags; + union { + struct { + __u16 opcode; + __u16 rsvd; + } raw; + __u32 rsvd; + }; + __u32 retval; + + struct { + __s32 size; + __u32 rsvd; + __u64 payload; + } in; + + struct { + __s32 size; + __u32 rsvd; + __u64 payload; + } out; +}; + +#endif diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index cf89c318f2ac..f6008b2fa60f 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -200,6 +200,10 @@ enum devlink_port_flavour { DEVLINK_PORT_FLAVOUR_UNUSED, /* Port which exists in the switch, but * is not used in any way. */ + DEVLINK_PORT_FLAVOUR_PCI_SF, /* Represents eswitch port + * for the PCI SF. It is an internal + * port that faces the PCI SF. + */ }; enum devlink_param_cmode { @@ -529,6 +533,7 @@ enum devlink_attr { DEVLINK_ATTR_RELOAD_ACTION_INFO, /* nested */ DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */ + DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, @@ -578,9 +583,29 @@ enum devlink_resource_unit { enum devlink_port_function_attr { DEVLINK_PORT_FUNCTION_ATTR_UNSPEC, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */ + DEVLINK_PORT_FN_ATTR_STATE, /* u8 */ + DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */ __DEVLINK_PORT_FUNCTION_ATTR_MAX, DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 }; +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE, + DEVLINK_PORT_FN_STATE_ACTIVE, +}; + +/** + * enum devlink_port_fn_opstate - indicates operational state of the function + * @DEVLINK_PORT_FN_OPSTATE_ATTACHED: Driver is attached to the function. + * For graceful tear down of the function, after inactivation of the + * function, user should wait for operational state to turn DETACHED. + * @DEVLINK_PORT_FN_OPSTATE_DETACHED: Driver is detached from the function. + * It is safe to delete the port. 
+ */ +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED, + DEVLINK_PORT_FN_OPSTATE_ATTACHED, +}; + #endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 4933b6b67b85..fcff6669137b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -272,9 +272,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 43 +#define DM_VERSION_MINOR 44 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2020-10-01)" +#define DM_VERSION_EXTRA "-ioctl (2021-02-01)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index e2bf36e6964b..a286635ac9b8 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -227,6 +227,7 @@ enum { ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */ ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */ ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */ + ETHTOOL_A_LINKMODES_LANES, /* u32 */ /* add new constants above here */ __ETHTOOL_A_LINKMODES_CNT, diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 7e5b5c10a49c..5effa9832802 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -844,7 +844,7 @@ struct fw_cdev_queue_iso { * struct fw_cdev_start_iso - Start an isochronous transmission or reception * @cycle: Cycle in which to start I/O. If @cycle is greater than or * equal to 0, the I/O will start on that cycle. - * @sync: Determines the value to wait for for receive packets that have + * @sync: Determines the value to wait for receive packets that have * the %FW_CDEV_ISO_SYNC bit set * @tags: Tag filter bit mask. Only valid for isochronous reception. * Determines the tag values for which packets will be accepted. 
diff --git a/include/uapi/linux/fsl_mc.h b/include/uapi/linux/fsl_mc.h new file mode 100644 index 000000000000..e57451570033 --- /dev/null +++ b/include/uapi/linux/fsl_mc.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Management Complex (MC) userspace public interface + * + * Copyright 2021 NXP + * + */ +#ifndef _UAPI_FSL_MC_H_ +#define _UAPI_FSL_MC_H_ + +#include <linux/types.h> + +#define MC_CMD_NUM_OF_PARAMS 7 + +/** + * struct fsl_mc_command - Management Complex (MC) command structure + * @header: MC command header + * @params: MC command parameters + * + * Used by FSL_MC_SEND_MC_COMMAND + */ +struct fsl_mc_command { + __le64 header; + __le64 params[MC_CMD_NUM_OF_PARAMS]; +}; + +#define FSL_MC_SEND_CMD_IOCTL_TYPE 'R' +#define FSL_MC_SEND_CMD_IOCTL_SEQ 0xE0 + +#define FSL_MC_SEND_MC_COMMAND \ + _IOWR(FSL_MC_SEND_CMD_IOCTL_TYPE, FSL_MC_SEND_CMD_IOCTL_SEQ, \ + struct fsl_mc_command) + +#endif /* _UAPI_FSL_MC_H_ */ diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h index 33f44156f8ea..15384e22e331 100644 --- a/include/uapi/linux/fsverity.h +++ b/include/uapi/linux/fsverity.h @@ -83,7 +83,21 @@ struct fsverity_formatted_digest { __u8 digest[]; }; +#define FS_VERITY_METADATA_TYPE_MERKLE_TREE 1 +#define FS_VERITY_METADATA_TYPE_DESCRIPTOR 2 +#define FS_VERITY_METADATA_TYPE_SIGNATURE 3 + +struct fsverity_read_metadata_arg { + __u64 metadata_type; + __u64 offset; + __u64 length; + __u64 buf_ptr; + __u64 __reserved; +}; + #define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg) #define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest) +#define FS_IOC_READ_VERITY_METADATA \ + _IOWR('f', 135, struct fsverity_read_metadata_arg) #endif /* _UAPI_LINUX_FSVERITY_H */ diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h index 07e508e6691b..6ec4291bcc7a 100644 --- a/include/uapi/linux/gfs2_ondisk.h +++ b/include/uapi/linux/gfs2_ondisk.h @@ -47,7 +47,7 @@ #define GFS2_FORMAT_DE 1200 #define GFS2_FORMAT_QU 1500 /* These are part of the superblock */ -#define GFS2_FORMAT_FS 1801 +#define GFS2_FORMAT_FS 1802 #define GFS2_FORMAT_MULTI 1900 /* @@ -389,8 +389,9 @@ struct gfs2_leaf { #define GFS2_EATYPE_USR 1 #define GFS2_EATYPE_SYS 2 #define GFS2_EATYPE_SECURITY 3 +#define GFS2_EATYPE_TRUSTED 4 -#define GFS2_EATYPE_LAST 3 +#define GFS2_EATYPE_LAST 4 #define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST) #define GFS2_EAFLAG_LAST 0x01 /* last ea in block */ diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index e4eb0b8c5cf9..eaaea3d8e6b4 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * <linux/gpio.h> - userspace ABI for the GPIO character devices * @@ -212,7 +212,7 @@ struct gpio_v2_line_request { * @offset: the local offset on this GPIO chip, fill this in when * requesting the line information from the kernel * @num_attrs: the number of attributes in @attrs - * @flags: flags for the GPIO lines, with values from &enum + * @flags: flags for this GPIO line, with values from &enum * gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW, * %GPIO_V2_LINE_FLAG_OUTPUT etc, added together. 
* @attrs: the configuration attributes associated with the line diff --git a/include/uapi/linux/i2c-dev.h b/include/uapi/linux/i2c-dev.h index 85f8047afcf2..1c4cec4ddd84 100644 --- a/include/uapi/linux/i2c-dev.h +++ b/include/uapi/linux/i2c-dev.h @@ -1,25 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* - i2c-dev.h - i2c-bus driver, char device interface - - Copyright (C) 1995-97 Simon G. Vogl - Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. -*/ + * i2c-dev.h - I2C bus char device interface + * + * Copyright (C) 1995-97 Simon G. Vogl + * Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> + */ #ifndef _UAPI_LINUX_I2C_DEV_H #define _UAPI_LINUX_I2C_DEV_H diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h index f71a1751cacf..92326ebde350 100644 --- a/include/uapi/linux/i2c.h +++ b/include/uapi/linux/i2c.h @@ -1,29 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -/* ------------------------------------------------------------------------- */ -/* */ -/* i2c.h - definitions for the i2c-bus interface */ -/* */ -/* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-2000 Simon G. Vogl - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. */ -/* ------------------------------------------------------------------------- */ - -/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and - Frodo Looijaard <frodol@dds.nl> */ +/* + * i2c.h - definitions for the I2C bus interface + * + * Copyright (C) 1995-2000 Simon G. Vogl + * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and + * Frodo Looijaard <frodol@dds.nl> + */ #ifndef _UAPI_LINUX_I2C_H #define _UAPI_LINUX_I2C_H @@ -32,18 +14,41 @@ /** * struct i2c_msg - an I2C transaction segment beginning with START - * @addr: Slave address, either seven or ten bits. When this is a ten - * bit address, I2C_M_TEN must be set in @flags and the adapter - * must support I2C_FUNC_10BIT_ADDR. - * @flags: I2C_M_RD is handled by all adapters. No other flags may be - * provided unless the adapter exported the relevant I2C_FUNC_* - * flags through i2c_check_functionality(). 
- * @len: Number of data bytes in @buf being read from or written to the - * I2C slave address. For read transactions where I2C_M_RECV_LEN - * is set, the caller guarantees that this buffer can hold up to - * 32 bytes in addition to the initial length byte sent by the - * slave (plus, if used, the SMBus PEC); and this value will be - * incremented by the number of block data bytes received. + * + * @addr: Slave address, either 7 or 10 bits. When this is a 10 bit address, + * %I2C_M_TEN must be set in @flags and the adapter must support + * %I2C_FUNC_10BIT_ADDR. + * + * @flags: + * Supported by all adapters: + * %I2C_M_RD: read data (from slave to master). Guaranteed to be 0x0001! + * + * Optional: + * %I2C_M_DMA_SAFE: the buffer of this message is DMA safe. Makes only sense + * in kernelspace, because userspace buffers are copied anyway + * + * Only if I2C_FUNC_10BIT_ADDR is set: + * %I2C_M_TEN: this is a 10 bit chip address + * + * Only if I2C_FUNC_SMBUS_READ_BLOCK_DATA is set: + * %I2C_M_RECV_LEN: message length will be first received byte + * + * Only if I2C_FUNC_NOSTART is set: + * %I2C_M_NOSTART: skip repeated start sequence + + * Only if I2C_FUNC_PROTOCOL_MANGLING is set: + * %I2C_M_NO_RD_ACK: in a read message, master ACK/NACK bit is skipped + * %I2C_M_IGNORE_NAK: treat NACK from client as ACK + * %I2C_M_REV_DIR_ADDR: toggles the Rd/Wr bit + * %I2C_M_STOP: force a STOP condition after the message + * + * @len: Number of data bytes in @buf being read from or written to the I2C + * slave address. For read transactions where %I2C_M_RECV_LEN is set, the + * caller guarantees that this buffer can hold up to %I2C_SMBUS_BLOCK_MAX + * bytes in addition to the initial length byte sent by the slave (plus, + * if used, the SMBus PEC); and this value will be incremented by the number + * of block data bytes received. + * * @buf: The buffer into which data is read, or from which it's written. * * An i2c_msg is the low level representation of one segment of an I2C @@ -60,40 +65,36 @@ * group, it is followed by a STOP. Otherwise it is followed by the next * @i2c_msg transaction segment, beginning with a (repeated) START. * - * Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then + * Alternatively, when the adapter supports %I2C_FUNC_PROTOCOL_MANGLING then * passing certain @flags may have changed those standard protocol behaviors. * Those flags are only for use with broken/nonconforming slaves, and with - * adapters which are known to support the specific mangling options they - * need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR). + * adapters which are known to support the specific mangling options they need. */ struct i2c_msg { - __u16 addr; /* slave address */ + __u16 addr; __u16 flags; -#define I2C_M_RD 0x0001 /* read data, from slave to master */ - /* I2C_M_RD is guaranteed to be 0x0001! 
*/ -#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ -#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */ - /* makes only sense in kernelspace */ - /* userspace buffers are copied anyway */ -#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ -#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */ -#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */ - __u16 len; /* msg length */ - __u8 *buf; /* pointer to msg data */ +#define I2C_M_RD 0x0001 /* guaranteed to be 0x0001! */ +#define I2C_M_TEN 0x0010 /* use only if I2C_FUNC_10BIT_ADDR */ +#define I2C_M_DMA_SAFE 0x0200 /* use only in kernel space */ +#define I2C_M_RECV_LEN 0x0400 /* use only if I2C_FUNC_SMBUS_READ_BLOCK_DATA */ +#define I2C_M_NO_RD_ACK 0x0800 /* use only if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_IGNORE_NAK 0x1000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_REV_DIR_ADDR 0x2000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_NOSTART 0x4000 /* use only if I2C_FUNC_NOSTART */ +#define I2C_M_STOP 0x8000 /* use only if I2C_FUNC_PROTOCOL_MANGLING */ + __u16 len; + __u8 *buf; }; /* To determine what functionality is present */ #define I2C_FUNC_I2C 0x00000001 -#define I2C_FUNC_10BIT_ADDR 0x00000002 -#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */ +#define I2C_FUNC_10BIT_ADDR 0x00000002 /* required for I2C_M_TEN */ +#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* required for I2C_M_IGNORE_NAK etc. */ #define I2C_FUNC_SMBUS_PEC 0x00000008 -#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */ +#define I2C_FUNC_NOSTART 0x00000010 /* required for I2C_M_NOSTART */ #define I2C_FUNC_SLAVE 0x00000020 -#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ +#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 or later */ #define I2C_FUNC_SMBUS_QUICK 0x00010000 #define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 #define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 @@ -102,11 +103,11 @@ struct i2c_msg { #define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 #define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 #define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 -#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 +#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 /* required for I2C_M_RECV_LEN */ #define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 #define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ #define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. 
*/ -#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000 +#define I2C_FUNC_SMBUS_HOST_NOTIFY 0x10000000 /* SMBus 2.0 or later */ #define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ I2C_FUNC_SMBUS_WRITE_BYTE) @@ -128,6 +129,11 @@ struct i2c_msg { I2C_FUNC_SMBUS_I2C_BLOCK | \ I2C_FUNC_SMBUS_PEC) +/* if I2C_M_RECV_LEN is also supported */ +#define I2C_FUNC_SMBUS_EMUL_ALL (I2C_FUNC_SMBUS_EMUL | \ + I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ + I2C_FUNC_SMBUS_BLOCK_PROC_CALL) + /* * Data for SMBus Messages */ diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h index 45f3750aa861..e8eb4ad03cf1 100644 --- a/include/uapi/linux/if_bonding.h +++ b/include/uapi/linux/if_bonding.h @@ -94,6 +94,7 @@ #define BOND_XMIT_POLICY_LAYER23 2 /* layer 2+3 (IP ^ MAC) */ #define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */ #define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */ +#define BOND_XMIT_POLICY_VLAN_SRCMAC 5 /* vlan + source MAC */ /* 802.3ad port state definitions (43.4.2.2 in the 802.3ad standard) */ #define LACP_STATE_LACP_ACTIVITY 0x1 diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 82708c6db432..91c8dda6d95d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -525,6 +525,8 @@ enum { IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, IFLA_BRPORT_MRP_IN_OPEN, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 9a61c28ed3ae..ee3127461ee0 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -84,7 +84,7 @@ struct input_id { * in units per radian. * When INPUT_PROP_ACCELEROMETER is set the resolution changes. * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in - * in units per g (units/g) and in units per degree per second + * units per g (units/g) and in units per degree per second * (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ). 
*/ struct input_absinfo { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index d31a2a1e8ef9..2514eb6b1cf2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -262,6 +262,7 @@ struct io_uring_params { #define IORING_FEAT_POLL_32BITS (1U << 6) #define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) #define IORING_FEAT_EXT_ARG (1U << 8) +#define IORING_FEAT_NATIVE_WORKERS (1U << 9) /* * io_uring_register(2) opcodes and arguments @@ -285,12 +286,22 @@ enum { IORING_REGISTER_LAST }; +/* deprecated, see struct io_uring_rsrc_update */ struct io_uring_files_update { __u32 offset; __u32 resv; __aligned_u64 /* __s32 * */ fds; }; +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __aligned_u64 data; +}; + +/* Skip updating fd indexes set to this value in the fd table */ +#define IORING_REGISTER_FILES_SKIP (-2) + #define IO_URING_OP_SUPPORTED (1U << 0) struct io_uring_probe_op { diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 13e8751bf24a..70603775fe91 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -189,6 +189,7 @@ enum { DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN, DEVCONF_NDISC_TCLASS, DEVCONF_RPL_SEG_ENABLED, + DEVCONF_RA_DEFRTR_METRIC, DEVCONF_MAX }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 374c67875cdb..f6afee209620 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -216,6 +216,20 @@ struct kvm_hyperv_exit { } u; }; +struct kvm_xen_exit { +#define KVM_EXIT_XEN_HCALL 1 + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + #define KVM_S390_GET_SKEYS_NONE 1 #define KVM_S390_SKEYS_MAX 1048576 @@ -252,6 +266,8 @@ struct kvm_hyperv_exit { #define KVM_EXIT_X86_WRMSR 30 #define KVM_EXIT_DIRTY_RING_FULL 31 #define KVM_EXIT_AP_RESET_HOLD 32 +#define KVM_EXIT_X86_BUS_LOCK 33 +#define KVM_EXIT_XEN 34 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -428,6 +444,8 @@ struct kvm_run { __u32 index; /* kernel -> user */ __u64 data; /* kernel <-> user */ } msr; + /* KVM_EXIT_XEN */ + struct kvm_xen_exit xen; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -1058,6 +1076,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 +#define KVM_CAP_X86_BUS_LOCK_EXIT 193 +#define KVM_CAP_PPC_DAWR1 194 #ifdef KVM_CAP_IRQ_ROUTING @@ -1131,6 +1151,11 @@ struct kvm_x86_mce { #endif #ifdef KVM_CAP_XEN_HVM +#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) +#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) +#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) +#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) + struct kvm_xen_hvm_config { __u32 flags; __u32 msr; @@ -1565,6 +1590,57 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_DIRTY_LOG_RING */ #define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +/* Per-VM Xen attributes */ +#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) +#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) + +struct kvm_xen_hvm_attr { + __u16 type; + __u16 pad[3]; + union { + __u8 long_mode; + __u8 vector; + struct { + __u64 gfn; + } shared_info; + __u64 pad[8]; + } u; +}; + +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ +#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 +#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 + +/* Per-vCPU Xen attributes */ +#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) +#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) + +struct kvm_xen_vcpu_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 gpa; + __u64 pad[8]; + struct { + __u64 state; + __u64 state_entry_time; + __u64 time_running; + __u64 time_runnable; + __u64 time_blocked; + __u64 time_offline; + } runstate; + } u; +}; + +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 +#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2 +#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 +#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 +#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1593,6 +1669,8 @@ enum sev_cmd_id { KVM_SEV_DBG_ENCRYPT, /* Guest certificates commands */ KVM_SEV_CERT_EXPORT, + /* Attestation report */ + KVM_SEV_GET_ATTESTATION_REPORT, KVM_SEV_NR_MAX, }; @@ -1645,6 +1723,12 @@ struct kvm_sev_dbg { __u32 len; }; +struct kvm_sev_attestation_report { + __u8 mnonce[16]; + __u64 uaddr; + __u32 len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) @@ -1766,4 +1850,7 @@ struct kvm_dirty_gfn { __u64 offset; }; +#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) +#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 30c80d5ba4bf..bab8c9708611 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -145,6 +145,7 @@ enum { L2TP_ATTR_RX_ERRORS, /* u64 */ L2TP_ATTR_STATS_PAD, L2TP_ATTR_RX_COOKIE_DISCARDS, /* u64 */ + L2TP_ATTR_RX_INVALID, /* u64 */ __L2TP_ATTR_STATS_MAX, }; diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h index 13a06e5e966e..8b02088f96e3 100644 --- a/include/uapi/linux/map_to_7segment.h +++ b/include/uapi/linux/map_to_7segment.h @@ -45,17 +45,22 @@ * In device drivers it is recommended, if required, to make the char map * accessible via 
the sysfs interface using the following scheme: * - * static ssize_t show_map(struct device *dev, char *buf) { + * static ssize_t map_seg7_show(struct device *dev, + * struct device_attribute *attr, char *buf) + * { * memcpy(buf, &map_seg7, sizeof(map_seg7)); * return sizeof(map_seg7); * } - * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) { + * static ssize_t map_seg7_store(struct device *dev, + * struct device_attribute *attr, const char *buf, + * size_t cnt) + * { * if(cnt != sizeof(map_seg7)) * return -EINVAL; * memcpy(&map_seg7, buf, cnt); * return cnt; * } - * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map); + * static DEVICE_ATTR_RW(map_seg7); * * History: * 2005-05-31 RFC linux-kernel@vger.kernel.org diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index 383ac7b7d8f0..200fa8462b90 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -127,6 +127,7 @@ struct media_device_info { #define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006) #define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007) #define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008) +#define MEDIA_ENT_F_PROC_VIDEO_ISP (MEDIA_ENT_F_BASE + 0x4009) /* * Switch and bridge entity functions diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 3354774af61e..8948467b3992 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -28,12 +28,14 @@ enum { /* Flags for set_mempolicy */ #define MPOL_F_STATIC_NODES (1 << 15) #define MPOL_F_RELATIVE_NODES (1 << 14) +#define MPOL_F_NUMA_BALANCING (1 << 13) /* Optimize with NUMA balancing if possible */ /* * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to * either set_mempolicy() or mbind(). */ -#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) +#define MPOL_MODE_FLAGS \ + (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_NUMA_BALANCING) /* Flags for get_mempolicy */ #define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */ diff --git a/include/uapi/linux/misc/bcm_vk.h b/include/uapi/linux/misc/bcm_vk.h new file mode 100644 index 000000000000..ec28e0bd46a9 --- /dev/null +++ b/include/uapi/linux/misc/bcm_vk.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright 2018-2020 Broadcom. 
+ */ + +#ifndef __UAPI_LINUX_MISC_BCM_VK_H +#define __UAPI_LINUX_MISC_BCM_VK_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define BCM_VK_MAX_FILENAME 64 + +struct vk_image { + __u32 type; /* Type of image */ +#define VK_IMAGE_TYPE_BOOT1 1 /* 1st stage (load to SRAM) */ +#define VK_IMAGE_TYPE_BOOT2 2 /* 2nd stage (load to DDR) */ + __u8 filename[BCM_VK_MAX_FILENAME]; /* Filename of image */ +}; + +struct vk_reset { + __u32 arg1; + __u32 arg2; +}; + +#define VK_MAGIC 0x5e + +/* Load image to Valkyrie */ +#define VK_IOCTL_LOAD_IMAGE _IOW(VK_MAGIC, 0x2, struct vk_image) + +/* Send Reset to Valkyrie */ +#define VK_IOCTL_RESET _IOW(VK_MAGIC, 0x4, struct vk_reset) + +/* + * Firmware Status accessed directly via BAR space + */ +#define VK_BAR_FWSTS 0x41c +#define VK_BAR_COP_FWSTS 0x428 +/* VK_FWSTS definitions */ +#define VK_FWSTS_RELOCATION_ENTRY (1UL << 0) +#define VK_FWSTS_RELOCATION_EXIT (1UL << 1) +#define VK_FWSTS_INIT_START (1UL << 2) +#define VK_FWSTS_ARCH_INIT_DONE (1UL << 3) +#define VK_FWSTS_PRE_KNL1_INIT_DONE (1UL << 4) +#define VK_FWSTS_PRE_KNL2_INIT_DONE (1UL << 5) +#define VK_FWSTS_POST_KNL_INIT_DONE (1UL << 6) +#define VK_FWSTS_INIT_DONE (1UL << 7) +#define VK_FWSTS_APP_INIT_START (1UL << 8) +#define VK_FWSTS_APP_INIT_DONE (1UL << 9) +#define VK_FWSTS_MASK 0xffffffff +#define VK_FWSTS_READY (VK_FWSTS_INIT_START | \ + VK_FWSTS_ARCH_INIT_DONE | \ + VK_FWSTS_PRE_KNL1_INIT_DONE | \ + VK_FWSTS_PRE_KNL2_INIT_DONE | \ + VK_FWSTS_POST_KNL_INIT_DONE | \ + VK_FWSTS_INIT_DONE | \ + VK_FWSTS_APP_INIT_START | \ + VK_FWSTS_APP_INIT_DONE) +/* Deinit */ +#define VK_FWSTS_APP_DEINIT_START (1UL << 23) +#define VK_FWSTS_APP_DEINIT_DONE (1UL << 24) +#define VK_FWSTS_DRV_DEINIT_START (1UL << 25) +#define VK_FWSTS_DRV_DEINIT_DONE (1UL << 26) +#define VK_FWSTS_RESET_DONE (1UL << 27) +#define VK_FWSTS_DEINIT_TRIGGERED (VK_FWSTS_APP_DEINIT_START | \ + VK_FWSTS_APP_DEINIT_DONE | \ + VK_FWSTS_DRV_DEINIT_START | \ + VK_FWSTS_DRV_DEINIT_DONE) +/* Last nibble for reboot reason */ +#define VK_FWSTS_RESET_REASON_SHIFT 28 +#define VK_FWSTS_RESET_REASON_MASK (0xf << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_SYS_PWRUP (0x0 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_MBOX_DB (0x1 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_M7_WDOG (0x2 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_TEMP (0x3 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_PCI_FLR (0x4 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_PCI_HOT (0x5 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_PCI_WARM (0x6 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_PCI_COLD (0x7 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_L1 (0x8 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_L0 (0x9 << VK_FWSTS_RESET_REASON_SHIFT) +#define VK_FWSTS_RESET_UNKNOWN (0xf << VK_FWSTS_RESET_REASON_SHIFT) + +#endif /* __UAPI_LINUX_MISC_BCM_VK_H */ diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h index dd8306ea336c..e6524ead2b7b 100644 --- a/include/uapi/linux/mount.h +++ b/include/uapi/linux/mount.h @@ -1,6 +1,8 @@ #ifndef _UAPI_LINUX_MOUNT_H #define _UAPI_LINUX_MOUNT_H +#include <linux/types.h> + /* * These are the fs-independent mount-flags: up to 32 flags are supported * @@ -117,5 +119,19 @@ enum fsconfig_command { #define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. 
*/ #define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ #define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ +#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */ + +/* + * mount_setattr() + */ +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +/* List of all mount_attr versions. */ +#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */ #endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 9762660df741..e1172c1ffdfd 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -36,6 +36,7 @@ enum { /* netlink interface */ #define MPTCP_PM_NAME "mptcp_pm" #define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds" +#define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events" #define MPTCP_PM_VER 0x1 /* @@ -82,6 +83,7 @@ enum { MPTCP_PM_CMD_FLUSH_ADDRS, MPTCP_PM_CMD_SET_LIMITS, MPTCP_PM_CMD_GET_LIMITS, + MPTCP_PM_CMD_SET_FLAGS, __MPTCP_PM_CMD_AFTER_LAST }; @@ -101,6 +103,81 @@ struct mptcp_info { __u64 mptcpi_write_seq; __u64 mptcpi_snd_una; __u64 mptcpi_rcv_nxt; + __u8 mptcpi_local_addr_used; + __u8 mptcpi_local_addr_max; }; +/* + * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport + * A new MPTCP connection has been created. It is the good time to allocate + * memory and send ADD_ADDR if needed. Depending on the traffic-patterns + * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent. + * + * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport + * A MPTCP connection is established (can start new subflows). + * + * MPTCP_EVENT_CLOSED: token + * A MPTCP connection has stopped. + * + * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] + * A new address has been announced by the peer. + * + * MPTCP_EVENT_REMOVED: token, rem_id + * An address has been lost by the peer. + * + * MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6, + * daddr4 | daddr6, sport, dport, backup, + * if_idx [, error] + * A new subflow has been established. 'error' should not be set. + * + * MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, backup, if_idx [, error] + * A subflow has been closed. An error (copy of sk_err) could be set if an + * error has been detected for this subflow. + * + * MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, backup, if_idx [, error] + * The priority of a subflow has changed. 'error' should not be set. 
+ */ +enum mptcp_event_type { + MPTCP_EVENT_UNSPEC = 0, + MPTCP_EVENT_CREATED = 1, + MPTCP_EVENT_ESTABLISHED = 2, + MPTCP_EVENT_CLOSED = 3, + + MPTCP_EVENT_ANNOUNCED = 6, + MPTCP_EVENT_REMOVED = 7, + + MPTCP_EVENT_SUB_ESTABLISHED = 10, + MPTCP_EVENT_SUB_CLOSED = 11, + + MPTCP_EVENT_SUB_PRIORITY = 13, +}; + +enum mptcp_event_attr { + MPTCP_ATTR_UNSPEC = 0, + + MPTCP_ATTR_TOKEN, /* u32 */ + MPTCP_ATTR_FAMILY, /* u16 */ + MPTCP_ATTR_LOC_ID, /* u8 */ + MPTCP_ATTR_REM_ID, /* u8 */ + MPTCP_ATTR_SADDR4, /* be32 */ + MPTCP_ATTR_SADDR6, /* struct in6_addr */ + MPTCP_ATTR_DADDR4, /* be32 */ + MPTCP_ATTR_DADDR6, /* struct in6_addr */ + MPTCP_ATTR_SPORT, /* be16 */ + MPTCP_ATTR_DPORT, /* be16 */ + MPTCP_ATTR_BACKUP, /* u8 */ + MPTCP_ATTR_ERROR, /* u8 */ + MPTCP_ATTR_FLAGS, /* u16 */ + MPTCP_ATTR_TIMEOUT, /* u32 */ + MPTCP_ATTR_IF_IDX, /* s32 */ + + __MPTCP_ATTR_AFTER_LAST +}; + +#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1) + #endif /* _UAPI_MPTCP_H */ diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h index 9744773de5ff..bd4424de56ff 100644 --- a/include/uapi/linux/mrp_bridge.h +++ b/include/uapi/linux/mrp_bridge.h @@ -71,90 +71,4 @@ enum br_mrp_sub_tlv_header_type { BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3, }; -struct br_mrp_tlv_hdr { - __u8 type; - __u8 length; -}; - -struct br_mrp_sub_tlv_hdr { - __u8 type; - __u8 length; -}; - -struct br_mrp_end_hdr { - struct br_mrp_tlv_hdr hdr; -}; - -struct br_mrp_common_hdr { - __be16 seq_id; - __u8 domain[MRP_DOMAIN_UUID_LENGTH]; -}; - -struct br_mrp_ring_test_hdr { - __be16 prio; - __u8 sa[ETH_ALEN]; - __be16 port_role; - __be16 state; - __be16 transitions; - __be32 timestamp; -}; - -struct br_mrp_ring_topo_hdr { - __be16 prio; - __u8 sa[ETH_ALEN]; - __be16 interval; -}; - -struct br_mrp_ring_link_hdr { - __u8 sa[ETH_ALEN]; - __be16 port_role; - __be16 interval; - __be16 blocked; -}; - -struct br_mrp_sub_opt_hdr { - __u8 type; - __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH]; -}; - -struct br_mrp_test_mgr_nack_hdr { - __be16 prio; - __u8 sa[ETH_ALEN]; - __be16 other_prio; - __u8 other_sa[ETH_ALEN]; -}; - -struct br_mrp_test_prop_hdr { - __be16 prio; - __u8 sa[ETH_ALEN]; - __be16 other_prio; - __u8 other_sa[ETH_ALEN]; -}; - -struct br_mrp_oui_hdr { - __u8 oui[MRP_OUI_LENGTH]; -}; - -struct br_mrp_in_test_hdr { - __be16 id; - __u8 sa[ETH_ALEN]; - __be16 port_role; - __be16 state; - __be16 transitions; - __be32 timestamp; -}; - -struct br_mrp_in_topo_hdr { - __u8 sa[ETH_ALEN]; - __be16 id; - __be16 interval; -}; - -struct br_mrp_in_link_hdr { - __u8 sa[ETH_ALEN]; - __be16 port_role; - __be16 id; - __be16 interval; -}; - #endif diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index b1633e7ba529..79bab7a36b30 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -164,7 +164,10 @@ enum nft_hook_attributes { */ enum nft_table_flags { NFT_TABLE_F_DORMANT = 0x1, + NFT_TABLE_F_OWNER = 0x2, }; +#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT | \ + NFT_TABLE_F_OWNER) /** * enum nft_table_attributes - nf_tables table netlink attributes @@ -173,6 +176,7 @@ enum nft_table_flags { * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32) * @NFTA_TABLE_USE: number of chains in this table (NLA_U32) * @NFTA_TABLE_USERDATA: user data (NLA_BINARY) + * @NFTA_TABLE_OWNER: owner of this table through netlink portID (NLA_U32) */ enum nft_table_attributes { NFTA_TABLE_UNSPEC, @@ -182,6 +186,7 @@ enum nft_table_attributes { NFTA_TABLE_HANDLE, 
NFTA_TABLE_PAD, NFTA_TABLE_USERDATA, + NFTA_TABLE_OWNER, __NFTA_TABLE_MAX }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h index a13137afc429..70af02092d16 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h +++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h @@ -5,7 +5,7 @@ #define NFCT_HELPER_STATUS_DISABLED 0 #define NFCT_HELPER_STATUS_ENABLED 1 -enum nfnl_acct_msg_types { +enum nfnl_cthelper_msg_types { NFNL_MSG_CTHELPER_NEW, NFNL_MSG_CTHELPER_GET, NFNL_MSG_CTHELPER_DEL, diff --git a/include/uapi/linux/nfs3.h b/include/uapi/linux/nfs3.h index 37e4b34e6b43..c22ab77713bd 100644 --- a/include/uapi/linux/nfs3.h +++ b/include/uapi/linux/nfs3.h @@ -63,6 +63,12 @@ enum nfs3_ftype { NF3BAD = 8 }; +enum nfs3_time_how { + DONT_CHANGE = 0, + SET_TO_SERVER_TIME = 1, + SET_TO_CLIENT_TIME = 2, +}; + struct nfs3_fh { unsigned short size; unsigned char data[NFS3_FHSIZE]; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 40832d13c2f1..ac78da99fccd 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1963,8 +1963,15 @@ enum nl80211_commands { * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire * probe-response frame. The DA field in the 802.11 header is zero-ed out, * to be filled by the FW. - * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable - * this feature. Currently, only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_VHT: Force VHT capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_HE: Force HE capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the * ATTR_HT_CAPABILITY to which attention should be paid. * Currently, only mac80211 NICs support this feature. @@ -3045,6 +3052,8 @@ enum nl80211_attrs { NL80211_ATTR_SAR_SPEC, + NL80211_ATTR_DISABLE_HE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h index 58b1eb711360..a5feb7604948 100644 --- a/include/uapi/linux/openat2.h +++ b/include/uapi/linux/openat2.h @@ -35,5 +35,9 @@ struct open_how { #define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." be scoped inside the dirfd (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. 
*/ #endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index b15e3447cd9f..ad15e40d7f5d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -145,12 +145,14 @@ enum perf_event_sample_format { PERF_SAMPLE_CGROUP = 1U << 21, PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22, PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23, + PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24, - PERF_SAMPLE_MAX = 1U << 24, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; +#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) /* * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set * @@ -386,7 +388,8 @@ struct perf_event_attr { aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ - __reserved_1 : 30; + build_id : 1, /* use build id in mmap2 events */ + __reserved_1 : 29; union { __u32 wakeup_events; /* wakeup every n events */ @@ -659,6 +662,22 @@ struct perf_event_mmap_page { __u64 aux_size; }; +/* + * The current state of perf_event_header::misc bits usage: + * ('|' used bit, '-' unused bit) + * + * 012 CDEF + * |||---------|||| + * + * Where: + * 0-2 CPUMODE_MASK + * + * C PROC_MAP_PARSE_TIMEOUT + * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT + * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT + * F (reserved) + */ + #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0) #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) #define PERF_RECORD_MISC_KERNEL (1 << 0) @@ -690,6 +709,7 @@ struct perf_event_mmap_page { * * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events + * PERF_RECORD_MISC_MMAP_BUILD_ID - PERF_RECORD_MMAP2 event * * * PERF_RECORD_MISC_EXACT_IP: @@ -699,9 +719,13 @@ struct perf_event_mmap_page { * * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: * Indicates that thread was preempted in TASK_RUNNING state. + * + * PERF_RECORD_MISC_MMAP_BUILD_ID: + * Indicates that mmap2 event carries build id data. 
*/ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) +#define PERF_RECORD_MISC_MMAP_BUILD_ID (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ @@ -890,7 +914,24 @@ enum perf_event_type { * char data[size]; * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * - * { u64 weight; } && PERF_SAMPLE_WEIGHT + * { union perf_sample_weight + * { + * u64 full; && PERF_SAMPLE_WEIGHT + * #if defined(__LITTLE_ENDIAN_BITFIELD) + * struct { + * u32 var1_dw; + * u16 var2_w; + * u16 var3_w; + * } && PERF_SAMPLE_WEIGHT_STRUCT + * #elif defined(__BIG_ENDIAN_BITFIELD) + * struct { + * u16 var3_w; + * u16 var2_w; + * u32 var1_dw; + * } && PERF_SAMPLE_WEIGHT_STRUCT + * #endif + * } + * } * { u64 data_src; } && PERF_SAMPLE_DATA_SRC * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * { u64 abi; # enum perf_sample_regs_abi @@ -915,10 +956,20 @@ enum perf_event_type { * u64 addr; * u64 len; * u64 pgoff; - * u32 maj; - * u32 min; - * u64 ino; - * u64 ino_generation; + * union { + * struct { + * u32 maj; + * u32 min; + * u64 ino; + * u64 ino_generation; + * }; + * struct { + * u8 build_id_size; + * u8 __reserved_1; + * u16 __reserved_2; + * u8 build_id[20]; + * }; + * }; * u32 prot, flags; * char filename[]; * struct sample_id sample_id; @@ -1127,14 +1178,16 @@ union perf_mem_data_src { mem_lvl_num:4, /* memory hierarchy level number */ mem_remote:1, /* remote */ mem_snoopx:2, /* snoop mode, ext */ - mem_rsvd:24; + mem_blk:3, /* access blocked */ + mem_rsvd:21; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:24, + __u64 mem_rsvd:21, + mem_blk:3, /* access blocked */ mem_snoopx:2, /* snoop mode, ext */ mem_remote:1, /* remote */ mem_lvl_num:4, /* memory hierarchy level number */ @@ -1217,6 +1270,12 @@ union perf_mem_data_src { #define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ #define PERF_MEM_TLB_SHIFT 26 +/* Access blocked */ +#define PERF_MEM_BLK_NA 0x01 /* not available */ +#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ +#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ +#define PERF_MEM_BLK_SHIFT 40 + #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) @@ -1248,4 +1307,23 @@ struct perf_branch_entry { reserved:40; }; +union perf_sample_weight { + __u64 full; +#if defined(__LITTLE_ENDIAN_BITFIELD) + struct { + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; + }; +#elif defined(__BIG_ENDIAN_BITFIELD) + struct { + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; + }; +#else +#error "Unknown endianness" +#endif +}; + #endif /* _UAPI_LINUX_PERF_EVENT_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index ee95f42fb0ec..7ea59cfe1fa7 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -591,6 +591,9 @@ enum { TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */ TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */ TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */ + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */ + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. 
*/ + __TCA_FLOWER_KEY_CT_FLAGS_MAX, }; enum { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 9e7c2c607845..79a699f106b1 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -434,6 +434,7 @@ enum { TCA_HTB_RATE64, TCA_HTB_CEIL64, TCA_HTB_PAD, + TCA_HTB_OFFLOAD, __TCA_HTB_MAX, }; diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 90deb41c8a34..667f1aed091c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -251,5 +251,8 @@ struct prctl_mm_map { #define PR_SET_SYSCALL_USER_DISPATCH 59 # define PR_SYS_DISPATCH_OFF 0 # define PR_SYS_DISPATCH_ON 1 +/* The control values for the user space selector when dispatch is enabled */ +# define SYSCALL_DISPATCH_FILTER_ALLOW 0 +# define SYSCALL_DISPATCH_FILTER_BLOCK 1 #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h index 6e449e784260..36e3efb81b01 100644 --- a/include/uapi/linux/rkisp1-config.h +++ b/include/uapi/linux/rkisp1-config.h @@ -49,8 +49,14 @@ #define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100 #define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800 -#define RKISP1_CIF_ISP_AE_MEAN_MAX 25 -#define RKISP1_CIF_ISP_HIST_BIN_N_MAX 16 +#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25 +#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81 +#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12 + +#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16 +#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32 +#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 + #define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3 #define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17 @@ -86,7 +92,9 @@ * Gamma out */ /* Maximum number of color samples supported */ -#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES 17 +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17 +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34 +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 /* * Lens shade correction @@ -102,8 +110,9 @@ /* * Histogram calculation */ -/* Last 3 values unused. */ -#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28 +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25 +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81 +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 /* * Defect Pixel Cluster Correction @@ -124,6 +133,21 @@ #define RKISP1_CIF_ISP_STAT_AFM (1U << 2) #define RKISP1_CIF_ISP_STAT_HIST (1U << 3) +/** + * enum rkisp1_cif_isp_version - ISP variants + * + * @RKISP1_V10: used at least in rk3288 and rk3399 + * @RKISP1_V11: declared in the original vendor code, but not used + * @RKISP1_V12: used at least in rk3326 and px30 + * @RKISP1_V13: used at least in rk1808 + */ +enum rkisp1_cif_isp_version { + RKISP1_V10 = 10, + RKISP1_V11, + RKISP1_V12, + RKISP1_V13, +}; + enum rkisp1_cif_isp_histogram_mode { RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE, RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED, @@ -510,6 +534,15 @@ enum rkisp1_cif_isp_goc_mode { * * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode) * @gamma_y: gamma out curve y-axis for all color components + * + * The number of entries of @gamma_y depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. + * + * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 + * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 + * entries. 
RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum + * of the two. */ struct rkisp1_cif_isp_goc_config { __u32 mode; @@ -524,6 +557,15 @@ struct rkisp1_cif_isp_goc_config { * skipped * @meas_window: coordinates of the measure window * @hist_weight: weighting factor for sub-windows + * + * The number of entries of @hist_weight depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. + * + * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 + * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 + * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum + * of the two. */ struct rkisp1_cif_isp_hst_config { __u32 mode; @@ -811,7 +853,15 @@ struct rkisp1_cif_isp_bls_meas_val { * @exp_mean: Mean luminance value of block xx * @bls_val: BLS measured values * - * Image is divided into 5x5 blocks. + * The number of entries of @exp_mean depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. + * + * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries, + * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries. + * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two. + * + * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12. */ struct rkisp1_cif_isp_ae_stat { __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX]; @@ -844,13 +894,29 @@ struct rkisp1_cif_isp_af_stat { /** * struct rkisp1_cif_isp_hist_stat - statistics histogram data * - * @hist_bins: measured bin counters + * @hist_bins: measured bin counters. Each bin is a 20 bits unsigned fixed point + * type. Bits 0-4 are the fractional part and bits 5-19 are the + * integer part. + * + * The window of the measurements area is divided to 5x5 sub-windows for + * V10/V11 and to 9x9 sub-windows for V12. The histogram is then computed for + * each sub-window independently and the final result is a weighted average of + * the histogram measurements on all sub-windows. The window of the + * measurements area and the weight of each sub-window are configurable using + * struct @rkisp1_cif_isp_hst_config. + * + * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13. + * + * The number of entries of @hist_bins depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. * - * Measurement window divided into 25 sub-windows, set - * with ISP_HIST_XXX + * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries, + * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries. + * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two. 
*/ struct rkisp1_cif_isp_hist_stat { - __u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX]; + __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX]; }; /** diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h index 1dccb55cf8c6..708adddf9f13 100644 --- a/include/uapi/linux/rpl.h +++ b/include/uapi/linux/rpl.h @@ -28,10 +28,10 @@ struct ipv6_rpl_sr_hdr { pad:4, reserved1:16; #elif defined(__BIG_ENDIAN_BITFIELD) - __u32 reserved:20, + __u32 cmpri:4, + cmpre:4, pad:4, - cmpri:4, - cmpre:4; + reserved:20; #else #error "Please fix <asm/byteorder.h>" #endif diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h index fa9aff91cbf2..f950bff75e97 100644 --- a/include/uapi/linux/rtc.h +++ b/include/uapi/linux/rtc.h @@ -110,6 +110,11 @@ struct rtc_pll_info { #define RTC_AF 0x20 /* Alarm interrupt */ #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ +/* feature list */ +#define RTC_FEATURE_ALARM 0 +#define RTC_FEATURE_ALARM_RES_MINUTE 1 +#define RTC_FEATURE_NEED_WEEK_DAY 2 +#define RTC_FEATURE_CNT 3 #define RTC_MAX_FREQ 8192 diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index b841caa4657e..91e4ca064d61 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -319,6 +319,11 @@ enum rt_scope_t { #define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */ #define RTM_F_OFFLOAD 0x4000 /* route is offloaded */ #define RTM_F_TRAP 0x8000 /* route is trapping packets */ +#define RTM_F_OFFLOAD_FAILED 0x20000000 /* route offload failed, this value + * is chosen to avoid conflicts with + * other flags defined in + * include/uapi/linux/ipv6_route.h + */ /* Reserved table identifiers */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 62c22045fe65..c4042dcfdc0c 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -208,9 +208,6 @@ /* Atheros AR933X SoC */ #define PORT_AR933X 99 -/* Energy Micro efm32 SoC */ -#define PORT_EFMUART 100 - /* ARC (Synopsys) on-chip UART */ #define PORT_ARC 101 diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h new file mode 100644 index 000000000000..236a85f08ded --- /dev/null +++ b/include/uapi/linux/spi/spi.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _UAPI_SPI_H +#define _UAPI_SPI_H + +#include <linux/const.h> + +#define SPI_CPHA _BITUL(0) /* clock phase */ +#define SPI_CPOL _BITUL(1) /* clock polarity */ + +#define SPI_MODE_0 (0|0) /* (original MicroWire) */ +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA) + +#define SPI_CS_HIGH _BITUL(2) /* chipselect active high? 
*/ +#define SPI_LSB_FIRST _BITUL(3) /* per-word bits-on-wire */ +#define SPI_3WIRE _BITUL(4) /* SI/SO signals shared */ +#define SPI_LOOP _BITUL(5) /* loopback mode */ +#define SPI_NO_CS _BITUL(6) /* 1 dev/bus, no chipselect */ +#define SPI_READY _BITUL(7) /* slave pulls low to pause */ +#define SPI_TX_DUAL _BITUL(8) /* transmit with 2 wires */ +#define SPI_TX_QUAD _BITUL(9) /* transmit with 4 wires */ +#define SPI_RX_DUAL _BITUL(10) /* receive with 2 wires */ +#define SPI_RX_QUAD _BITUL(11) /* receive with 4 wires */ +#define SPI_CS_WORD _BITUL(12) /* toggle cs after each word */ +#define SPI_TX_OCTAL _BITUL(13) /* transmit with 8 wires */ +#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */ +#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */ + +/* + * All the bits defined above should be covered by SPI_MODE_USER_MASK. + * The SPI_MODE_USER_MASK has the SPI_MODE_KERNEL_MASK counterpart in + * 'include/linux/spi/spi.h'. The bits defined here are from bit 0 upwards + * while in SPI_MODE_KERNEL_MASK they are from the other end downwards. + * These bits must not overlap. A static assert check should make sure of that. + * If adding extra bits, make sure to increase the bit index below as well. + */ +#define SPI_MODE_USER_MASK (_BITUL(16) - 1) + +#endif /* _UAPI_SPI_H */ diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h index d56427c0b3e0..0c3da08f2aff 100644 --- a/include/uapi/linux/spi/spidev.h +++ b/include/uapi/linux/spi/spidev.h @@ -25,35 +25,7 @@ #include <linux/types.h> #include <linux/ioctl.h> - -/* User space versions of kernel symbols for SPI clocking modes, - * matching <linux/spi/spi.h> - */ - -#define SPI_CPHA 0x01 -#define SPI_CPOL 0x02 - -#define SPI_MODE_0 (0|0) -#define SPI_MODE_1 (0|SPI_CPHA) -#define SPI_MODE_2 (SPI_CPOL|0) -#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) - -#define SPI_CS_HIGH 0x04 -#define SPI_LSB_FIRST 0x08 -#define SPI_3WIRE 0x10 -#define SPI_LOOP 0x20 -#define SPI_NO_CS 0x40 -#define SPI_READY 0x80 -#define SPI_TX_DUAL 0x100 -#define SPI_TX_QUAD 0x200 -#define SPI_RX_DUAL 0x400 -#define SPI_RX_QUAD 0x800 -#define SPI_CS_WORD 0x1000 -#define SPI_TX_OCTAL 0x2000 -#define SPI_RX_OCTAL 0x4000 -#define SPI_3WIRE_HIZ 0x8000 - -/*---------------------------------------------------------------------------*/ +#include <linux/spi/spi.h> /* IOCTL commands */ diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h new file mode 100644 index 000000000000..fbcce04abfe9 --- /dev/null +++ b/include/uapi/linux/surface_aggregator/cdev.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Surface System Aggregator Module (SSAM) user-space EC interface. + * + * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc + * device. This device provides direct user-space access to the SSAM EC. + * Intended for debugging and development. + * + * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H +#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * enum ssam_cdev_request_flags - Request flags for SSAM cdev request IOCTL. + * + * @SSAM_CDEV_REQUEST_HAS_RESPONSE: + * Specifies that the request expects a response. If not set, the request + * will be directly completed after its underlying packet has been + * transmitted. If set, the request transport system waits for a response + * of the request. 
+ * + * @SSAM_CDEV_REQUEST_UNSEQUENCED: + * Specifies that the request should be transmitted via an unsequenced + * packet. If set, the request must not have a response, meaning that this + * flag and the %SSAM_CDEV_REQUEST_HAS_RESPONSE flag are mutually + * exclusive. + */ +enum ssam_cdev_request_flags { + SSAM_CDEV_REQUEST_HAS_RESPONSE = 0x01, + SSAM_CDEV_REQUEST_UNSEQUENCED = 0x02, +}; + +/** + * struct ssam_cdev_request - Controller request IOCTL argument. + * @target_category: Target category of the SAM request. + * @target_id: Target ID of the SAM request. + * @command_id: Command ID of the SAM request. + * @instance_id: Instance ID of the SAM request. + * @flags: Request flags (see &enum ssam_cdev_request_flags). + * @status: Request status (output). + * @payload: Request payload (input data). + * @payload.data: Pointer to request payload data. + * @payload.length: Length of request payload data (in bytes). + * @response: Request response (output data). + * @response.data: Pointer to response buffer. + * @response.length: On input: Capacity of response buffer (in bytes). + * On output: Length of request response (number of bytes + * in the buffer that are actually used). + */ +struct ssam_cdev_request { + __u8 target_category; + __u8 target_id; + __u8 command_id; + __u8 instance_id; + __u16 flags; + __s16 status; + + struct { + __u64 data; + __u16 length; + __u8 __pad[6]; + } payload; + + struct { + __u64 data; + __u16 length; + __u8 __pad[6]; + } response; +} __attribute__((__packed__)); + +#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request) + +#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */ diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 458179df9b27..1e05d3caa712 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -571,6 +571,7 @@ enum { NET_IPV6_ACCEPT_SOURCE_ROUTE=25, NET_IPV6_ACCEPT_RA_FROM_LOCAL=26, NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27, + NET_IPV6_RA_DEFRTR_METRIC=28, __NET_IPV6_MAX }; diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 13ceeb395eb8..8fc09e8638b3 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -51,7 +51,7 @@ struct tcphdr { fin:1; #else #error "Adjust your <asm/byteorder.h> defines" -#endif +#endif __be16 window; __sum16 check; __be16 urg_ptr; @@ -62,14 +62,14 @@ struct tcphdr { * (union is compatible to any of its members) * This means this part of the code is -fstrict-aliasing safe now. */ -union tcp_word_hdr { +union tcp_word_hdr { struct tcphdr hdr; - __be32 words[5]; -}; + __be32 words[5]; +}; -#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) +#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3]) -enum { +enum { TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), @@ -80,7 +80,7 @@ enum { TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) -}; +}; /* * TCP general constants @@ -103,8 +103,8 @@ enum { #define TCP_QUICKACK 12 /* Block/reenable quick acks */ #define TCP_CONGESTION 13 /* Congestion control algorithm */ #define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */ -#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ -#define TCP_THIN_DUPACK 17 /* Fast retrans. 
after 1 dupack */ +#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ +#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ #define TCP_REPAIR 19 /* TCP sock is under repair right now */ #define TCP_REPAIR_QUEUE 20 @@ -314,6 +314,7 @@ enum { TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */ TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */ + TCP_NLA_TTL, /* TTL or hop limit of a packet received */ }; /* for TCP_MD5SIG socket option */ @@ -353,5 +354,9 @@ struct tcp_zerocopy_receive { __u64 copybuf_address; /* in: copybuf address (small reads) */ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */ __u32 flags; /* in: flags */ + __u64 msg_control; /* ancillary data */ + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; /* set to 0 for now */ }; #endif /* _UAPI_LINUX_TCP_H */ diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index d67cadf221fc..25a6c534beb1 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -355,7 +355,7 @@ struct tee_iocl_supp_send_arg { }; /** - * TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function + * TEE_IOC_SUPPL_SEND - Send a response to a received request * * Takes a struct tee_ioctl_buf_data which contains a struct * tee_iocl_supp_send_arg followed by any array of struct tee_param diff --git a/include/uapi/linux/termios.h b/include/uapi/linux/termios.h index 33961d4e4de0..e6da9d4433d1 100644 --- a/include/uapi/linux/termios.h +++ b/include/uapi/linux/termios.h @@ -5,19 +5,4 @@ #include <linux/types.h> #include <asm/termios.h> -#define NFF 5 - -struct termiox -{ - __u16 x_hflag; - __u16 x_cflag; - __u16 x_rflag[NFF]; - __u16 x_sflag; -}; - -#define RTSXOFF 0x0001 /* RTS flow control on input */ -#define CTSXON 0x0002 /* CTS flow control on output */ -#define DTRXOFF 0x0004 /* DTR flow control on input */ -#define DSRXON 0x0008 /* DCD flow control on output */ - #endif diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 0f865ae4ba89..17ce56198c9a 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -968,9 +968,22 @@ struct usb_ssp_cap_descriptor { __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */ #define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */ #define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */ +#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0 +#define USB_SSP_SUBLINK_SPEED_LSE_KBPS 1 +#define USB_SSP_SUBLINK_SPEED_LSE_MBPS 2 +#define USB_SSP_SUBLINK_SPEED_LSE_GBPS 3 + #define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */ +#define USB_SSP_SUBLINK_SPEED_ST_SYM_RX 0 +#define USB_SSP_SUBLINK_SPEED_ST_ASYM_RX 1 +#define USB_SSP_SUBLINK_SPEED_ST_SYM_TX 2 +#define USB_SSP_SUBLINK_SPEED_ST_ASYM_TX 3 + #define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */ #define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */ +#define USB_SSP_SUBLINK_SPEED_LP_SS 0 +#define USB_SSP_SUBLINK_SPEED_LP_SSP 1 + #define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ } __attribute__((packed)); diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h index fdd4d88a7b95..d791cc58a7f0 100644 --- a/include/uapi/linux/usb/tmc.h +++ b/include/uapi/linux/usb/tmc.h @@ -102,6 +102,9 @@ struct usbtmc_message { #define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8) #define 
USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8) +#define USBTMC_IOCTL_GET_STB _IOR(USBTMC_IOC_NR, 26, __u8) +#define USBTMC_IOCTL_GET_SRQ_STB _IOR(USBTMC_IOC_NR, 27, __u8) + /* Cancel and cleanup asynchronous calls */ #define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35) #define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36) diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 823b214aac0c..039c0d7add1b 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -204,6 +204,11 @@ enum v4l2_colorfx { * We reserve 16 controls for this driver. */ #define V4L2_CID_USER_CODA_BASE (V4L2_CID_USER_BASE + 0x10e0) +/* + * The base for MIPI CCS driver controls. + * We reserve 128 controls for this driver. + */ +#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0) /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls @@ -422,6 +427,7 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_CODEC_BASE+227) #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228) #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229) +#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230) /* CIDs for the MPEG-2 Part 2 (H.262) codec */ #define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270) @@ -585,6 +591,15 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type { #define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+386) #define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+387) #define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+388) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+389) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+390) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE+391) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE+392) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE+393) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE+394) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE+395) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE+396) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE+397) #define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_CODEC_BASE+400) #define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_CODEC_BASE+401) #define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_CODEC_BASE+402) @@ -775,6 +790,13 @@ enum v4l2_mpeg_video_frame_skip_mode { V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2, }; +#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 647) +#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 648) +#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 649) +#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 650) +#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651) +#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652) + /* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ #define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000) #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0) diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 00850b98078a..a38454d9e0f5 100644 --- 
a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -176,7 +176,7 @@ struct v4l2_subdev_capability { }; /* The v4l2 sub-device video device node is registered in read-only mode. */ -#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0) +#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001 /* Backwards compatibility define --- to be removed */ #define v4l2_subdev_edid v4l2_edid diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h new file mode 100644 index 000000000000..66a41e4ec163 --- /dev/null +++ b/include/uapi/linux/vdpa.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * vdpa device management interface + * Copyright (c) 2020 Mellanox Technologies Ltd. All rights reserved. + */ + +#ifndef _UAPI_LINUX_VDPA_H_ +#define _UAPI_LINUX_VDPA_H_ + +#define VDPA_GENL_NAME "vdpa" +#define VDPA_GENL_VERSION 0x1 + +enum vdpa_command { + VDPA_CMD_UNSPEC, + VDPA_CMD_MGMTDEV_NEW, + VDPA_CMD_MGMTDEV_GET, /* can dump */ + VDPA_CMD_DEV_NEW, + VDPA_CMD_DEV_DEL, + VDPA_CMD_DEV_GET, /* can dump */ +}; + +enum vdpa_attr { + VDPA_ATTR_UNSPEC, + + /* bus name (optional) + dev name together make the parent device handle */ + VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */ + VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */ + VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, /* u64 */ + + VDPA_ATTR_DEV_NAME, /* string */ + VDPA_ATTR_DEV_ID, /* u32 */ + VDPA_ATTR_DEV_VENDOR_ID, /* u32 */ + VDPA_ATTR_DEV_MAX_VQS, /* u32 */ + VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */ + + /* new attributes must be added above here */ + VDPA_ATTR_MAX, +}; + +#endif diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index d1812777139f..8ce36c1d53ca 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -46,6 +46,12 @@ */ #define VFIO_NOIOMMU_IOMMU 8 +/* Supports VFIO_DMA_UNMAP_FLAG_ALL */ +#define VFIO_UNMAP_ALL 9 + +/* Supports the vaddr flag for DMA map and unmap */ +#define VFIO_UPDATE_VADDR 10 + /* * The IOCTL interface is designed for extensibility by embedding the * structure length (argsz) and flags into structures passed between @@ -1074,12 +1080,22 @@ struct vfio_iommu_type1_info_dma_avail { * * Map process virtual addresses to IO virtual addresses using the * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required. + * + * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and + * unblock translation of host virtual addresses in the iova range. The vaddr + * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To + * maintain memory consistency within the user application, the updated vaddr + * must address the same memory object as originally mapped. Failure to do so + * will result in user memory corruption and/or device misbehavior. iova and + * size must match those in the original MAP_DMA call. Protection is not + * changed, and the READ & WRITE flags must be 0. */ struct vfio_iommu_type1_dma_map { __u32 argsz; __u32 flags; #define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */ #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */ +#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2) __u64 vaddr; /* Process virtual address */ __u64 iova; /* IO virtual address */ __u64 size; /* Size of mapping (bytes) */ @@ -1102,6 +1118,7 @@ struct vfio_bitmap { * field. No guarantee is made to the user that arbitrary unmaps of iova * or size different from those used in the original mapping call will * succeed. 
+ * * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap * before unmapping IO virtual addresses. When this flag is set, the user must * provide a struct vfio_bitmap in data[]. User must provide zero-allocated @@ -1111,11 +1128,21 @@ struct vfio_bitmap { * indicates that the page at that offset from iova is dirty. A Bitmap of the * pages in the range of unmapped size is returned in the user-provided * vfio_bitmap.data. + * + * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses. iova and size + * must be 0. This cannot be combined with the get-dirty-bitmap flag. + * + * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host + * virtual addresses in the iova range. Tasks that attempt to translate an + * iova's vaddr will block. DMA to already-mapped pages continues. This + * cannot be combined with the get-dirty-bitmap flag. */ struct vfio_iommu_type1_dma_unmap { __u32 argsz; __u32 flags; #define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0) +#define VFIO_DMA_UNMAP_FLAG_ALL (1 << 1) +#define VFIO_DMA_UNMAP_FLAG_VADDR (1 << 2) __u64 iova; /* IO virtual address */ __u64 size; /* Size of mapping (bytes) */ __u8 data[]; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index dba3827c43ca..5a86b521a450 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -309,7 +309,9 @@ struct hl_info_hw_ip_info { __u32 num_of_events; __u32 device_id; /* PCI Device ID */ __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */ - __u32 reserved[2]; + __u32 reserved; + __u16 first_available_interrupt_id; + __u16 reserved2; __u32 cpld_version; __u32 psoc_pci_pll_nr; __u32 psoc_pci_pll_nf; @@ -320,6 +322,8 @@ struct hl_info_hw_ip_info { __u8 pad[2]; __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN]; __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN]; + __u64 reserved3; + __u64 dram_page_size; }; struct hl_info_dram_usage { @@ -327,6 +331,8 @@ struct hl_info_dram_usage { __u64 ctx_dram_mem; }; +#define HL_BUSY_ENGINES_MASK_EXT_SIZE 2 + struct hl_info_hw_idle { __u32 is_idle; /* @@ -339,7 +345,7 @@ struct hl_info_hw_idle { * Extended Bitmask of busy engines. * Bits definition is according to `enum <chip>_enging_id'. 
*/ - __u64 busy_engines_mask_ext; + __u64 busy_engines_mask_ext[HL_BUSY_ENGINES_MASK_EXT_SIZE]; }; struct hl_info_device_status { @@ -408,10 +414,13 @@ struct hl_pll_frequency_info { * struct hl_info_sync_manager - sync manager information * @first_available_sync_object: first available sob * @first_available_monitor: first available monitor + * @first_available_cq: first available cq */ struct hl_info_sync_manager { __u32 first_available_sync_object; __u32 first_available_monitor; + __u32 first_available_cq; + __u32 reserved; }; /** @@ -604,11 +613,14 @@ struct hl_cs_chunk { }; /* SIGNAL and WAIT/COLLECTIVE_WAIT flags are mutually exclusive */ -#define HL_CS_FLAGS_FORCE_RESTORE 0x1 -#define HL_CS_FLAGS_SIGNAL 0x2 -#define HL_CS_FLAGS_WAIT 0x4 -#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 -#define HL_CS_FLAGS_TIMESTAMP 0x20 +#define HL_CS_FLAGS_FORCE_RESTORE 0x1 +#define HL_CS_FLAGS_SIGNAL 0x2 +#define HL_CS_FLAGS_WAIT 0x4 +#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 +#define HL_CS_FLAGS_TIMESTAMP 0x20 +#define HL_CS_FLAGS_STAGED_SUBMISSION 0x40 +#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 +#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100 #define HL_CS_STATUS_SUCCESS 0 @@ -622,10 +634,17 @@ struct hl_cs_in { /* holds address of array of hl_cs_chunk for execution phase */ __u64 chunks_execute; - /* this holds address of array of hl_cs_chunk for store phase - - * Currently not in use - */ - __u64 chunks_store; + union { + /* this holds address of array of hl_cs_chunk for store phase - + * Currently not in use + */ + __u64 chunks_store; + + /* Sequence number of a staged submission CS + * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set + */ + __u64 seq; + }; /* Number of chunks in restore phase array. Maximum number is * HL_MAX_JOBS_PER_CS @@ -704,6 +723,8 @@ union hl_wait_cs_args { #define HL_MEM_OP_MAP 2 /* Opcode to unmap previously mapped host and device memory */ #define HL_MEM_OP_UNMAP 3 +/* Opcode to map a hw block */ +#define HL_MEM_OP_MAP_BLOCK 4 /* Memory flags */ #define HL_MEM_CONTIGUOUS 0x1 @@ -758,6 +779,17 @@ struct hl_mem_in { __u64 mem_size; } map_host; + /* HL_MEM_OP_MAP_BLOCK - map a hw block */ + struct { + /* + * HW block address to map, a handle and size will be + * returned to the user and will be used to mmap the + * relevant block. Only addresses from configuration + * space are allowed. + */ + __u64 block_addr; + } map_block; + /* HL_MEM_OP_UNMAP - unmap host memory */ struct { /* Virtual address returned from HL_MEM_OP_MAP */ @@ -784,10 +816,26 @@ struct hl_mem_out { __u64 device_virt_addr; /* - * Used for HL_MEM_OP_ALLOC. This is the assigned - * handle for the allocated memory + * Used in HL_MEM_OP_ALLOC + * This is the assigned handle for the allocated memory */ __u64 handle; + + struct { + /* + * Used in HL_MEM_OP_MAP_BLOCK. + * This is the assigned handle for the mapped block + */ + __u64 block_handle; + + /* + * Used in HL_MEM_OP_MAP_BLOCK + * This is the size of the mapped block + */ + __u32 block_size; + + __u32 pad; + }; }; }; diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 7968a1845355..dafc7ebe545b 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2020, Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -251,6 +252,7 @@ enum uverbs_methods_mr { UVERBS_METHOD_MR_DESTROY, UVERBS_METHOD_ADVISE_MR, UVERBS_METHOD_QUERY_MR, + UVERBS_METHOD_REG_DMABUF_MR, }; enum uverbs_attrs_mr_destroy_ids { @@ -272,6 +274,18 @@ enum uverbs_attrs_query_mr_cmd_attr_ids { UVERBS_ATTR_QUERY_MR_RESP_IOVA, }; +enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids { + UVERBS_ATTR_REG_DMABUF_MR_HANDLE, + UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE, + UVERBS_ATTR_REG_DMABUF_MR_OFFSET, + UVERBS_ATTR_REG_DMABUF_MR_LENGTH, + UVERBS_ATTR_REG_DMABUF_MR_IOVA, + UVERBS_ATTR_REG_DMABUF_MR_FD, + UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS, + UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY, + UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY, +}; + enum uverbs_attrs_create_counters_cmd_attr_ids { UVERBS_ATTR_CREATE_COUNTERS_HANDLE, }; diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index f8b638c73371..901a4fd72c09 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -133,6 +133,13 @@ enum pvrdma_wc_flags { PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE, }; +enum pvrdma_network_type { + PVRDMA_NETWORK_IB, + PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB, + PVRDMA_NETWORK_IPV4, + PVRDMA_NETWORK_IPV6 +}; + struct pvrdma_alloc_ucontext_resp { __u32 qp_tab_size; __u32 reserved; diff --git a/include/xen/events.h b/include/xen/events.h index 8ec418e30c7f..c204262d9fc2 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -12,10 +12,11 @@ #include <asm/xen/hypercall.h> #include <asm/xen/events.h> +struct xenbus_device; + unsigned xen_evtchn_nr_channels(void); int bind_evtchn_to_irq(evtchn_port_t evtchn); -int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, @@ -35,9 +36,9 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned long irqflags, const char *devname, void *dev_id); -int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, +int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev, evtchn_port_t remote_port); -int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, +int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index b9c937b3a149..cb854df031ce 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -50,6 +50,13 @@ #include <linux/page-flags.h> #include <linux/kernel.h> +/* + * Technically there's no reliably invalid grant reference or grant handle, + * so pick the value that is the most unlikely one to be observed valid. + */ +#define INVALID_GRANT_REF ((grant_ref_t)-1) +#define INVALID_GRANT_HANDLE ((grant_handle_t)-1) + #define GNTTAB_RESERVED_XENSTORE 1 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ @@ -157,6 +164,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, map->flags = flags; map->ref = ref; map->dom = domid; + map->status = 1; /* arbitrary positive value */ } static inline void diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 8bfb242f433e..5ee37a296481 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -598,7 +598,9 @@ struct shared_info { * their gettimeofday() syscall on this wallclock-base value. 
*/ struct pvclock_wall_clock wc; - +#ifndef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif struct arch_shared_info arch; }; diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 2c43b0ef1e4d..b94074c82772 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -51,7 +51,6 @@ #define XENBUS_MAX_RING_GRANT_ORDER 4 #define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER) -#define INVALID_GRANT_HANDLE (~0U) /* Register callback to watch this node. */ struct xenbus_watch @@ -88,6 +87,13 @@ struct xenbus_device { struct completion down; struct work_struct work; struct semaphore reclaim_sem; + + /* Event channel based statistics and settings. */ + atomic_t event_channels; + atomic_t events; + atomic_t spurious_events; + atomic_t jiffies_eoi_delayed; + unsigned int spurious_threshold; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) @@ -192,8 +198,6 @@ void xs_suspend_cancel(void); struct work_struct; -void xenbus_probe(void); - #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ |
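The new /dev/surface/aggregator character device defined in include/uapi/linux/surface_aggregator/cdev.h above is a plain ioctl interface. A minimal user-space sketch of issuing one request through SSAM_CDEV_REQUEST follows; the target, command, and instance IDs are hypothetical placeholders that only show how struct ssam_cdev_request is filled in, and do not correspond to any real SAM command.

/* Hypothetical example: send one request that expects a response through
 * the SSAM cdev interface. All IDs below are placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/surface_aggregator/cdev.h>

int main(void)
{
	struct ssam_cdev_request rqst;
	uint8_t response[64];
	int fd, ret;

	fd = open("/dev/surface/aggregator", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rqst, 0, sizeof(rqst));
	rqst.target_category = 0x01;	/* placeholder */
	rqst.target_id = 0x01;		/* placeholder */
	rqst.command_id = 0x01;		/* placeholder */
	rqst.instance_id = 0x00;	/* placeholder */
	rqst.flags = SSAM_CDEV_REQUEST_HAS_RESPONSE;

	/* No payload; the response buffer capacity is passed in .length. */
	rqst.payload.data = 0;
	rqst.payload.length = 0;
	rqst.response.data = (uint64_t)(uintptr_t)response;
	rqst.response.length = sizeof(response);

	ret = ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
	if (ret < 0)
		perror("SSAM_CDEV_REQUEST");
	else
		printf("status=%d, response bytes=%u\n",
		       rqst.status, (unsigned int)rqst.response.length);

	close(fd);
	return 0;
}

Note that SSAM_CDEV_REQUEST_HAS_RESPONSE and SSAM_CDEV_REQUEST_UNSEQUENCED are mutually exclusive, as documented in the header: an unsequenced request must not expect a response.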
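The vfio changes above also add VFIO_DMA_UNMAP_FLAG_ALL, negotiated through the new VFIO_UNMAP_ALL extension. A hedged sketch follows, assuming an already-initialized type1 VFIO container file descriptor; the helper name and the surrounding setup are assumptions, not part of the patch.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: remove every DMA mapping of a container in a single call.
 * 'container' is assumed to be an open VFIO container fd with the
 * type1 IOMMU backend already selected.
 */
static int vfio_unmap_all(int container)
{
	struct vfio_iommu_type1_dma_unmap unmap;

	/* The ALL flag is only valid when the kernel reports VFIO_UNMAP_ALL. */
	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_UNMAP_ALL) <= 0)
		return -1;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);
	unmap.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	unmap.iova = 0;		/* must be 0 when unmapping everything */
	unmap.size = 0;		/* must be 0 when unmapping everything */

	return ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
}

As the updated comment states, the ALL flag cannot be combined with the get-dirty-bitmap flag, and the separate VFIO_DMA_UNMAP_FLAG_VADDR flag only invalidates host virtual addresses in the iova range without tearing the mappings down.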