Diffstat (limited to 'include')
1055 files changed, 34370 insertions, 16432 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 18197c16149f..3e8d969b22fe 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -207,4 +207,14 @@ struct acpi_pld_info { #define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) #define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */ +/* Panel position defined in _PLD section of ACPI Specification 6.3 */ + +#define ACPI_PLD_PANEL_TOP 0 +#define ACPI_PLD_PANEL_BOTTOM 1 +#define ACPI_PLD_PANEL_LEFT 2 +#define ACPI_PLD_PANEL_RIGHT 3 +#define ACPI_PLD_PANEL_FRONT 4 +#define ACPI_PLD_PANEL_BACK 5 +#define ACPI_PLD_PANEL_UNKNOWN 6 + #endif /* ACBUFFER_H */ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index e92f84fa8c68..0362cbb72359 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -188,6 +188,8 @@ #define ACPI_MAX_GSBUS_DATA_SIZE 255 #define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE +#define ACPI_PRM_INPUT_BUFFER_SIZE 26 + /* _sx_d and _sx_w control methods */ #define ACPI_NUM_sx_d_METHODS 4 diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index a2bc381c7ce7..30869ab77fba 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -20,6 +20,7 @@ #define METHOD_NAME__CLS "_CLS" #define METHOD_NAME__CRS "_CRS" #define METHOD_NAME__DDN "_DDN" +#define METHOD_NAME__DIS "_DIS" #define METHOD_NAME__DMA "_DMA" #define METHOD_NAME__HID "_HID" #define METHOD_NAME__INI "_INI" diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 1b4c45815695..5a3875744678 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -415,7 +415,7 @@ /* Conditional execution */ #define ACPI_DEBUG_EXEC(a) a -#define ACPI_DEBUG_ONLY_MEMBERS(a) a; +#define ACPI_DEBUG_ONLY_MEMBERS(a) a #define _VERBOSE_STRUCTURES /* Various object display routines for debug */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 3a82faac5767..13d93371790e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -280,6 +280,12 @@ struct acpi_device_power { struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ }; +struct acpi_dep_data { + struct list_head node; + acpi_handle supplier; + acpi_handle consumer; +}; + /* Performance Management */ struct acpi_device_perf_flags { @@ -498,8 +504,6 @@ extern int unregister_acpi_notifier(struct notifier_block *); */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); -struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); -void acpi_bus_put_acpi_device(struct acpi_device *adev); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); @@ -586,8 +590,11 @@ struct acpi_pci_root { /* helper */ -bool acpi_dma_supported(struct acpi_device *adev); +bool acpi_dma_supported(const struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); +int acpi_iommu_fwspec_init(struct device *dev, u32 id, + struct fwnode_handle *fwnode, + const struct iommu_ops *ops); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, @@ -685,6 +692,8 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); +void 
acpi_dev_clear_dependencies(struct acpi_device *supplier); +struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier); struct acpi_device * acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv); struct acpi_device * @@ -698,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); * @hrv: Hardware Revision of the device, pass -1 to not check _HRV * * The caller is responsible for invoking acpi_dev_put() on the returned device. - * - * FIXME: Due to above requirement there is a window that may invalidate @adev - * and next iteration will use a dangling pointer, e.g. in the case of a - * hotplug event. That said, the caller should ensure that this will never - * happen. */ #define for_each_acpi_dev_match(adev, hid, uid, hrv) \ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ @@ -716,7 +720,15 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev) static inline void acpi_dev_put(struct acpi_device *adev) { - put_device(&adev->dev); + if (adev) + put_device(&adev->dev); +} + +struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); + +static inline void acpi_bus_put_acpi_device(struct acpi_device *adev) +{ + acpi_dev_put(adev); } #else /* CONFIG_ACPI */ diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h index 027faa8883aa..a0212e67d6f4 100644 --- a/include/acpi/acpi_io.h +++ b/include/acpi/acpi_io.h @@ -14,6 +14,14 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, } #endif +#ifndef acpi_os_memmap +static inline void __iomem *acpi_os_memmap(acpi_physical_address phys, + acpi_size size) +{ + return ioremap_cache(phys, size); +} +#endif + extern bool acpi_permanent_mmap; void __iomem __ref diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index 40a91ce87e04..68e4d80c1b32 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -43,4 +43,4 @@ static inline void disable_hmat(void) { } #endif /* CONFIG_ACPI_HMAT */ -#endif /* __ACP_NUMA_H */ +#endif /* __ACPI_NUMA_H */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index f8d44b06f3e3..fa02e3ff0faf 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20210331 +#define ACPI_CA_VERSION 0x20210730 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index ce59903c2695..159070edd031 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -24,6 +24,7 @@ * file. Useful because they make it more difficult to inadvertently type in * the wrong signature. */ +#define ACPI_SIG_AEST "AEST" /* Arm Error Source Table */ #define ACPI_SIG_ASF "ASF!" 
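/*
 * [Editor's illustration, not part of the patch] Minimal sketch of a consumer
 * of the new _PLD panel-position constants from the acbuffer.h hunk above,
 * assuming a struct acpi_pld_info already decoded (e.g. via
 * acpi_get_physical_device_location()); pld_panel_name() is a hypothetical
 * helper name.
 */
static const char *pld_panel_name(const struct acpi_pld_info *pld)
{
	switch (pld->panel) {
	case ACPI_PLD_PANEL_TOP:	return "top";
	case ACPI_PLD_PANEL_BOTTOM:	return "bottom";
	case ACPI_PLD_PANEL_LEFT:	return "left";
	case ACPI_PLD_PANEL_RIGHT:	return "right";
	case ACPI_PLD_PANEL_FRONT:	return "front";
	case ACPI_PLD_PANEL_BACK:	return "back";
	default:			return "unknown";
	}
}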
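/*
 * [Editor's illustration, not part of the patch] With the acpi_bus.h hunk
 * above, acpi_dev_put() now tolerates NULL and acpi_bus_put_acpi_device()
 * becomes an inline alias for it, so lookup paths can pair get/put without
 * an extra NULL check; example_use() is a hypothetical caller.
 */
static void example_use(acpi_handle handle)
{
	struct acpi_device *adev = acpi_bus_get_acpi_device(handle);

	if (adev)
		dev_info(&adev->dev, "found\n");

	acpi_bus_put_acpi_device(adev);	/* safe even when adev is NULL */
}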
/* Alert Standard Format table */ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ #define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */ @@ -327,9 +328,20 @@ struct acpi_cedt_header { enum acpi_cedt_type { ACPI_CEDT_TYPE_CHBS = 0, - ACPI_CEDT_TYPE_RESERVED = 1 + ACPI_CEDT_TYPE_CFMWS = 1, + ACPI_CEDT_TYPE_RESERVED = 2, }; +/* Values for version field above */ + +#define ACPI_CEDT_CHBS_VERSION_CXL11 (0) +#define ACPI_CEDT_CHBS_VERSION_CXL20 (1) + +/* Values for length field above */ + +#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000) +#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000) + /* * CEDT subtables */ @@ -345,6 +357,34 @@ struct acpi_cedt_chbs { u64 length; }; +/* 1: CXL Fixed Memory Window Structure */ + +struct acpi_cedt_cfmws { + struct acpi_cedt_header header; + u32 reserved1; + u64 base_hpa; + u64 window_size; + u8 interleave_ways; + u8 interleave_arithmetic; + u16 reserved2; + u32 granularity; + u16 restrictions; + u16 qtg_id; + u32 interleave_targets[]; +}; + +/* Values for Interleave Arithmetic field above */ + +#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0) + +/* Values for Restrictions field above */ + +#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1) +#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1) +#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2) +#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3) +#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4) + /******************************************************************************* * * CPEP - Corrected Platform Error Polling table (ACPI 4.0) @@ -443,7 +483,7 @@ struct acpi_csrt_descriptor { * DBG2 - Debug Port Table 2 * Version 0 (Both main table and subtables) * - * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015 + * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020 * ******************************************************************************/ @@ -493,11 +533,24 @@ struct acpi_dbg2_device { #define ACPI_DBG2_16550_COMPATIBLE 0x0000 #define ACPI_DBG2_16550_SUBSET 0x0001 +#define ACPI_DBG2_MAX311XE_SPI 0x0002 #define ACPI_DBG2_ARM_PL011 0x0003 +#define ACPI_DBG2_MSM8X60 0x0004 +#define ACPI_DBG2_16550_NVIDIA 0x0005 +#define ACPI_DBG2_TI_OMAP 0x0006 +#define ACPI_DBG2_APM88XXXX 0x0008 +#define ACPI_DBG2_MSM8974 0x0009 +#define ACPI_DBG2_SAM5250 0x000A +#define ACPI_DBG2_INTEL_USIF 0x000B +#define ACPI_DBG2_IMX6 0x000C #define ACPI_DBG2_ARM_SBSA_32BIT 0x000D #define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E #define ACPI_DBG2_ARM_DCC 0x000F #define ACPI_DBG2_BCM2835 0x0010 +#define ACPI_DBG2_SDM845_1_8432MHZ 0x0011 +#define ACPI_DBG2_16550_WITH_GAS 0x0012 +#define ACPI_DBG2_SDM845_7_372MHZ 0x0013 +#define ACPI_DBG2_INTEL_LPSS 0x0014 #define ACPI_DBG2_1394_STANDARD 0x0000 diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 18cafe3ebddc..a47b32a5cbde 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -24,6 +24,7 @@ * file. Useful because they make it more difficult to inadvertently type in * the wrong signature. 
*/ +#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */ #define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ #define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ #define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ @@ -39,11 +40,14 @@ #define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */ #define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ +#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */ #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ +#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ #define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ #define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ #define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */ +#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -65,6 +69,190 @@ /******************************************************************************* * + * AEST - Arm Error Source Table + * + * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document + * September 2020. + * + ******************************************************************************/ + +struct acpi_table_aest { + struct acpi_table_header header; + void *node_array[]; +}; + +/* Common Subtable header - one per Node Structure (Subtable) */ + +struct acpi_aest_hdr { + u8 type; + u16 length; + u8 reserved; + u32 node_specific_offset; + u32 node_interface_offset; + u32 node_interrupt_offset; + u32 node_interrupt_count; + u64 timestamp_rate; + u64 reserved1; + u64 error_injection_rate; +}; + +/* Values for Type above */ + +#define ACPI_AEST_PROCESSOR_ERROR_NODE 0 +#define ACPI_AEST_MEMORY_ERROR_NODE 1 +#define ACPI_AEST_SMMU_ERROR_NODE 2 +#define ACPI_AEST_VENDOR_ERROR_NODE 3 +#define ACPI_AEST_GIC_ERROR_NODE 4 +#define ACPI_AEST_NODE_TYPE_RESERVED 5 /* 5 and above are reserved */ + +/* + * AEST subtables (Error nodes) + */ + +/* 0: Processor Error */ + +typedef struct acpi_aest_processor { + u32 processor_id; + u8 resource_type; + u8 reserved; + u8 flags; + u8 revision; + u64 processor_affinity; + +} acpi_aest_processor; + +/* Values for resource_type above, related structs below */ + +#define ACPI_AEST_CACHE_RESOURCE 0 +#define ACPI_AEST_TLB_RESOURCE 1 +#define ACPI_AEST_GENERIC_RESOURCE 2 +#define ACPI_AEST_RESOURCE_RESERVED 3 /* 3 and above are reserved */ + +/* 0R: Processor Cache Resource Substructure */ + +typedef struct acpi_aest_processor_cache { + u32 cache_reference; + u32 reserved; + +} acpi_aest_processor_cache; + +/* Values for cache_type above */ + +#define ACPI_AEST_CACHE_DATA 0 +#define ACPI_AEST_CACHE_INSTRUCTION 1 +#define ACPI_AEST_CACHE_UNIFIED 2 +#define ACPI_AEST_CACHE_RESERVED 3 /* 3 and above are reserved */ + +/* 1R: Processor TLB Resource Substructure */ + +typedef struct acpi_aest_processor_tlb { + u32 tlb_level; + u32 reserved; + +} acpi_aest_processor_tlb; + +/* 2R: Processor Generic Resource Substructure */ + +typedef struct acpi_aest_processor_generic { + u8 *resource; + +} acpi_aest_processor_generic; + +/* 1: Memory Error */ + +typedef struct acpi_aest_memory { + u32 srat_proximity_domain; + +} acpi_aest_memory; + +/* 2: Smmu Error */ + +typedef struct acpi_aest_smmu { + u32 iort_node_reference; + u32 subcomponent_reference; + +} acpi_aest_smmu; + +/* 3: Vendor Defined */ 
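/*
 * [Editor's illustration, not part of the patch] Sketch of sizing the
 * flexible interleave_targets[] array of the new CFMWS subtable.
 * Assumption, per CXL 2.0: with modulo arithmetic, interleave_ways holds
 * the log2 of the number of ways, and the subtable carries one u32 target
 * per way; cfmws_interleave_ways() is a hypothetical helper.
 */
static unsigned int cfmws_interleave_ways(const struct acpi_cedt_cfmws *cfmws)
{
	return 1U << cfmws->interleave_ways;
}

/*
 * Typical walk:
 *	for (i = 0; i < cfmws_interleave_ways(cfmws); i++)
 *		consume(cfmws->interleave_targets[i]);
 */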
+ +typedef struct acpi_aest_vendor { + u32 acpi_hid; + u32 acpi_uid; + u8 vendor_specific_data[16]; + +} acpi_aest_vendor; + +/* 4: Gic Error */ + +typedef struct acpi_aest_gic { + u32 interface_type; + u32 instance_id; + +} acpi_aest_gic; + +/* Values for interface_type above */ + +#define ACPI_AEST_GIC_CPU 0 +#define ACPI_AEST_GIC_DISTRIBUTOR 1 +#define ACPI_AEST_GIC_REDISTRIBUTOR 2 +#define ACPI_AEST_GIC_ITS 3 +#define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */ + +/* Node Interface Structure */ + +typedef struct acpi_aest_node_interface { + u8 type; + u8 reserved[3]; + u32 flags; + u64 address; + u32 error_record_index; + u32 error_record_count; + u64 error_record_implemented; + u64 error_status_reporting; + u64 addressing_mode; + +} acpi_aest_node_interface; + +/* Values for Type field above */ + +#define ACPI_AEST_NODE_SYSTEM_REGISTER 0 +#define ACPI_AEST_NODE_MEMORY_MAPPED 1 +#define ACPI_AEST_XFACE_RESERVED 2 /* 2 and above are reserved */ + +/* Node Interrupt Structure */ + +typedef struct acpi_aest_node_interrupt { + u8 type; + u8 reserved[2]; + u8 flags; + u32 gsiv; + u8 iort_id; + u8 reserved1[3]; + +} acpi_aest_node_interrupt; + +/* Values for Type field above */ + +#define ACPI_AEST_NODE_FAULT_HANDLING 0 +#define ACPI_AEST_NODE_ERROR_RECOVERY 1 +#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */ + +/******************************************************************************* + * + * BDAT - BIOS Data ACPI Table + * + * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5 + * Nov 2020 + * + ******************************************************************************/ + +struct acpi_table_bdat { + struct acpi_table_header header; + struct acpi_generic_address gas; +}; + +/******************************************************************************* + * * IORT - IO Remapping Table * * Conforms to "IO Remapping Table System Software on ARM Platforms", @@ -446,6 +634,12 @@ struct acpi_ivrs_device_hid { u8 uid_length; }; +/* Values for uid_type above */ + +#define ACPI_IVRS_UID_NOT_PRESENT 0 +#define ACPI_IVRS_UID_IS_INTEGER 1 +#define ACPI_IVRS_UID_IS_STRING 2 + /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ struct acpi_ivrs_memory { @@ -763,6 +957,20 @@ struct acpi_madt_multiproc_wakeup { u64 base_address; }; +#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 +#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048 + +struct acpi_madt_multiproc_wakeup_mailbox { + u16 command; + u16 reserved; /* reserved - must be zero */ + u32 apic_id; + u64 wakeup_vector; + u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */ + u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */ +}; + +#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 + /* * Common flags fields for MADT subtables */ @@ -1675,6 +1883,48 @@ struct acpi_pptt_id { /******************************************************************************* * + * PRMT - Platform Runtime Mechanism Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_prmt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +struct acpi_table_prmt_header { + u8 platform_guid[16]; + u32 module_info_offset; + u32 module_info_count; +}; + +struct acpi_prmt_module_header { + u16 revision; + u16 length; +}; + +struct acpi_prmt_module_info { + u16 revision; + u16 length; + u8 module_guid[16]; + u16 major_rev; + u16 minor_rev; + u16 handler_info_count; + 
u32 handler_info_offset; + u64 mmio_list_pointer; +}; + +struct acpi_prmt_handler_info { + u16 revision; + u16 length; + u8 handler_guid[16]; + u64 handler_address; + u64 static_data_buffer_address; + u64 acpi_param_buffer_address; +}; + +/******************************************************************************* + * * RASF - RAS Feature Table (ACPI 5.0) * Version 1 * @@ -1771,6 +2021,32 @@ enum acpi_rasf_status { /******************************************************************************* * + * RGRT - Regulatory Graphics Resource Table + * Version 1 + * + * Conforms to "ACPI RGRT" available at: + * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/ + * + ******************************************************************************/ + +struct acpi_table_rgrt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 version; + u8 image_type; + u8 reserved; + u8 image[0]; +}; + +/* image_type values */ + +enum acpi_rgrt_image_type { + ACPI_RGRT_TYPE_RESERVED0 = 0, + ACPI_RGRT_IMAGE_TYPE_PNG = 1, + ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/******************************************************************************* + * * SBST - Smart Battery Specification Table * Version 1 * @@ -1899,6 +2175,37 @@ struct acpi_sdev_pcie_path { u8 function; }; +/******************************************************************************* + * + * SVKL - Storage Volume Key Location Table (ACPI 6.4) + * From: "Guest-Host-Communication Interface (GHCI) for Intel + * Trust Domain Extensions (Intel TDX)". + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_svkl { + struct acpi_table_header header; /* Common ACPI table header */ + u32 count; +}; + +struct acpi_svkl_key { + u16 type; + u16 format; + u32 size; + u64 address; +}; + +enum acpi_svkl_type { + ACPI_SVKL_TYPE_MAIN_STORAGE = 0, + ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */ +}; + +enum acpi_svkl_format { + ACPI_SVKL_FORMAT_RAW_BINARY = 0, + ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */ +}; + /* Reset to default packing */ #pragma pack() diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 86903ac5bbc5..9125e2f16329 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -723,6 +723,10 @@ struct acpi_table_wpbt { u16 arguments_length; }; +struct acpi_wpbt_unicode { + u16 *unicode_string; +}; + /******************************************************************************* * * WSMT - Windows SMM Security Mitigations Table diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 9f4985b4d64d..bc159a9b4a73 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -135,6 +135,7 @@ struct cppc_cpudata { #ifdef CONFIG_ACPI_CPPC_LIB extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf); +extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf); extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); @@ -149,6 +150,10 @@ static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf) { return -ENOTSUPP; } +static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf) +{ + return -ENOTSUPP; +} static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs) { return -ENOTSUPP; diff --git 
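/*
 * [Editor's illustration, not part of the patch] Sketch of walking a PRM
 * module's handler array from the PRMT structures above.  Assumptions:
 * handler_info_offset is relative to the start of the module info
 * structure, and handler entries are packed back to back, each carrying
 * its own length; prm_for_each_handler() is a hypothetical helper.
 */
static void prm_for_each_handler(struct acpi_prmt_module_info *mod,
				 void (*fn)(struct acpi_prmt_handler_info *))
{
	struct acpi_prmt_handler_info *h =
		(void *)((u8 *)mod + mod->handler_info_offset);
	u16 i;

	for (i = 0; i < mod->handler_info_count; i++) {
		fn(h);
		h = (void *)((u8 *)h + h->length);
	}
}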
a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index f6656be81760..fb172a03a753 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -22,7 +22,7 @@ typedef __builtin_va_list va_list; #define va_arg(v, l) __builtin_va_arg(v, l) #define va_copy(d, s) __builtin_va_copy(d, s) #else -#include <stdarg.h> +#include <linux/stdarg.h> #endif #endif diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h deleted file mode 100644 index 073cf40f431b..000000000000 --- a/include/asm-generic/atomic-long.h +++ /dev/null @@ -1,1014 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-long.sh -// DO NOT MODIFY THIS FILE DIRECTLY - -#ifndef _ASM_GENERIC_ATOMIC_LONG_H -#define _ASM_GENERIC_ATOMIC_LONG_H - -#include <linux/compiler.h> -#include <asm/types.h> - -#ifdef CONFIG_64BIT -typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed -#else -typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed -#endif - -#ifdef CONFIG_64BIT - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic64_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic64_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic64_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic64_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic64_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic64_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic64_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic64_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic64_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic64_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic64_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - 
return atomic64_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic64_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic64_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic64_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic64_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic64_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic64_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic64_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic64_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic64_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic64_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic64_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic64_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic64_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic64_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic64_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic64_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic64_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic64_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic64_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ 
- return atomic64_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic64_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic64_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic64_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic64_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic64_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic64_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic64_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_release(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ 
- return atomic64_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic64_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic64_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic64_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic64_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic64_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic64_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic64_dec_if_positive(v); -} - -#else /* CONFIG_64BIT */ - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic_fetch_sub(i, v); -} - 
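/*
 * [Editor's illustration, not part of the patch] The generated
 * asm-generic/atomic-long.h being deleted here only delegates
 * atomic_long_*() to atomic64_*() or atomic_*() depending on CONFIG_64BIT;
 * callers include <linux/atomic.h> and are unaffected.  Typical usage
 * stays as-is (nr_events is a hypothetical counter):
 */
static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

static void record_event(void)
{
	atomic_long_inc(&nr_events);		/* atomic increment */
}

static long snapshot_events(void)
{
	return atomic_long_read(&nr_events);	/* plain atomic load */
}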
-static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_acquire(i, v); -} - -static __always_inline long 
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_acquire(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_release(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_relaxed(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic_inc_and_test(v); 
-} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic_dec_if_positive(v); -} - -#endif /* CONFIG_64BIT */ -#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// a624200981f552b2c6be4f32fe44da8289f30d87 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 11f96f40f4a7..04b8be9f1a77 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Generic C implementation of atomic counter operations. Usable on - * UP systems only. Do not include in machine independent code. + * Generic C implementation of atomic counter operations. Do not include in + * machine independent code. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -12,56 +12,39 @@ #include <asm/cmpxchg.h> #include <asm/barrier.h> -/* - * atomic_$op() - $op integer to atomic variable - * @i: integer value to $op - * @v: pointer to the atomic variable - * - * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use - * smp_mb__{before,after}_atomic(). - */ - -/* - * atomic_$op_return() - $op interer to atomic variable and returns the result - * @i: integer value to $op - * @v: pointer to the atomic variable - * - * Atomically $ops @i to @v. Does imply a full memory barrier. 
- */ - #ifdef CONFIG_SMP /* we can build all atomic primitives from cmpxchg */ #define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void generic_atomic_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c c_op i; \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ { \ int c, old; \ \ c = v->counter; \ - while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c; \ @@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ #include <linux/irqflags.h> #define ATOMIC_OP(op, c_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void generic_atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ @@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ } #define ATOMIC_OP_RETURN(op, c_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int ret; \ @@ -110,87 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ #endif /* CONFIG_SMP */ -#ifndef atomic_add_return ATOMIC_OP_RETURN(add, +) -#endif - -#ifndef atomic_sub_return ATOMIC_OP_RETURN(sub, -) -#endif -#ifndef atomic_fetch_add ATOMIC_FETCH_OP(add, +) -#endif - -#ifndef atomic_fetch_sub ATOMIC_FETCH_OP(sub, -) -#endif - -#ifndef atomic_fetch_and ATOMIC_FETCH_OP(and, &) -#endif - -#ifndef atomic_fetch_or ATOMIC_FETCH_OP(or, |) -#endif - -#ifndef atomic_fetch_xor ATOMIC_FETCH_OP(xor, ^) -#endif -#ifndef atomic_and +ATOMIC_OP(add, +) +ATOMIC_OP(sub, -) ATOMIC_OP(and, &) -#endif - -#ifndef atomic_or ATOMIC_OP(or, |) -#endif - -#ifndef atomic_xor ATOMIC_OP(xor, ^) -#endif #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - */ - -/** - * atomic_read - read atomic variable - * @v: pointer of type atomic_t - * - * Atomically reads the value of @v. - */ -#ifndef atomic_read -#define atomic_read(v) READ_ONCE((v)->counter) -#endif - -/** - * atomic_set - set atomic variable - * @v: pointer of type atomic_t - * @i: required value - * - * Atomically sets the value of @v to @i. 
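/*
 * [Editor's illustration, not part of the patch] On SMP the
 * ATOMIC_FETCH_OP() macro above builds each operation from a
 * compare-and-swap retry loop over arch_cmpxchg().  Expanded by hand for
 * "add" (not meant to be compiled alongside the macro):
 */
static inline int generic_atomic_fetch_add(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;				/* racy initial read */
	while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
		c = old;			/* lost the race: retry */

	return c;				/* value before the add */
}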
- */ -#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +#define arch_atomic_add_return generic_atomic_add_return +#define arch_atomic_sub_return generic_atomic_sub_return -#include <linux/irqflags.h> +#define arch_atomic_fetch_add generic_atomic_fetch_add +#define arch_atomic_fetch_sub generic_atomic_fetch_sub +#define arch_atomic_fetch_and generic_atomic_fetch_and +#define arch_atomic_fetch_or generic_atomic_fetch_or +#define arch_atomic_fetch_xor generic_atomic_fetch_xor -static inline void atomic_add(int i, atomic_t *v) -{ - atomic_add_return(i, v); -} +#define arch_atomic_add generic_atomic_add +#define arch_atomic_sub generic_atomic_sub +#define arch_atomic_and generic_atomic_and +#define arch_atomic_or generic_atomic_or +#define arch_atomic_xor generic_atomic_xor -static inline void atomic_sub(int i, atomic_t *v) -{ - atomic_sub_return(i, v); -} +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) -#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 370f01d4450f..100d24b02e52 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -15,19 +15,17 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } -extern s64 atomic64_read(const atomic64_t *v); -extern void atomic64_set(atomic64_t *v, s64 i); - -#define atomic64_set_release(v, i) atomic64_set((v), (i)) +extern s64 generic_atomic64_read(const atomic64_t *v); +extern void generic_atomic64_set(atomic64_t *v, s64 i); #define ATOMIC64_OP(op) \ -extern void atomic64_##op(s64 a, atomic64_t *v); +extern void generic_atomic64_##op(s64 a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ -extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); +extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v); #define ATOMIC64_FETCH_OP(op) \ -extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); +extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -46,11 +44,32 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -extern s64 atomic64_dec_if_positive(atomic64_t *v); -#define atomic64_dec_if_positive atomic64_dec_if_positive -extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); -extern s64 atomic64_xchg(atomic64_t *v, s64 new); -extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); -#define atomic64_fetch_add_unless atomic64_fetch_add_unless +extern s64 generic_atomic64_dec_if_positive(atomic64_t *v); +extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); +extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new); +extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); + +#define arch_atomic64_read generic_atomic64_read +#define arch_atomic64_set generic_atomic64_set +#define arch_atomic64_set_release generic_atomic64_set + +#define arch_atomic64_add generic_atomic64_add +#define arch_atomic64_add_return generic_atomic64_add_return +#define arch_atomic64_fetch_add generic_atomic64_fetch_add +#define arch_atomic64_sub generic_atomic64_sub +#define arch_atomic64_sub_return generic_atomic64_sub_return +#define arch_atomic64_fetch_sub 
generic_atomic64_fetch_sub + +#define arch_atomic64_and generic_atomic64_and +#define arch_atomic64_fetch_and generic_atomic64_fetch_and +#define arch_atomic64_or generic_atomic64_or +#define arch_atomic64_fetch_or generic_atomic64_fetch_or +#define arch_atomic64_xor generic_atomic64_xor +#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor + +#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive +#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg +#define arch_atomic64_xchg generic_atomic64_xchg +#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless #endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 0e7316a86240..3096f086b5a3 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -11,25 +11,29 @@ * See Documentation/atomic_bitops.txt for details. */ -static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } -static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) if (READ_ONCE(*p) & mask) return 1; - old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p); return !!(old & mask); } -static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) if (!(READ_ONCE(*p) & mask)) return 0; - old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); return !!(old & mask); } -static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p); return !!(old & mask); } +#include <asm-generic/bitops/instrumented-atomic.h> + #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h index 1dacfdb4247e..7b129329046b 100644 --- a/include/asm-generic/bitops/builtin-ffs.h +++ 
b/include/asm-generic/bitops/builtin-ffs.h @@ -8,7 +8,7 @@ * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). + * differs in spirit from ffz (man ffs). */ #define ffs(x) __builtin_ffs(x) diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h index e81868b2c0f0..323fd5d6ae26 100644 --- a/include/asm-generic/bitops/ffs.h +++ b/include/asm-generic/bitops/ffs.h @@ -8,7 +8,7 @@ * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). + * differs in spirit from ffz (man ffs). */ static inline int ffs(int x) { diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 3ae021368f48..630f2f6b9595 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -7,7 +7,7 @@ #include <asm/barrier.h> /** - * test_and_set_bit_lock - Set a bit and return its old value, for lock + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * @@ -15,8 +15,8 @@ * the returned value is 0. * It can be used to implement bit locks. */ -static inline int test_and_set_bit_lock(unsigned int nr, - volatile unsigned long *p) +static __always_inline int +arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr, if (READ_ONCE(*p) & mask) return 1; - old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); return !!(old & mask); } /** - * clear_bit_unlock - Clear a bit in memory, for unlock + * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); } /** - * __clear_bit_unlock - Clear a bit in memory, for unlock + * arch___clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) * * See for example x86's implementation. */ -static inline void __clear_bit_unlock(unsigned int nr, - volatile unsigned long *p) +static inline void +arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { unsigned long old; p += BIT_WORD(nr); old = READ_ONCE(*p); old &= ~BIT_MASK(nr); - atomic_long_set_release((atomic_long_t *)p, old); + arch_atomic_long_set_release((atomic_long_t *)p, old); } /** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. + * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. 
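/*
 * [Editor's illustration, not part of the patch] The bit-lock idiom that
 * the arch_test_and_set_bit_lock()/arch_clear_bit_unlock() primitives in
 * the lock.h hunk back, reached through the instrumented wrappers that
 * instrumented-lock.h now layers on top; my_flags and MY_LOCK_BIT are
 * hypothetical:
 */
static unsigned long my_flags;
#define MY_LOCK_BIT	0

static void my_lock(void)
{
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();		/* spin until the old bit was 0 */
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);	/* release semantics */
}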
* @nr: the bit to clear * @addr: the address to start counting from * * This is a bit of a one-trick-pony for the filemap code, which clears * PG_locked and tests PG_waiters, */ -#ifndef clear_bit_unlock_is_negative_byte -static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) +#ifndef arch_clear_bit_unlock_is_negative_byte +static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, + volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte #endif +#include <asm-generic/bitops/instrumented-lock.h> + #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 7e10c4b50c5d..078cc68be2f1 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -5,7 +5,7 @@ #include <asm/types.h> /** - * __set_bit - Set a bit in memory + * arch___set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -13,24 +13,28 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static inline void __set_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___set_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p |= mask; } +#define __set_bit arch___set_bit -static inline void __clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___clear_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; } +#define __clear_bit arch___clear_bit /** - * __change_bit - Toggle a bit in memory + * arch___change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static inline void __change_bit(int nr, volatile unsigned long *addr) +static __always_inline +void arch___change_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p ^= mask; } +#define __change_bit arch___change_bit /** - * __test_and_set_bit - Set a bit and return its old value + * arch___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * @@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. 
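
The double-underscore variants renamed below are deliberately non-atomic: they compile to plain load/modify/store sequences and may be reordered, so they are only safe on bitmaps private to one context or already serialized externally. A hypothetical caller sketch (the bitmap and lock names are illustrative):

    DECLARE_BITMAP(pending, 64);
    static DEFINE_SPINLOCK(pending_lock);

    static void mark_pending(unsigned int nr)
    {
            spin_lock(&pending_lock);
            __set_bit(nr, pending);   /* no atomic RMW needed under the lock */
            spin_unlock(&pending_lock);
    }
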
*/ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +static __always_inline int +arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) *p = old | mask; return (old & mask) != 0; } +#define __test_and_set_bit arch___test_and_set_bit /** - * __test_and_clear_bit - Clear a bit and return its old value + * arch___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * @@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +static __always_inline int +arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) *p = old & ~mask; return (old & mask) != 0; } +#define __test_and_clear_bit arch___test_and_clear_bit /* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) +static __always_inline int +arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -95,15 +105,18 @@ static inline int __test_and_change_bit(int nr, *p = old ^ mask; return (old & mask) != 0; } +#define __test_and_change_bit arch___test_and_change_bit /** - * test_bit - Determine whether a bit is set + * arch_test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static inline int test_bit(int nr, const volatile unsigned long *addr) +static __always_inline int +arch_test_bit(unsigned int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } +#define test_bit arch_test_bit #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index b402494883b6..edb0e2a602a8 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -4,6 +4,7 @@ #include <linux/compiler.h> #include <linux/instrumentation.h> +#include <linux/once_lite.h> #define CUT_HERE "------------[ cut here ]------------\n" @@ -17,7 +18,8 @@ #endif #ifndef __ASSEMBLY__ -#include <linux/kernel.h> +#include <linux/panic.h> +#include <linux/printk.h> #ifdef CONFIG_BUG @@ -140,39 +142,15 @@ void __warn(const char *file, int line, void *caller, unsigned taint, }) #ifndef WARN_ON_ONCE -#define WARN_ON_ONCE(condition) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN_ON(1); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_ON_ONCE(condition) \ + DO_ONCE_LITE_IF(condition, WARN_ON, 1) #endif -#define WARN_ONCE(condition, format...) 
({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN(1, format); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_ONCE(condition, format...) \ + DO_ONCE_LITE_IF(condition, WARN, 1, format) -#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - WARN_TAINT(1, taint, format); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define WARN_TAINT_ONCE(condition, taint, format...) \ + DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format) #else /* !CONFIG_BUG */ #ifndef HAVE_ARCH_BUG diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index f17f14f84d09..380cdc824e4b 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr) * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned * long parameter, supporting various types of architectures. */ -static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, +static inline unsigned long __generic_cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long flags, prev; @@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, /* * Generic version of __cmpxchg64_local. Takes an u64 parameter. */ -static inline u64 __cmpxchg64_local_generic(volatile void *ptr, +static inline u64 __generic_cmpxchg64_local(volatile void *ptr, u64 old, u64 new) { u64 prev; diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index 9a24510cd8c1..dca4419922a9 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -14,16 +14,14 @@ #include <linux/types.h> #include <linux/irqflags.h> -#ifndef xchg - /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalidly-sized xchg(). */ -extern void __xchg_called_with_bad_pointer(void); +extern void __generic_xchg_called_with_bad_pointer(void); static inline -unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) { unsigned long ret, flags; @@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) #endif /* CONFIG_64BIT */ default: - __xchg_called_with_bad_pointer(); + __generic_xchg_called_with_bad_pointer(); return x; } } -#define xchg(ptr, x) ({ \ - ((__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ +#define generic_xchg(ptr, x) ({ \ + ((__typeof__(*(ptr))) \ + __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ }) -#endif /* xchg */ - /* * Atomic compare and exchange. 
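
All three *_ONCE macros converted above now funnel through one helper from <linux/once_lite.h>. Its expansion is essentially the pattern the macros used to open-code, a static flag kept in the .data.once section plus a single invocation of the wrapped macro; a rough paraphrase:

    #define DO_ONCE_LITE_IF(condition, func, ...)                          \
            ({                                                             \
                    static bool __section(".data.once") __already_done;    \
                    bool __ret_cond = !!(condition);                       \
                                                                           \
                    if (unlikely(__ret_cond && !__already_done)) {         \
                            __already_done = true;                         \
                            func(__VA_ARGS__);                             \
                    }                                                      \
                    unlikely(__ret_cond);                                  \
            })
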
*/ #include <asm-generic/cmpxchg-local.h> -#ifndef cmpxchg_local -#define cmpxchg_local(ptr, o, n) ({ \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))); \ +#define generic_cmpxchg_local(ptr, o, n) ({ \ + ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))); \ }) + +#define generic_cmpxchg64_local(ptr, o, n) \ + __generic_cmpxchg64_local((ptr), (o), (n)) + + +#ifndef arch_xchg +#define arch_xchg generic_xchg +#endif + +#ifndef arch_cmpxchg_local +#define arch_cmpxchg_local generic_cmpxchg_local #endif -#ifndef cmpxchg64_local -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#ifndef arch_cmpxchg64_local +#define arch_cmpxchg64_local generic_cmpxchg64_local #endif -#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) -#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) +#define arch_cmpxchg arch_cmpxchg_local +#define arch_cmpxchg64 arch_cmpxchg64_local #endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h index 30f7b18a36f9..d46c0201cc34 100644 --- a/include/asm-generic/compat.h +++ b/include/asm-generic/compat.h @@ -20,7 +20,18 @@ typedef u16 compat_ushort_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u32 compat_uptr_t; +typedef u32 compat_caddr_t; typedef u32 compat_aio_context_t; +typedef u32 compat_old_sigset_t; + +#ifndef __compat_uid32_t +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; +#endif + +#ifndef compat_mode_t +typedef u32 compat_mode_t; +#endif #ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT typedef s64 __attribute__((aligned(4))) compat_s64; @@ -30,4 +41,10 @@ typedef s64 compat_s64; typedef u64 compat_u64; #endif +#ifndef _COMPAT_NSIG +typedef u32 compat_sigset_word; +#define _COMPAT_NSIG _NSIG +#define _COMPAT_NSIG_BPW 32 +#endif + #endif diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index cd905b44a630..13f5aa68a455 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -57,17 +57,11 @@ /* * If the divisor happens to be constant, we determine the appropriate * inverse at compile time to turn the division into a few inline - * multiplications which ought to be much faster. And yet only if compiling - * with a sufficiently recent gcc version to perform proper 64-bit constant - * propagation. + * multiplications which ought to be much faster. * * (It is unfortunate that gcc doesn't perform all this internally.) 
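
The __div64_const32_is_OK guard removed in the div64.h hunks only mattered for pre-4 gcc, well below the kernel's current minimum compiler, so the constant-divisor reciprocal path is now taken whenever the base is a compile-time constant. Callers of do_div() are unaffected; as a reminder of its in-place semantics:

    u64 ns = 123456789012ULL;
    u32 rem;

    rem = do_div(ns, NSEC_PER_SEC);  /* divides in place: ns becomes the quotient */
    /* here ns == 123 and rem == 456789012 */
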
*/ -#ifndef __div64_const32_is_OK -#define __div64_const32_is_OK (__GNUC__ >= 4) -#endif - #define __div64_const32(n, ___b) \ ({ \ /* \ @@ -230,8 +224,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); is_power_of_2(__base)) { \ __rem = (n) & (__base - 1); \ (n) >>= ilog2(__base); \ - } else if (__div64_const32_is_OK && \ - __builtin_constant_p(__base) && \ + } else if (__builtin_constant_p(__base) && \ __base != 0) { \ uint32_t __res_lo, __n_lo = (n); \ (n) = __div64_const32(n, __base); \ @@ -241,8 +234,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); } else if (likely(((n) >> 32) == 0)) { \ __rem = (uint32_t)(n) % __base; \ (n) = (uint32_t)(n) / __base; \ - } else \ + } else { \ __rem = __div64_32(&(n), __base); \ + } \ __rem; \ }) diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9def22e6e2b3..9d0479f50f97 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr, extern void early_iounmap(void __iomem *addr, unsigned long size); extern void early_memunmap(void *addr, unsigned long size); -/* - * Weak function called by early_ioremap_reset(). It does nothing, but - * architectures may provide their own version to do any needed cleanups. - */ -extern void early_ioremap_shutdown(void); - #if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) /* Arch-specific initialization */ extern void early_ioremap_init(void); diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index 515c3fb06ab3..56348a541c50 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -194,6 +194,7 @@ enum HV_GENERIC_SET_FORMAT { #define HV_STATUS_INVALID_HYPERCALL_INPUT 3 #define HV_STATUS_INVALID_ALIGNMENT 4 #define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_ACCESS_DENIED 6 #define HV_STATUS_OPERATION_DENIED 8 #define HV_STATUS_INSUFFICIENT_MEMORY 11 #define HV_STATUS_INVALID_PORT_ID 17 diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h new file mode 100644 index 000000000000..a53116b8c57e --- /dev/null +++ b/include/asm-generic/logic_io.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: johannes@sipsolutions.net + */ +#ifndef _LOGIC_IO_H +#define _LOGIC_IO_H +#include <linux/types.h> + +/* include this file into asm/io.h */ + +#ifdef CONFIG_INDIRECT_IOMEM + +#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK +/* + * If you want emulated IO memory to fall back to 'normal' IO memory + * if a region wasn't registered as emulated, then you need to have + * all of the real_* functions implemented. + */ +#if !defined(real_ioremap) || !defined(real_iounmap) || \ + !defined(real_raw_readb) || !defined(real_raw_writeb) || \ + !defined(real_raw_readw) || !defined(real_raw_writew) || \ + !defined(real_raw_readl) || !defined(real_raw_writel) || \ + (defined(CONFIG_64BIT) && \ + (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \ + !defined(real_memset_io) || \ + !defined(real_memcpy_fromio) || \ + !defined(real_memcpy_toio) +#error "Must provide fallbacks for real IO memory access" +#endif /* defined ... 
*/ +#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */ + +#define ioremap ioremap +void __iomem *ioremap(phys_addr_t offset, size_t size); + +#define iounmap iounmap +void iounmap(void __iomem *addr); + +#define __raw_readb __raw_readb +u8 __raw_readb(const volatile void __iomem *addr); + +#define __raw_readw __raw_readw +u16 __raw_readw(const volatile void __iomem *addr); + +#define __raw_readl __raw_readl +u32 __raw_readl(const volatile void __iomem *addr); + +#ifdef CONFIG_64BIT +#define __raw_readq __raw_readq +u64 __raw_readq(const volatile void __iomem *addr); +#endif /* CONFIG_64BIT */ + +#define __raw_writeb __raw_writeb +void __raw_writeb(u8 value, volatile void __iomem *addr); + +#define __raw_writew __raw_writew +void __raw_writew(u16 value, volatile void __iomem *addr); + +#define __raw_writel __raw_writel +void __raw_writel(u32 value, volatile void __iomem *addr); + +#ifdef CONFIG_64BIT +#define __raw_writeq __raw_writeq +void __raw_writeq(u64 value, volatile void __iomem *addr); +#endif /* CONFIG_64BIT */ + +#define memset_io memset_io +void memset_io(volatile void __iomem *addr, int value, size_t size); + +#define memcpy_fromio memcpy_fromio +void memcpy_fromio(void *buffer, const volatile void __iomem *addr, + size_t size); + +#define memcpy_toio memcpy_toio +void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size); + +#endif /* CONFIG_INDIRECT_IOMEM */ +#endif /* _LOGIC_IO_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 7637fb46ba4f..a2c8ed60233a 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -6,47 +6,18 @@ #ifndef __ASSEMBLY__ +/* + * supports 3 memory models. + */ #if defined(CONFIG_FLATMEM) #ifndef ARCH_PFN_OFFSET #define ARCH_PFN_OFFSET (0UL) #endif -#elif defined(CONFIG_DISCONTIGMEM) - -#ifndef arch_pfn_to_nid -#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) -#endif - -#ifndef arch_local_page_offset -#define arch_local_page_offset(pfn, nid) \ - ((pfn) - NODE_DATA(nid)->node_start_pfn) -#endif - -#endif /* CONFIG_DISCONTIGMEM */ - -/* - * supports 3 memory models. 
- */ -#if defined(CONFIG_FLATMEM) - #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) -#elif defined(CONFIG_DISCONTIGMEM) - -#define __pfn_to_page(pfn) \ -({ unsigned long __pfn = (pfn); \ - unsigned long __nid = arch_pfn_to_nid(__pfn); \ - NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ -}) - -#define __page_to_pfn(pg) \ -({ const struct page *__pg = (pg); \ - struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ - (unsigned long)(__pg - __pgdat->node_mem_map) + \ - __pgdat->node_start_pfn; \ -}) #elif defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -70,7 +41,7 @@ struct mem_section *__sec = __pfn_to_section(__pfn); \ __section_mem_map_addr(__sec) + __pfn; \ }) -#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +#endif /* CONFIG_FLATMEM/SPARSEMEM */ /* * Convert a physical address to a Page Frame Number and back diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 9a000ba2bb75..c1ab6a6e72b5 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -22,6 +22,7 @@ #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/cpumask.h> +#include <linux/nmi.h> #include <asm/ptrace.h> #include <asm/hyperv-tlfs.h> @@ -38,6 +39,9 @@ struct ms_hyperv_info { }; extern struct ms_hyperv_info ms_hyperv; +extern void __percpu **hyperv_pcpu_input_arg; +extern void __percpu **hyperv_pcpu_output_arg; + extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr); extern u64 hv_do_fast_hypercall8(u16 control, u64 input8); @@ -151,6 +155,8 @@ void hv_remove_crash_handler(void); extern int vmbus_interrupt; extern int vmbus_irq; +extern bool hv_root_partition; + #if IS_ENABLED(CONFIG_HYPERV) /* * Hypervisor's notion of virtual processor ID is different from @@ -161,9 +167,16 @@ extern int vmbus_irq; extern u32 *hv_vp_index; extern u32 hv_max_vp_index; +extern u64 (*hv_read_reference_counter)(void); + /* Sentinel value for an uninitialized entry in hv_vp_index array */ #define VP_INVAL U32_MAX +int __init hv_common_init(void); +void __init hv_common_free(void); +int hv_common_cpu_init(unsigned int cpu); +int hv_common_cpu_die(unsigned int cpu); + void *hv_alloc_hyperv_page(void); void *hv_alloc_hyperv_zeroed_page(void); void hv_free_hyperv_page(unsigned long addr); diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index d4f16dcc2ed7..df636c6d8e6c 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -52,4 +52,4 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, } #endif -#endif /* __ASM_GENERIC_IO_H */ +#endif /* __ASM_GENERIC_PCI_IOMAP_H */ diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h index ce2cbb3c380f..03b7dae47dd4 100644 --- a/include/asm-generic/pgtable-nop4d.h +++ b/include/asm-generic/pgtable-nop4d.h @@ -9,7 +9,6 @@ typedef struct { pgd_t pgd; } p4d_t; #define P4D_SHIFT PGDIR_SHIFT -#define MAX_PTRS_PER_P4D 1 #define PTRS_PER_P4D 1 #define P4D_SIZE (1UL << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE-1)) @@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) #define __p4d(x) ((p4d_t) { __pgd(x) }) #define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) -#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) +#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd }))) /* * allocating and freeing a p4d is trivial: the 1-entry p4d is diff 
--git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 3e13acd019ae..10789cf51d16 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -51,7 +51,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) #define __pmd(x) ((pmd_t) { __pud(x) } ) #define pud_page(pud) (pmd_page((pmd_t){ pud })) -#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) +#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud }))) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index a9d751fbda9e..eb70c6d7ceff 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) #define __pud(x) ((pud_t) { __p4d(x) }) #define p4d_page(p4d) (pud_page((pud_t){ p4d })) -#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) +#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d }))) /* * allocating and freeing a pud is trivial: the 1-entry pud is diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index d683f5e6d791..b4d43a4af5f7 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc) } while (0) #define init_idle_preempt_count(p, cpu) do { \ - task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ } while (0) static __always_inline void set_preempt_need_resched(void) diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 5aa8705df87e..4dbe715be65b 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -45,7 +45,7 @@ #endif #ifndef cpumask_of_node - #ifdef CONFIG_NEED_MULTIPLE_NODES + #ifdef CONFIG_NUMA #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else #define cpumask_of_node(node) ((void)(node), cpu_online_mask) diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 4973328f3c6e..10ffa8b5c117 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -19,7 +19,7 @@ __get_user_fn(size_t size, const void __user *from, void *to) switch (size) { case 1: - *(u8 *)to = get_unaligned((u8 __force *)from); + *(u8 *)to = *((u8 __force *)from); return 0; case 2: *(u16 *)to = get_unaligned((u16 __force *)from); @@ -45,7 +45,7 @@ __put_user_fn(size_t size, void __user *to, void *from) switch (size) { case 1: - put_unaligned(*(u8 *)from, (u8 __force *)to); + *(u8 __force *)to = *(u8 *)from; return 0; case 2: put_unaligned(*(u16 *)from, (u16 __force *)to); @@ -119,6 +119,11 @@ static inline void set_fs(mm_segment_t fs) #ifndef uaccess_kernel #define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif + +#ifndef user_addr_max +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) +#endif + #endif /* CONFIG_SET_FS */ #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) @@ -244,50 +249,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) extern int __get_user_bad(void) __attribute__((noreturn)); /* - * Copy a null terminated string from userspace. 
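
In the __get_user_fn()/__put_user_fn() hunks above, the single-byte cases drop get_unaligned()/put_unaligned() because a one-byte access can never be misaligned; the plain dereference generates the same code with less ceremony. These helpers back the usual get_user()/put_user() fast path; a hypothetical caller for context (the function name is illustrative):

    static int bump_user_counter(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))        /* returns -EFAULT on a bad address */
                    return -EFAULT;
            return put_user(val + 1, uptr);
    }
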
- */ -#ifndef __strncpy_from_user -static inline long -__strncpy_from_user(char *dst, const char __user *src, long count) -{ - char *tmp; - strncpy(dst, (const char __force *)src, count); - for (tmp = dst; *tmp && count > 0; tmp++, count--) - ; - return (tmp - dst); -} -#endif - -static inline long -strncpy_from_user(char *dst, const char __user *src, long count) -{ - if (!access_ok(src, 1)) - return -EFAULT; - return __strncpy_from_user(dst, src, count); -} - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -#ifndef __strnlen_user -#define __strnlen_user(s, n) (strnlen((s), (n)) + 1) -#endif - -/* - * Unlike strnlen, strnlen_user includes the nul terminator in - * its returned count. Callers should check for a returned value - * greater than N as an indication the string is too long. - */ -static inline long strnlen_user(const char __user *src, long n) -{ - if (!access_ok(src, 1)) - return 0; - return __strnlen_user(src, n); -} - -/* * Zero Userspace */ #ifndef __clear_user @@ -311,4 +272,8 @@ clear_user(void __user *to, unsigned long n) #include <asm/extable.h> +__must_check long strncpy_from_user(char *dst, const char __user *src, + long count); +__must_check long strnlen_user(const char __user *src, long n); + #endif /* __ASM_GENERIC_UACCESS_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 374c940e9be1..1c4242416c9f 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -6,31 +6,124 @@ * This is the most generic implementation of unaligned accesses * and should work almost anywhere. */ +#include <linux/unaligned/packed_struct.h> #include <asm/byteorder.h> -/* Set by the arch if it can handle unaligned accesses in hardware. 
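
The inline strncpy_from_user()/strnlen_user() fallbacks deleted above are replaced by the common out-of-line library implementations, now declared at the bottom of the file with __must_check. The return-value convention is easy to get wrong, so a usage sketch (the buffer name is illustrative):

    char name[64];
    long len;

    len = strncpy_from_user(name, uname, sizeof(name));
    if (len < 0)
            return len;                 /* -EFAULT */
    if (len == sizeof(name))
            return -ENAMETOOLONG;       /* did not fit; name[] is not NUL-terminated */
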
*/ -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include <linux/unaligned/access_ok.h> -#endif - -#if defined(__LITTLE_ENDIAN) -# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include <linux/unaligned/le_struct.h> -# include <linux/unaligned/be_byteshift.h> -# endif -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_le -# define put_unaligned __put_unaligned_le -#elif defined(__BIG_ENDIAN) -# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -# include <linux/unaligned/be_struct.h> -# include <linux/unaligned/le_byteshift.h> -# endif -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_be -# define put_unaligned __put_unaligned_be -#else -# error need to define endianess -#endif +#define __get_unaligned_t(type, ptr) ({ \ + const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x; \ +}) + +#define __put_unaligned_t(type, val, ptr) do { \ + struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ + __pptr->x = (val); \ +} while (0) + +#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) +#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr)) + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_t(__le16, p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_t(__le32, p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_t(__le64, p)); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_t(__le16, cpu_to_le16(val), p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_t(__le32, cpu_to_le32(val), p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_t(__le64, cpu_to_le64(val), p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_t(__be16, p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_t(__be32, p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_t(__be64, p)); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_t(__be16, cpu_to_be16(val), p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_t(__be32, cpu_to_be32(val), p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_t(__be64, cpu_to_be64(val), p); +} + +static inline u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} #endif /* __ASM_GENERIC_UNALIGNED_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 40a9c101565e..aa50bf2959fe 
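
With the unaligned.h rewrite above there is a single implementation for all architectures: wrapping the access in a __packed one-member struct tells the compiler the pointer may be misaligned, and it emits either a plain load/store or a byte-wise sequence depending on the target, which is what makes the old CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS header split unnecessary. A small parsing sketch using the new helpers (buffer contents illustrative):

    u8 buf[9] = { 0x78, 0x56, 0x34, 0x12,   /* 0x12345678, little-endian */
                  0xab, 0xcd,               /* 0xabcd, big-endian */
                  0x01, 0x02, 0x03 };       /* 0x030201, little-endian 24-bit */

    u32 a = get_unaligned_le32(buf);        /* a == 0x12345678 */
    u16 b = get_unaligned_be16(buf + 4);    /* b == 0xabcd */
    u32 c = get_unaligned_le24(buf + 6);    /* c == 0x030201 */

    put_unaligned_le32(a + 1, buf);         /* store back, alignment-safe */
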
100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -483,6 +483,8 @@ \ TRACEDATA \ \ + PRINTK_INDEX \ + \ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ __start___ksymtab = .; \ @@ -586,6 +588,7 @@ NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ + *(.text.asan.* .text.tsan.*) \ TEXT_CFI_JT \ MEM_KEEP(init.text*) \ MEM_KEEP(exit.text*) \ @@ -893,6 +896,17 @@ #define TRACEDATA #endif +#ifdef CONFIG_PRINTK_INDEX +#define PRINTK_INDEX \ + .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \ + __start_printk_index = .; \ + *(.printk_index) \ + __stop_printk_index = .; \ + } +#else +#define PRINTK_INDEX +#endif + #define NOTES \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ __start_notes = .; \ @@ -960,6 +974,7 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT #define PERCPU_DECRYPTED_SECTION \ . = ALIGN(PAGE_SIZE); \ + *(.data..decrypted) \ *(.data..percpu..decrypted) \ . = ALIGN(PAGE_SIZE); #else diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h index b6774aa5a4b8..b3f5d73ae1d6 100644 --- a/include/clocksource/hyperv_timer.h +++ b/include/clocksource/hyperv_timer.h @@ -20,6 +20,8 @@ #define HV_MAX_MAX_DELTA_TICKS 0xffffffff #define HV_MIN_DELTA_TICKS 1 +#ifdef CONFIG_HYPERV_TIMER + /* Routines called by the VMbus driver */ extern int hv_stimer_alloc(bool have_percpu_irqs); extern int hv_stimer_cleanup(unsigned int cpu); @@ -28,8 +30,6 @@ extern void hv_stimer_legacy_cleanup(unsigned int cpu); extern void hv_stimer_global_cleanup(void); extern void hv_stimer0_isr(void); -#ifdef CONFIG_HYPERV_TIMER -extern u64 (*hv_read_reference_counter)(void); extern void hv_init_clocksource(void); extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void); @@ -100,6 +100,13 @@ static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, { return U64_MAX; } + +static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; } +static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {} +static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {} +static inline void hv_stimer_global_cleanup(void) {} +static inline void hv_stimer0_isr(void) {} + #endif /* CONFIG_HYPERV_TIMER */ #endif diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h index c395238d0922..9b435caa95fe 100644 --- a/include/clocksource/samsung_pwm.h +++ b/include/clocksource/samsung_pwm.h @@ -27,6 +27,7 @@ struct samsung_pwm_variant { }; void samsung_pwm_clocksource_init(void __iomem *base, - unsigned int *irqs, struct samsung_pwm_variant *variant); + unsigned int *irqs, + const struct samsung_pwm_variant *variant); #endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */ diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 4c61dade8835..f6da8a132639 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -74,6 +74,7 @@ #define OMAP_TIMER_ERRATA_I103_I767 0x80000000 struct timer_regs { + u32 ocp_cfg; u32 tidr; u32 tier; u32 twer; diff --git a/include/crypto/aead.h b/include/crypto/aead.h index e728469c4ccc..5af914c1ab8e 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -490,7 +490,7 @@ static inline void aead_request_set_callback(struct aead_request *req, * The memory structure for cipher operation has the following structure: * * - AEAD encryption input: assoc data || plaintext - * - AEAD encryption output: assoc data || cipherntext || auth tag + * - AEAD encryption output: assoc data || ciphertext || auth 
tag * - AEAD decryption input: assoc data || ciphertext || auth tag * - AEAD decryption output: assoc data || plaintext * diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 86f0748009af..5f6841c73e5a 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -96,6 +96,15 @@ struct scatter_walk { unsigned int offset; }; +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + void crypto_mod_put(struct crypto_alg *alg); int crypto_register_template(struct crypto_template *tmpl); @@ -118,7 +127,6 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); -int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, struct crypto_alg *alg); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 3f06e40d063a..26cac19b0f46 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -28,7 +28,7 @@ * of a failed backlog request * crypto-engine, in head position to keep order * @list: link with the global crypto engine list - * @queue_lock: spinlock to syncronise access to request queue + * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine * @rt: whether this queue is set to run as a realtime task * @prepare_crypt_hardware: a request will soon arrive from the queue diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b2bc1e46e86a..f140e4643949 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -458,7 +458,7 @@ int crypto_ahash_finup(struct ahash_request *req); * * Return: * 0 if the message digest was successfully calculated; - * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later; + * -EINPROGRESS if data is fed into hardware (DMA) or queued for later; * -EBUSY if queue is full and request should be resubmitted later; * other < 0 if an error occurred */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 0a288dddcf5b..25806141db59 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); -int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen); - -static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) -{ - return alg->setkey != shash_no_setkey; -} +bool crypto_shash_alg_has_setkey(struct shash_alg *alg); static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 47accec68cb0..f603325c0c30 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key); struct public_key_signature { struct asymmetric_key_id *auth_ids[2]; u8 *s; /* Signature */ - u32 s_size; /* Number of bytes in signature */ u8 *digest; - u8 digest_size; /* Number of bytes in digest */ + u32 s_size; /* Number of bytes in signature */ + u32 digest_size; /* Number of bytes in digest */ const char *pkey_algo; const char *hash_algo; const char *encoding; diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 
c837d0775474..7af08174a721 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -81,12 +81,7 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, struct page *page; page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as - * PageSlab cannot be optimised away per se due to - * use of volatile pointer. - */ - if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) - flush_dcache_page(page); + flush_dcache_page(page); } if (more && walk->offset >= walk->sg->offset + walk->sg->length) diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h index 7afd730d16ff..709f286e7b25 100644 --- a/include/crypto/sm4.h +++ b/include/crypto/sm4.h @@ -3,6 +3,7 @@ /* * Common values for the SM4 algorithm * Copyright (C) 2018 ARM Limited or its affiliates. + * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #ifndef _CRYPTO_SM4_H @@ -15,17 +16,29 @@ #define SM4_BLOCK_SIZE 16 #define SM4_RKEY_WORDS 32 -struct crypto_sm4_ctx { +struct sm4_ctx { u32 rkey_enc[SM4_RKEY_WORDS]; u32 rkey_dec[SM4_RKEY_WORDS]; }; -int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len); -int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, +/** + * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + */ +int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key, unsigned int key_len); -void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); -void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +/** + * sm4_crypt_block - Encrypt or decrypt a single SM4 block + * @rk: The rkey_enc for encrypt or rkey_dec for decrypt + * @out: Buffer to store output data + * @in: Buffer containing the input data + */ +void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in); #endif diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 336e36506910..0f66a0d9f06d 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -53,12 +53,15 @@ enum amd_asic_type { CHIP_RENOIR, /* 24 */ CHIP_ALDEBARAN, /* 25 */ CHIP_NAVI10, /* 26 */ - CHIP_NAVI14, /* 27 */ - CHIP_NAVI12, /* 28 */ - CHIP_SIENNA_CICHLID, /* 29 */ - CHIP_NAVY_FLOUNDER, /* 30 */ - CHIP_VANGOGH, /* 31 */ - CHIP_DIMGREY_CAVEFISH, /* 32 */ + CHIP_CYAN_SKILLFISH, /* 27 */ + CHIP_NAVI14, /* 28 */ + CHIP_NAVI12, /* 29 */ + CHIP_SIENNA_CICHLID, /* 30 */ + CHIP_NAVY_FLOUNDER, /* 31 */ + CHIP_VANGOGH, /* 32 */ + CHIP_DIMGREY_CAVEFISH, /* 33 */ + CHIP_BEIGE_GOBY, /* 34 */ + CHIP_YELLOW_CARP, /* 35 */ CHIP_LAST, }; diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index ea34ca146b82..2a1f85f9a8a3 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -126,6 +126,8 @@ struct dw_hdmi_phy_ops { struct dw_hdmi_plat_data { struct regmap *regm; + unsigned int output_port; + unsigned long input_bus_encoding; bool use_drm_infoframe; bool ycbcr_420_allowed; @@ -153,6 +155,8 @@ struct dw_hdmi_plat_data { const struct dw_hdmi_phy_config *phy_config; int (*configure_phy)(struct dw_hdmi *hdmi, void *data, unsigned long mpixelclock); + + unsigned int disable_cec : 1; }; struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, diff --git 
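
The renamed SM4 library API above is self-contained (no crypto_tfm required): expand the key once, then pass rkey_enc or rkey_dec per block. A minimal sketch, assuming SM4_KEY_SIZE (16 bytes) as defined elsewhere in sm4.h and illustrative key/in buffers:

    struct sm4_ctx ctx;
    u8 out[SM4_BLOCK_SIZE], back[SM4_BLOCK_SIZE];

    if (sm4_expandkey(&ctx, key, SM4_KEY_SIZE))
            return -EINVAL;                    /* invalid key length */

    sm4_crypt_block(ctx.rkey_enc, out, in);    /* encrypt one 16-byte block */
    sm4_crypt_block(ctx.rkey_dec, back, out);  /* decrypt it again */
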
a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h deleted file mode 100644 index f3136750c490..000000000000 --- a/include/drm/drm_agpsupport.h +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _DRM_AGPSUPPORT_H_ -#define _DRM_AGPSUPPORT_H_ - -#include <linux/agp_backend.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/mm.h> -#include <linux/mutex.h> -#include <linux/types.h> -#include <uapi/drm/drm.h> - -struct drm_device; -struct drm_file; - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_AGP) - -struct drm_agp_head *drm_agp_init(struct drm_device *dev); -void drm_legacy_agp_clear(struct drm_device *dev); -int drm_agp_acquire(struct drm_device *dev); -int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_release(struct drm_device *dev); -int drm_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - -#else /* CONFIG_AGP */ - -static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline void drm_legacy_agp_clear(struct drm_device *dev) -{ -} - -static inline int drm_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -#endif /* CONFIG_AGP */ - -#endif /* _DRM_AGPSUPPORT_H_ */ diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h new file mode 100644 index 000000000000..7096703c3949 --- /dev/null +++ b/include/drm/drm_aperture.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef 
_DRM_APERTURE_H_ +#define _DRM_APERTURE_H_ + +#include <linux/types.h> + +struct drm_device; +struct drm_driver; +struct pci_dev; + +int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, + resource_size_t size); + +int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, + bool primary, const struct drm_driver *req_driver); + +int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const struct drm_driver *req_driver); + +/** + * drm_aperture_remove_framebuffers - remove all existing framebuffers + * @primary: also kick vga16fb if present + * @req_driver: requesting DRM driver + * + * This function removes all graphics device drivers. Use this function on systems + * that can have their framebuffer located anywhere in memory. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +static inline int +drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver) +{ + return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, + req_driver); +} + +#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index ac5a28eff2c8..1701c2128a5c 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -896,6 +896,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); (new_plane_state) = (__state)->planes[__i].new_state, 1)) /** + * for_each_new_plane_in_state_reverse - other than only tracking new state, + * it's the same as for_each_oldnew_plane_in_state_reverse + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + */ +#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** * for_each_old_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 6bf8b2b78991..ba248ca8866f 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -58,12 +58,6 @@ struct drm_lock_data { * @refcount: Refcount for this master object. * @dev: Link back to the DRM device * @driver_priv: Pointer to driver-private information. - * @lessor: Lease holder - * @lessee_id: id for lessees. Owners always have id 0 - * @lessee_list: other lessees of the same master - * @lessees: drm_masters leasing from this one - * @leases: Objects leased to this drm_master. - * @lessee_idr: All lessees under this owner (only used where lessor == NULL) * * Note that master structures are only relevant for the legacy/primary device * nodes, hence there can only be one per device, not one per drm_minor. @@ -88,17 +82,68 @@ struct drm_master { struct idr magic_map; void *driver_priv; - /* Tree of display resource leases, each of which is a drm_master struct - * All of these get activated simultaneously, so drm_device master points - * at the top of the tree (for which lessor is NULL). Protected by - * &drm_device.mode_config.idr_mutex. 
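
The drm_aperture helpers introduced above are typically called at the very top of a DRM driver's probe routine, before the driver touches the hardware, so that any generic framebuffer driver (efifb, simpledrm and friends) sitting on the same aperture is evicted first. A hypothetical PCI probe sketch (my_drm_driver and the function name are illustrative):

    static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            int ret;

            /* Kick out generic framebuffer drivers using this device's aperture. */
            ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &my_drm_driver);
            if (ret)
                    return ret;

            /* ...normal device initialization continues here... */
            return 0;
    }
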
+ /** + * @lessor: + * + * Lease grantor, only set if this &struct drm_master represents a + * lessee holding a lease of objects from @lessor. Full owners of the + * device have this set to NULL. + * + * The lessor does not change once it's set in drm_lease_create(), and + * each lessee holds a reference to its lessor that it releases upon + * being destroyed in drm_lease_destroy(). + * + * See also the :ref:`section on display resource leasing + * <drm_leasing>`. */ - struct drm_master *lessor; + + /** + * @lessee_id: + * + * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0. + * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. + */ int lessee_id; + + /** + * @lessee_list: + * + * List entry of lessees of @lessor, where they are linked to @lessees. + * Not used for owners. Protected by &drm_device.mode_config's + * &drm_mode_config.idr_mutex. + */ struct list_head lessee_list; + + /** + * @lessees: + * + * List of drm_masters leasing from this one. Protected by + * &drm_device.mode_config's &drm_mode_config.idr_mutex. + * + * This list is empty if no leases have been granted, or if all lessees + * have been destroyed. Since lessors are referenced by all their + * lessees, this master cannot be destroyed unless the list is empty. + */ struct list_head lessees; + + /** + * @leases: + * + * Objects leased to this drm_master. Protected by + * &drm_device.mode_config's &drm_mode_config.idr_mutex. + * + * Objects are leased all together in drm_lease_create(), and are + * removed all together when the lease is revoked. + */ struct idr leases; + + /** + * @lessee_idr: + * + * All lessees under this owner (only used where @lessor is NULL). + * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. + */ struct idr lessee_idr; /* private: */ #if IS_ENABLED(CONFIG_DRM_LEGACY) @@ -107,6 +152,7 @@ struct drm_master { }; struct drm_master *drm_master_get(struct drm_master *master); +struct drm_master *drm_file_get_master(struct drm_file *file_priv); void drm_master_put(struct drm_master **master); bool drm_is_current_master(struct drm_file *fpriv); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 2195daa289d2..46bdfa48c413 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -171,6 +171,11 @@ struct drm_bridge_funcs { * signals) feeding it is still running when this callback is called. * * The @disable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_disable. */ void (*disable)(struct drm_bridge *bridge); @@ -190,6 +195,11 @@ struct drm_bridge_funcs { * called. * * The @post_disable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_post_disable. */ void (*post_disable)(struct drm_bridge *bridge); @@ -215,9 +225,9 @@ struct drm_bridge_funcs { * * NOTE: * - * If a need arises to store and access modes adjusted for other - * locations than the connection between the CRTC and the first bridge, - * the DRM framework will have to be extended with DRM bridge states. + * This is deprecated, do not use! + * New drivers shall set their mode in the + * &drm_bridge_funcs.atomic_enable operation. */ void (*mode_set)(struct drm_bridge *bridge, const struct drm_display_mode *mode, @@ -239,6 +249,11 @@ struct drm_bridge_funcs { * there is one) when this callback is called. * * The @pre_enable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! 
+ * New drivers shall use &drm_bridge_funcs.atomic_pre_enable. */ void (*pre_enable)(struct drm_bridge *bridge); @@ -259,6 +274,11 @@ struct drm_bridge_funcs { * chain if there is one. * * The @enable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_enable. */ void (*enable)(struct drm_bridge *bridge); diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index e9ad4863d915..cc9de1632dd3 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,6 +35,8 @@ #include <linux/scatterlist.h> +struct dma_buf_map; + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_sg(struct sg_table *st); void drm_clflush_virt_range(void *addr, unsigned long length); @@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void) #endif } +void drm_memcpy_init_early(void); + +void drm_memcpy_from_wc(struct dma_buf_map *dst, + const struct dma_buf_map *src, + unsigned long len); #endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 1922b278ffad..1647960c9e50 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -848,6 +848,11 @@ struct drm_connector_funcs { * locks to avoid races with concurrent modeset changes need to use * &drm_connector_helper_funcs.detect_ctx instead. * + * Also note that this callback can be called no matter the + * state the connector is in. Drivers that need the underlying + * device to be powered to perform the detection will first need + * to make sure it's been properly enabled. + * * RETURNS: * * drm_connector_status indicating the connector's status. @@ -1671,6 +1676,10 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, u32 scaling_mode_mask); int drm_connector_attach_vrr_capable_property( struct drm_connector *connector); +int drm_connector_attach_colorspace_property(struct drm_connector *connector); +int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector); +bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state, + struct drm_connector_state *new_state); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); @@ -1731,6 +1740,11 @@ void drm_mode_put_tile_group(struct drm_device *dev, * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and * drm_connector_list_iter_next() respectively the convenience macro * drm_for_each_connector_iter(). + * + * Note that the return value of drm_connector_list_iter_next() is only valid + * up to the next drm_connector_list_iter_next() or + * drm_connector_list_iter_end() call. If you want to use the connector later, + * then you need to grab your own reference first using drm_connector_get(). 
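
The reference-lifetime note added above pairs with the canonical iteration pattern: connectors obtained from the iterator are borrowed, and anything kept past the current step needs its own reference via drm_connector_get(). Sketch (do_something() is illustrative):

    struct drm_connector_list_iter iter;
    struct drm_connector *connector;

    drm_connector_list_iter_begin(dev, &iter);
    drm_for_each_connector_iter(connector, &iter) {
            /* connector is only guaranteed valid within this iteration */
            if (connector->status == connector_status_connected)
                    do_something(connector);
    }
    drm_connector_list_iter_end(&iter);
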
*/ struct drm_connector_list_iter { /* private: */ diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h index 40c34a5bf149..effda42cce31 100644 --- a/include/drm/drm_damage_helper.h +++ b/include/drm/drm_damage_helper.h @@ -64,7 +64,6 @@ struct drm_atomic_helper_damage_iter { bool full_update; }; -void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state, struct drm_plane_state *plane_state); int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb, @@ -82,21 +81,4 @@ bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state, struct drm_plane_state *state, struct drm_rect *rect); -/** - * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect. - * @state: Plane state. - * - * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect - * can be obtained by simply typecasting &drm_mode_rect. This is because both - * are signed 32 and during drm_atomic_check_only() it is verified that damage - * clips are inside fb. - * - * Return: Clips in plane fb_damage_clips blob property. - */ -static inline struct drm_rect * -drm_helper_get_plane_damage_clips(const struct drm_plane_state *state) -{ - return (struct drm_rect *)drm_plane_get_damage_clips(state); -} - #endif diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index d647223e8390..604b1d1b2d72 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -192,20 +192,6 @@ struct drm_device { struct list_head clientlist; /** - * @irq_enabled: - * - * Indicates that interrupt handling is enabled, specifically vblank - * handling. Drivers which don't use drm_irq_install() need to set this - * to true manually. - */ - bool irq_enabled; - - /** - * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers. - */ - int irq; - - /** * @vblank_disable_immediate: * * If true, vblank interrupt will be disabled immediately when the @@ -276,12 +262,6 @@ struct drm_device { */ spinlock_t event_lock; - /** @agp: AGP data */ - struct drm_agp_head *agp; - - /** @pdev: PCI device structure */ - struct pci_dev *pdev; - /** @num_crtcs: Number of CRTCs on this device */ unsigned int num_crtcs; @@ -329,6 +309,9 @@ struct drm_device { struct pci_controller *hose; #endif + /* AGP data */ + struct drm_agp_head *agp; + /* Context handle management - linked list of context handles */ struct list_head ctxlist; @@ -375,6 +358,10 @@ struct drm_device { /* Scatter gather memory */ struct drm_sg_mem *sg; + + /* IRQs */ + bool irq_enabled; + int irq; #endif }; diff --git a/include/drm/drm_dp_aux_bus.h b/include/drm/drm_dp_aux_bus.h new file mode 100644 index 000000000000..4f19b20b1dd6 --- /dev/null +++ b/include/drm/drm_dp_aux_bus.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Google Inc. + * + * The DP AUX bus is used for devices that are connected over a DisplayPort + * AUX bus. The devices on the far side of the bus are referred to as + * endpoints in this code. + */ + +#ifndef _DP_AUX_BUS_H_ +#define _DP_AUX_BUS_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +/** + * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints + * + * This is used to instantiate devices that are connected via a DP AUX + * bus. Usually the device is a panel, but conceivable other devices could + * be hooked up there. 
+ */ +struct dp_aux_ep_device { + /** @dev: The normal dev pointer */ + struct device dev; + /** @aux: Pointer to the aux bus */ + struct drm_dp_aux *aux; +}; + +struct dp_aux_ep_driver { + int (*probe)(struct dp_aux_ep_device *aux_ep); + void (*remove)(struct dp_aux_ep_device *aux_ep); + void (*shutdown)(struct dp_aux_ep_device *aux_ep); + struct device_driver driver; +}; + +static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev) +{ + return container_of(dev, struct dp_aux_ep_device, dev); +} + +static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv) +{ + return container_of(drv, struct dp_aux_ep_driver, driver); +} + +int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux); +void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux); +int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux); + +#define dp_aux_dp_driver_register(aux_ep_drv) \ + __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE) +int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv, + struct module *owner); +void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv); + +#endif /* _DP_AUX_BUS_H_ */ diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h index 4c42db81fcb4..7ee482265087 100644 --- a/include/drm/drm_dp_dual_mode_helper.h +++ b/include/drm/drm_dp_dual_mode_helper.h @@ -62,6 +62,7 @@ #define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 #define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 +struct drm_device; struct i2c_adapter; ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, @@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type { DRM_DP_DUAL_MODE_LSPCON, }; -enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); -int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, +enum drm_dp_dual_mode_type +drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter); -int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool *enabled); -int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, +int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter, bool enable); const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); -int drm_lspcon_get_mode(struct i2c_adapter *adapter, +int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, enum drm_lspcon_mode *current_mode); -int drm_lspcon_set_mode(struct i2c_adapter *adapter, +int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, enum drm_lspcon_mode reqd_mode); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1e85c2021f2f..1d5b3dbb6e56 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -29,6 +29,8 @@ #include <drm/drm_connector.h> struct drm_device; +struct drm_dp_aux; +struct drm_panel; /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that @@ -687,14 +689,14 @@ struct drm_device; #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ # define DP_DECOMPRESSION_EN (1 << 0) -#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ -# define DP_PSR_ENABLE (1 << 0) -# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) -# define DP_PSR_CRC_VERIFICATION (1 << 2) -# define DP_PSR_FRAME_CAPTURE (1 << 3) -# define DP_PSR_SELECTIVE_UPDATE (1 << 4) -# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) -# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ +# define DP_PSR_ENABLE BIT(0) +# define DP_PSR_MAIN_LINK_ACTIVE BIT(1) +# define DP_PSR_CRC_VERIFICATION BIT(2) +# define DP_PSR_FRAME_CAPTURE BIT(3) +# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ +# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -1376,10 +1378,27 @@ enum drm_dp_phy { #define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ + +#define __DP_FEC1_BASE 0xf0290 /* 1.4 */ +#define __DP_FEC2_BASE 0xf0298 /* 1.4 */ +#define DP_FEC_BASE(dp_phy) \ + (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ + ((dp_phy) - DP_PHY_LTTPR1))) + +#define DP_FEC_REG(dp_phy, fec1_reg) \ + (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) + #define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ +#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ + DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) + #define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ #define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ +#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ + +#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ + /* Repeater modes */ #define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ #define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ @@ -1482,10 +1501,13 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ #define DP_LTTPR_COMMON_CAP_SIZE 8 #define DP_LTTPR_PHY_CAP_SIZE 3 -void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); void drm_dp_lttpr_link_train_clock_recovery_delay(void); -void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); @@ -1797,6 +1819,24 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) DP_MSA_TIMING_PAR_IGNORED; } +/** + * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support + * @edp_dpcd: The DPCD to check + * + * Note that currently this function will return %false for panels which support various DPCD + * backlight features but which require the brightness be set through PWM, and don't support setting + * the brightness level via the DPCD. This is a TODO. 
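As a worked example of the DP_FEC_BASE()/DP_FEC_REG() stride arithmetic introduced above, a sketch (aux is assumed to be the link's struct drm_dp_aux):

static int my_read_lttpr2_fec_status(struct drm_dp_aux *aux, u8 *status)
{
	/*
	 * DP_FEC_BASE(DP_PHY_LTTPR2) = 0xf0290 + (0xf0298 - 0xf0290) * 1
	 * = 0xf0298, i.e. the second repeater's copy of
	 * DP_FEC_STATUS_PHY_REPEATER1.
	 */
	ssize_t ret = drm_dp_dpcd_readb(aux,
					DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2),
					status);

	return ret < 0 ? ret : 0;
}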
+ * + * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false + * otherwise + */ +static inline bool +drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) +{ + return (edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) && + (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP); +} + /* * DisplayPort AUX channel */ @@ -1837,29 +1877,6 @@ struct drm_dp_aux_cec { /** * struct drm_dp_aux - DisplayPort AUX channel - * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter - * @ddc: I2C adapter that can be used for I2C-over-AUX communication - * @dev: pointer to struct device that is the parent for this AUX channel - * @crtc: backpointer to the crtc that is currently using this AUX channel - * @hw_mutex: internal mutex used for locking transfers - * @crc_work: worker that captures CRCs for each frame - * @crc_count: counter of captured frame CRCs - * @transfer: transfers a message representing a single AUX transaction - * - * The @dev field should be set to a pointer to the device that implements the - * AUX channel. - * - * The @name field may be used to specify the name of the I2C adapter. If set to - * %NULL, dev_name() of @dev will be used. - * - * Drivers provide a hardware-specific implementation of how transactions are - * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg - * structure describing the transaction is passed into this function. Upon - * success, the implementation should return the number of payload bytes that - * were transferred, or a negative error-code on failure. Helpers propagate - * errors from the @transfer() function, with the exception of the %-EBUSY - * error, which causes a transaction to be retried. On a short, helpers will - * return %-EPROTO to make it simpler to check for failure. * * An AUX channel can also be used to transport I2C messages to a sink. A * typical application of that is to access an EDID that's present in the sink @@ -1870,21 +1887,96 @@ struct drm_dp_aux_cec { * transfers by default; if a partial response is received, the adapter will * drop down to the size given by the partial response for this transaction * only. - * - * Note that the aux helper code assumes that the @transfer() function only - * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic - * and i2c helpers assume this is the case. */ struct drm_dp_aux { + /** + * @name: user-visible name of this AUX channel and the + * I2C-over-AUX adapter. + * + * It's also used to specify the name of the I2C adapter. If set + * to %NULL, dev_name() of @dev will be used. + */ const char *name; + + /** + * @ddc: I2C adapter that can be used for I2C-over-AUX + * communication + */ struct i2c_adapter ddc; + + /** + * @dev: pointer to struct device that is the parent for this + * AUX channel. + */ struct device *dev; + + /** + * @drm_dev: pointer to the &drm_device that owns this AUX channel. + * Beware, this may be %NULL before drm_dp_aux_register() has been + * called. + * + * It should be set to the &drm_device that will be using this AUX + * channel as early as possible. For many graphics drivers this should + * happen before drm_dp_aux_init(), however it's perfectly fine to set + * this field later so long as it's assigned before calling + * drm_dp_aux_register(). 
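Read as code, the @drm_dev documentation above suggests a bring-up order roughly like the following sketch (struct my_dp and my_dp_aux_transfer() are hypothetical):

static int my_dp_bind(struct my_dp *dp, struct drm_device *drm)
{
	dp->aux.name = "my-dp-aux";
	dp->aux.dev = dp->dev;		/* device implementing the channel */
	dp->aux.drm_dev = drm;		/* assign before registration */
	dp->aux.transfer = my_dp_aux_transfer;

	/* Early init: DPCD access works from here on. */
	drm_dp_aux_init(&dp->aux);

	/* Once the DRM device is about to be registered: */
	return drm_dp_aux_register(&dp->aux);
}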
+ */ + struct drm_device *drm_dev; + + /** + * @crtc: backpointer to the crtc that is currently using this + * AUX channel + */ struct drm_crtc *crtc; + + /** + * @hw_mutex: internal mutex used for locking transfers. + * + * Note that if the underlying hardware is shared among multiple + * channels, the driver needs to do additional locking to + * prevent concurrent access. + */ struct mutex hw_mutex; + + /** + * @crc_work: worker that captures CRCs for each frame + */ struct work_struct crc_work; + + /** + * @crc_count: counter of captured frame CRCs + */ u8 crc_count; + + /** + * @transfer: transfers a message representing a single AUX + * transaction. + * + * This is a hardware-specific implementation of how + * transactions are executed that the drivers must provide. + * + * A pointer to a &drm_dp_aux_msg structure describing the + * transaction is passed into this function. Upon success, the + * implementation should return the number of payload bytes that + * were transferred, or a negative error-code on failure. + * + * Helpers will propagate these errors, with the exception of + * the %-EBUSY error, which causes a transaction to be retried. + * On a short, helpers will return %-EPROTO to make it simpler + * to check for failure. + * + * The @transfer() function must only modify the reply field of + * the &drm_dp_aux_msg structure. The retry logic and i2c + * helpers assume this is the case. + * + * Also note that this callback can be called no matter the + * state @dev is in. Drivers that need that device to be powered + * to perform this operation will first need to make sure it's + * been properly enabled. + */ ssize_t (*transfer)(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg); + /** * @i2c_nack_count: Counts I2C NACKs, used for DP validation. */ @@ -2096,6 +2188,51 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) return desc->quirks & BIT(quirk); } +/** + * struct drm_edp_backlight_info - Probed eDP backlight info struct + * @pwmgen_bit_count: The pwmgen bit count + * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any + * @max: The maximum backlight level that may be set + * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? + * @aux_enable: Does the panel support the AUX enable cap? + * + * This structure contains various data about an eDP backlight, which can be populated by using + * drm_edp_backlight_init(). 
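Pulling the backlight helpers declared below together, probing and enabling an eDP backlight might look like this sketch (it assumes @edp_dpcd already holds the panel's eDP display-control capability bytes; my_* names are placeholders):

static int my_edp_backlight_setup(struct drm_dp_aux *aux,
				  const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
{
	struct drm_edp_backlight_info bl;
	u16 current_level;
	u8 current_mode;
	int ret;

	if (!drm_edp_backlight_supported(edp_dpcd))
		return -ENODEV;

	/* Passing 0 as driver_pwm_freq_hz: no custom PWM frequency. */
	ret = drm_edp_backlight_init(aux, &bl, 0, edp_dpcd,
				     &current_level, &current_mode);
	if (ret < 0)
		return ret;

	/* Turn the backlight on at the level the panel reported. */
	return drm_edp_backlight_enable(aux, &bl, current_level);
}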
+ */ +struct drm_edp_backlight_info { + u8 pwmgen_bit_count; + u8 pwm_freq_pre_divider; + u16 max; + + bool lsb_reg_used : 1; + bool aux_enable : 1; +}; + +int +drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], + u16 *current_level, u8 *current_mode); +int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); + +#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) + +int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux); + +#else + +static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, + struct drm_dp_aux *aux) +{ + return 0; +} + +#endif + #ifdef CONFIG_DRM_DP_CEC void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index bd1c39907b92..ddb9231d0309 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -594,6 +594,14 @@ struct drm_dp_mst_topology_mgr { */ int max_payloads; /** + * @max_lane_count: maximum number of lanes the GPU can drive. + */ + int max_lane_count; + /** + * @max_link_rate: maximum link rate per lane GPU can output, in kHz. + */ + int max_link_rate; + /** * @conn_base_id: DRM connector ID this mgr is connected to. Only used * to build the MST connector path value. */ @@ -765,7 +773,9 @@ struct drm_dp_mst_topology_mgr { int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, - int max_payloads, int conn_base_id); + int max_payloads, + int max_lane_count, int max_link_rate, + int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); @@ -783,7 +793,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index b439ae1921b8..0cd95953cdf5 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -137,10 +137,6 @@ enum drm_driver_feature { * @DRIVER_HAVE_IRQ: * * Legacy irq support. Only for legacy drivers. Do not use. - * - * New drivers can either use the drm_irq_install() and - * drm_irq_uninstall() helper functions, or roll their own irq support - * code by calling request_irq() directly. */ DRIVER_HAVE_IRQ = BIT(30), /** @@ -272,42 +268,6 @@ struct drm_driver { void (*release) (struct drm_device *); /** - * @irq_handler: - * - * Interrupt handler called when using drm_irq_install(). Not used by - * drivers which implement their own interrupt handling. 
- */ - irqreturn_t(*irq_handler) (int irq, void *arg); - - /** - * @irq_preinstall: - * - * Optional callback used by drm_irq_install() which is called before - * the interrupt handler is registered. This should be used to clear out - * any pending interrupts (from e.g. firmware based drives) and reset - * the interrupt handling registers. - */ - void (*irq_preinstall) (struct drm_device *dev); - - /** - * @irq_postinstall: - * - * Optional callback used by drm_irq_install() which is called after - * the interrupt handler is registered. This should be used to enable - * interrupt generation in the hardware. - */ - int (*irq_postinstall) (struct drm_device *dev); - - /** - * @irq_uninstall: - * - * Optional callback used by drm_irq_uninstall() which is called before - * the interrupt handler is unregistered. This should be used to disable - * interrupt generation in the hardware. - */ - void (*irq_uninstall) (struct drm_device *dev); - - /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx. @@ -504,6 +464,10 @@ struct drm_driver { int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); int (*dma_quiescent) (struct drm_device *); int (*context_dtor) (struct drm_device *dev, int context); + irqreturn_t (*irq_handler)(int irq, void *arg); + void (*irq_preinstall)(struct drm_device *dev); + int (*irq_postinstall)(struct drm_device *dev); + void (*irq_uninstall)(struct drm_device *dev); u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 759328a5eeb2..deccfd39e6db 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -336,7 +336,7 @@ struct edid { u8 features; /* Color characteristics */ u8 red_green_lo; - u8 black_white_lo; + u8 blue_white_lo; u8 red_x; u8 red_y; u8 green_x; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 795aea1d0a25..6447e34528f8 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -4,6 +4,7 @@ #include <linux/types.h> +struct drm_device; struct drm_framebuffer; struct drm_plane_state; @@ -14,5 +15,9 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); +void drm_fb_cma_sync_non_coherent(struct drm_device *drm, + struct drm_plane_state *old_state, + struct drm_plane_state *state); + #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 3b273f9ca39a..3af4624368d8 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -36,7 +36,6 @@ struct drm_fb_helper; #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <linux/kgdb.h> -#include <linux/vgaarb.h> enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET, @@ -451,54 +450,4 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) #endif -/** - * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers - * @a: memory range, users of which are to be removed - * @name: requesting driver name - * @primary: also kick vga16fb if present - * - * This function removes framebuffer devices (initialized by firmware/bootloader) - * which use memory range described by @a. If @a is NULL all such devices are - * removed. 
- */ -static inline int -drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, - const char *name, bool primary) -{ -#if IS_REACHABLE(CONFIG_FB) - return remove_conflicting_framebuffers(a, name, primary); -#else - return 0; -#endif -} - -/** - * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices - * @pdev: PCI device - * @name: requesting driver name - * - * This function removes framebuffer devices (eg. initialized by firmware) - * using memory range configured for any of @pdev's memory bars. - * - * The function assumes that PCI device with shadowed ROM drives a primary - * display and so kicks out vga16fb. - */ -static inline int -drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const char *name) -{ - int ret = 0; - - /* - * WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. - */ -#if IS_REACHABLE(CONFIG_FB) - ret = remove_conflicting_pci_framebuffers(pdev, name); -#endif - if (ret == 0) - ret = vga_remove_vgacon(pdev); - return ret; -} - #endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index b81b3bfb08c8..a3acb7ac3550 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -226,15 +226,31 @@ struct drm_file { /** * @master: * - * Master this node is currently associated with. Only relevant if - * drm_is_primary_client() returns true. Note that this only - * matches &drm_device.master if the master is the currently active one. + * Master this node is currently associated with. Protected by struct + * &drm_device.master_mutex, and serialized by @master_lookup_lock. + * + * Only relevant if drm_is_primary_client() returns true. Note that + * this only matches &drm_device.master if the master is the currently + * active one. + * + * To update @master, both &drm_device.master_mutex and + * @master_lookup_lock need to be held, therefore holding either of + * them is safe and enough for the read side. + * + * When dereferencing this pointer, either hold struct + * &drm_device.master_mutex for the duration of the pointer's use, or + * use drm_file_get_master() if struct &drm_device.master_mutex is not + * currently held and there is no other need to hold it. This prevents + * @master from being freed during use. * * See also @authentication and @is_master and the :ref:`section on * primary nodes and authentication <drm_primary_node>`. */ struct drm_master *master; + /** @master_lookup_lock: Serializes @master. */ + spinlock_t master_lookup_lock; + /** @pid: Process that opened this file.
*/ struct pid *pid; diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 5f9e37032468..4e0258a61311 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -11,7 +11,7 @@ struct drm_rect; void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); -void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, +void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, @@ -28,4 +28,12 @@ void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_rect *clip); +int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, + uint32_t dst_format, void *vmap, + struct drm_framebuffer *fb, + struct drm_rect *rect); +int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch, + uint32_t dst_format, void *vmap, + struct drm_framebuffer *fb); + #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 156b122c0ad5..22aa64d07c79 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -25,6 +25,11 @@ #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> +/** + * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have + */ +#define DRM_FORMAT_MAX_PLANES 4u + /* * DRM formats are little endian. Define host endian variants for the * most common formats here, to reduce the #ifdefs needed in drivers. @@ -78,7 +83,7 @@ struct drm_format_info { * triplet @char_per_block, @block_w, @block_h for better * describing the pixel format. */ - u8 cpp[4]; + u8 cpp[DRM_FORMAT_MAX_PLANES]; /** * @char_per_block: @@ -104,7 +109,7 @@ struct drm_format_info { * information from their drm_mode_config.get_format_info hook * if they want the core to be validating the pitch. 
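With @cpp and @char_per_block now sized by DRM_FORMAT_MAX_PLANES, pitch validation against user-supplied framebuffer metadata reduces to the accessors in this header; a small sketch (my_validate_pitch0() is hypothetical):

static int my_validate_pitch0(const struct drm_mode_fb_cmd2 *cmd)
{
	const struct drm_format_info *info = drm_format_info(cmd->pixel_format);
	uint64_t min_pitch;

	if (!info)
		return -EINVAL;

	/* For XRGB8888 (cpp[0] == 4) and width 1920 this is 7680 bytes. */
	min_pitch = drm_format_info_min_pitch(info, 0, cmd->width);
	if (!min_pitch || cmd->pitches[0] < min_pitch)
		return -EINVAL;

	return 0;
}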
*/ - u8 char_per_block[4]; + u8 char_per_block[DRM_FORMAT_MAX_PLANES]; }; /** @@ -113,7 +118,7 @@ struct drm_format_info { * Block width in pixels, this is intended to be accessed through * drm_format_info_block_width() */ - u8 block_w[4]; + u8 block_w[DRM_FORMAT_MAX_PLANES]; /** * @block_h: @@ -121,7 +126,7 @@ struct drm_format_info { * Block height in pixels, this is intended to be accessed through * drm_format_info_block_height() */ - u8 block_h[4]; + u8 block_h[DRM_FORMAT_MAX_PLANES]; /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; @@ -136,14 +141,6 @@ struct drm_format_info { }; /** - * struct drm_format_name_buf - name of a DRM format - * @str: string buffer containing the format name - */ -struct drm_format_name_buf { - char str[32]; -}; - -/** * drm_format_info_is_yuv_packed - check that the format info matches a YUV * format with data laid in a single plane * @info: format info @@ -318,6 +315,5 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info, int plane); uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, int plane, unsigned int buffer_width); -const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf); #endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index be658ebbec72..f67c5b7bcb68 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -27,12 +27,12 @@ #include <linux/list.h> #include <linux/sched.h> +#include <drm/drm_fourcc.h> #include <drm/drm_mode_object.h> struct drm_clip_rect; struct drm_device; struct drm_file; -struct drm_format_info; struct drm_framebuffer; struct drm_gem_object; @@ -147,7 +147,7 @@ struct drm_framebuffer { * @pitches: Line stride per buffer. For userspace created object this * is copied from drm_mode_fb_cmd2. */ - unsigned int pitches[4]; + unsigned int pitches[DRM_FORMAT_MAX_PLANES]; /** * @offsets: Offset from buffer start to the actual pixel data in bytes, * per buffer. For userspace created object this is copied from @@ -165,7 +165,7 @@ struct drm_framebuffer { * data (even for linear buffers). Specifying an x/y pixel offset is * instead done through the source rectangle in &struct drm_plane_state. */ - unsigned int offsets[4]; + unsigned int offsets[DRM_FORMAT_MAX_PLANES]; /** * @modifier: Data layout modifier. This is used to describe * tiling, or also special layouts (like compression) of auxiliary @@ -210,7 +210,7 @@ struct drm_framebuffer { * This is used by the GEM framebuffer helpers, see e.g. * drm_gem_fb_create(). 
*/ - struct drm_gem_object *obj[4]; + struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES]; }; #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 240049566592..35e7f44c2a75 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -384,8 +384,6 @@ drm_gem_object_put(struct drm_gem_object *obj) __drm_gem_object_put(obj); } -void drm_gem_object_put_locked(struct drm_gem_object *obj); - int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index cfc5adee3d13..48222a107873 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -5,6 +5,7 @@ #include <linux/dma-buf-map.h> +#include <drm/drm_fourcc.h> #include <drm/drm_plane.h> struct drm_simple_display_pipe; @@ -40,7 +41,15 @@ struct drm_shadow_plane_state { * The memory mappings stored in map should be established in the plane's * prepare_fb callback and removed in the cleanup_fb callback. */ - struct dma_buf_map map[4]; + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + + /** + * @data: Address of each framebuffer BO's data + * + * The address of the data stored in each mapping. This is different + * for framebuffers with non-zero offset fields. + */ + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; }; /** @@ -53,6 +62,12 @@ to_drm_shadow_plane_state(struct drm_plane_state *state) return container_of(state, struct drm_shadow_plane_state, base); } +void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, + struct drm_shadow_plane_state *new_shadow_plane_state); +void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state); +void __drm_gem_reset_shadow_plane(struct drm_plane *plane, + struct drm_shadow_plane_state *shadow_plane_state); + void drm_gem_reset_shadow_plane(struct drm_plane *plane); struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane); void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 0a9711caa3e8..cd13508acbc1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -16,6 +16,7 @@ struct drm_mode_create_dumb; * more than one entry but they are guaranteed to have contiguous * DMA addresses. 
* @vaddr: kernel virtual address of the backing memory + * @map_noncoherent: if true, the GEM object is backed by non-coherent memory */ struct drm_gem_cma_object { struct drm_gem_object base; @@ -24,6 +25,8 @@ struct drm_gem_cma_object { /* For objects with DMA memory allocated by GEM CMA */ void *vaddr; + + bool map_noncoherent; }; #define to_drm_gem_cma_obj(gem_obj) \ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 6bdffc7aa124..905727719ead 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -1,6 +1,11 @@ #ifndef __DRM_GEM_FB_HELPER_H__ #define __DRM_GEM_FB_HELPER_H__ +#include <linux/dma-buf.h> +#include <linux/dma-buf-map.h> + +#include <drm/drm_fourcc.h> + struct drm_afbc_framebuffer; struct drm_device; struct drm_fb_helper_surface_size; @@ -34,6 +39,14 @@ struct drm_framebuffer * drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); +int drm_gem_fb_vmap(struct drm_framebuffer *fb, + struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]); +void drm_gem_fb_vunmap(struct drm_framebuffer *fb, + struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); +int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); +void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); + #define drm_is_afbc(modifier) \ (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 7c6d874910b8..c1aa02bd4c89 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -5,8 +5,8 @@ #include <linux/kernel.h> -#include <drm/drm_gem.h> #include <drm/drm_device.h> +#include <drm/drm_gem.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> @@ -24,4 +24,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); +int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); + #endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 288055d397d9..d3cf06c9af65 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -5,6 +5,7 @@ #include <drm/drm_file.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_modes.h> #include <drm/ttm/ttm_bo_api.h> @@ -93,7 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, size_t size, unsigned long pg_align); void drm_gem_vram_put(struct drm_gem_vram_object *gbo); -u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); @@ -113,9 +113,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file, int drm_gem_vram_driver_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, - struct drm_device *dev, - uint32_t handle, uint64_t *offset); /* * Helpers for struct drm_plane_helper_funcs @@ -127,6 +124,18 @@ void drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, struct 
drm_plane_state *old_state); +/** + * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - + * Initializes struct drm_plane_helper_funcs for VRAM handling + * + * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This + * macro initializes struct drm_plane_helper_funcs to use the respective helper + * functions. + */ +#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \ + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \ + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb + /* * Helpers for struct drm_simple_display_pipe_funcs */ @@ -149,7 +158,7 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( #define DRM_GEM_VRAM_DRIVER \ .debugfs_init = drm_vram_mm_debugfs_init, \ .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ .gem_prime_mmap = drm_gem_prime_mmap /* @@ -195,10 +204,6 @@ void drm_vram_mm_debugfs_init(struct drm_minor *minor); * Helpers for integration with struct drm_device */ -struct drm_vram_mm *drm_vram_helper_alloc_mm( - struct drm_device *dev, uint64_t vram_base, size_t vram_size); -void drm_vram_helper_release_mm(struct drm_device *dev); - int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base, size_t vram_size); diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 10100a4bbe2a..afb27cb6a7bd 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); #define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) #define DRM_MAJOR 226 /** diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h deleted file mode 100644 index 631b22f9757d..000000000000 --- a/include/drm/drm_irq.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2016 Intel Corp. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _DRM_IRQ_H_ -#define _DRM_IRQ_H_ - -struct drm_device; - -int drm_irq_install(struct drm_device *dev, int irq); -int drm_irq_uninstall(struct drm_device *dev); -int devm_drm_irq_install(struct drm_device *dev, int irq); -#endif diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 8ed04e9be997..58dc8d8cc907 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -33,6 +33,8 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/agp_backend.h> + #include <drm/drm.h> #include <drm/drm_auth.h> #include <drm/drm_hashtab.h> @@ -190,14 +192,13 @@ do { \ void drm_legacy_idlelock_take(struct drm_lock_data *lock); void drm_legacy_idlelock_release(struct drm_lock_data *lock); +/* drm_irq.c */ +int drm_legacy_irq_uninstall(struct drm_device *dev); + /* drm_pci.c */ #ifdef CONFIG_PCI -struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, - size_t align); -void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah); - int drm_legacy_pci_init(const struct drm_driver *driver, struct pci_driver *pdriver); void drm_legacy_pci_exit(const struct drm_driver *driver, @@ -229,6 +230,86 @@ static inline void drm_legacy_pci_exit(const struct drm_driver *driver, #endif +/* + * AGP Support + */ + +struct drm_agp_head { + struct agp_kern_info agp_info; + struct list_head memory; + unsigned long mode; + struct agp_bridge_data *bridge; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; + int cant_use_aperture; + unsigned long page_mask; +}; + +#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) +struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); +int drm_legacy_agp_acquire(struct drm_device *dev); +int drm_legacy_agp_release(struct drm_device *dev); +int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); +int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +#else +static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline int drm_legacy_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} +#endif + /* drm_memory.c */ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index f543d6e3e822..05e194958265 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -183,7 +183,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, #define mipi_dbi_command(dbi, cmd, seq...) 
\ ({ \ const u8 d[] = { seq }; \ - mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ + struct device *dev = &(dbi)->spi->dev; \ + int ret; \ + ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ + if (ret) \ + dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \ + ret; \ }) #ifdef CONFIG_DEBUG_FS diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 360e6377e84b..af7ba8071eb0 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -80,6 +80,11 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, * Note that typically DSI packet transmission is atomic, so the .transfer() * function will seldomly return anything other than the number of bytes * contained in the transmit buffer on success. + * + * Also note that those callbacks can be called no matter the state the + * host is in. Drivers that need the underlying device to be powered to + * perform these operations will first need to make sure it's been + * properly enabled. */ struct mipi_dsi_host_ops { int (*attach)(struct mipi_dsi_host *host, @@ -119,15 +124,15 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); /* enable hsync-end packets in vsync-pulse and v-porch area */ #define MIPI_DSI_MODE_VIDEO_HSE BIT(4) /* disable hfront-porch area */ -#define MIPI_DSI_MODE_VIDEO_HFP BIT(5) +#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5) /* disable hback-porch area */ -#define MIPI_DSI_MODE_VIDEO_HBP BIT(6) +#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6) /* disable hsync-active area */ -#define MIPI_DSI_MODE_VIDEO_HSA BIT(7) +#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7) /* flush display FIFO on vsync pulse */ #define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) /* disable EoT packets in HS mode */ -#define MIPI_DSI_MODE_EOT_PACKET BIT(9) +#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9) /* device supports non-continuous clock behavior (DSI spec 5.6.1) */ #define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) /* transmit data in low power */ diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index ab424ddd7665..1ddf7783fdf7 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -909,6 +909,8 @@ struct drm_mode_config { * @allow_fb_modifiers: * * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + * Note that drivers should not set this directly, it is automatically + * set in drm_universal_plane_init(). * * IMPORTANT: * diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index f3a4b47b3986..fdfa9f37ce05 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1178,8 +1178,11 @@ struct drm_plane_helper_funcs { * equivalent functionality should be implemented through private * members in the plane structure. * - * Drivers which always have their buffers pinned should use - * drm_gem_plane_helper_prepare_fb() for this hook. + * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook + * set drm_gem_plane_helper_prepare_fb() is called automatically to + * implement this. Other drivers which need additional plane processing + * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb + * hook. * * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. 
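For drivers that do need their own hook, the pattern described above looks roughly like this sketch (my_plane_prepare_fb() and the pinning step are hypothetical):

static int my_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	int ret;

	/* Driver-specific work, e.g. pinning the framebuffer BOs. */
	ret = my_pin_fb(state);
	if (ret)
		return ret;

	/* Still attach the implicit fences the default path would add. */
	return drm_gem_plane_helper_prepare_fb(plane, state);
}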
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 33605c3f0eba..4602f833eb51 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -64,8 +64,8 @@ enum drm_panel_orientation; * the panel. This is the job of the .unprepare() function. * * Backlight can be handled automatically if configured using - * drm_panel_of_backlight(). Then the driver does not need to implement the - * functionality to enable/disable backlight. + * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver + * does not need to implement the functionality to enable/disable backlight. */ struct drm_panel_funcs { /** @@ -144,8 +144,8 @@ struct drm_panel { * Backlight device, used to turn on backlight after the call * to enable(), and to turn off backlight before the call to * disable(). - * backlight is set by drm_panel_of_backlight() and drivers - * shall not assign it. + * backlight is set by drm_panel_of_backlight() or + * drm_panel_dp_aux_backlight() and drivers shall not assign it. */ struct backlight_device *backlight; diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 1294610e84f4..fed97e35626f 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -186,6 +186,9 @@ struct drm_plane_state { * since last plane update) as an array of &drm_mode_rect in framebuffer * coodinates of the attached framebuffer. Note that unlike plane src, * damage clips are not in 16.16 fixed point. + * + * See drm_plane_get_damage_clips() and + * drm_plane_get_damage_clips_count() for accessing these. */ struct drm_property_blob *fb_damage_clips; @@ -894,38 +897,12 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier); -/** - * drm_plane_get_damage_clips_count - Returns damage clips count. - * @state: Plane state. - * - * Simple helper to get the number of &drm_mode_rect clips set by user-space - * during plane update. - * - * Return: Number of clips in plane fb_damage_clips blob property. - */ -static inline unsigned int -drm_plane_get_damage_clips_count(const struct drm_plane_state *state) -{ - return (state && state->fb_damage_clips) ? - state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0; -} -/** - * drm_plane_get_damage_clips - Returns damage clips. - * @state: Plane state. - * - * Note that this function returns uapi type &drm_mode_rect. Drivers might - * instead be interested in internal &drm_rect which can be obtained by calling - * drm_helper_get_plane_damage_clips(). - * - * Return: Damage clips in plane fb_damage_clips blob property. - */ -static inline struct drm_mode_rect * -drm_plane_get_damage_clips(const struct drm_plane_state *state) -{ - return (struct drm_mode_rect *)((state && state->fb_damage_clips) ? 
- state->fb_damage_clips->data : NULL); -} +void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); +unsigned int +drm_plane_get_damage_clips_count(const struct drm_plane_state *state); +struct drm_mode_rect * +drm_plane_get_damage_clips(const struct drm_plane_state *state); int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index a3c58c941bdc..15a089a87c22 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -327,7 +327,7 @@ static inline bool drm_debug_enabled(enum drm_debug_category category) /* * struct device based logging * - * Prefer drm_device based logging over device or prink based logging. + * Prefer drm_device based logging over device or printk based logging. */ __printf(3, 4) @@ -443,25 +443,25 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category, #define drm_dbg_core(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ - drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) /* diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index bbf5c1fdd7b0..65bc9710a470 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -31,7 +31,6 @@ /** * struct drm_property_enum - symbolic values for enumerations - * @value: numeric property value for this enum entry * @head: list of enum values, linked to &drm_property.enum_list * @name: symbolic name for the enum * @@ -39,6 +38,14 @@ * decoding for each value. This is used for example for the rotation property. */ struct drm_property_enum { + /** + * @value: numeric property value for this enum entry + * + * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a + * bitshift, not a bitmask. In other words, the enum entry is enabled + * if the bit number @value is set in the property's value. This enum + * entry has the bitmask ``1 << value``. 
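A one-line consequence of the bitshift semantics documented above, as a sketch:

static bool my_bitmask_enum_is_set(const struct drm_property_enum *entry,
				   uint64_t prop_value)
{
	/* For DRM_MODE_PROP_BITMASK, @value is a bit number, not a mask. */
	return prop_value & (1ULL << entry->value);
}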
+ */ uint64_t value; struct list_head head; char name[DRM_PROP_NAME_LEN]; diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 39f2deee709c..6f6e19bd4dac 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -39,6 +39,9 @@ * @x2: horizontal ending coordinate (exclusive) * @y1: vertical starting coordinate (inclusive) * @y2: vertical ending coordinate (exclusive) + * + * Note that this must match the layout of struct drm_mode_rect or the damage + * helpers like drm_atomic_helper_damage_iter_init() break. */ struct drm_rect { int x1, y1, x2, y2; diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index ef9944e9c5fc..0b3647e614dd 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -116,8 +116,11 @@ struct drm_simple_display_pipe_funcs { * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for * more details. * - * Drivers which always have their buffers pinned should use - * drm_gem_simple_display_pipe_prepare_fb() for this hook. + * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook + * set drm_gem_simple_display_pipe_prepare_fb() is called automatically + * to implement this. Other drivers which need additional plane + * processing can call drm_gem_simple_display_pipe_prepare_fb() from + * their @prepare_fb hook. */ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -151,6 +154,33 @@ struct drm_simple_display_pipe_funcs { void (*disable_vblank)(struct drm_simple_display_pipe *pipe); /** + * @reset_crtc: + * + * Optional, called by &drm_crtc_funcs.reset. Please read the + * documentation for the &drm_crtc_funcs.reset hook for more details. + */ + void (*reset_crtc)(struct drm_simple_display_pipe *pipe); + + /** + * @duplicate_crtc_state: + * + * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please + * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state + * hook for more details. + */ + struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe); + + /** + * @destroy_crtc_state: + * + * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please + * read the documentation for the &drm_crtc_funcs.atomic_destroy_state + * hook for more details. + */ + void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state); + + /** * @reset_plane: * * Optional, called by &drm_plane_funcs.reset. Please read the diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 76ac5e97a559..4f8c35206f7c 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -53,7 +53,7 @@ struct drm_vma_offset_node { rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_root vm_files; - bool readonly:1; + void *driver_private; }; struct drm_vma_offset_manager { diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 10225a0a35d0..88ae7f331bb1 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -239,6 +239,38 @@ struct drm_sched_backend_ops { * @timedout_job: Called when a job has taken too long to execute, * to trigger GPU recovery. * + * This method is called in a workqueue context. + * + * Drivers typically issue a reset to recover from GPU hangs, and this + * procedure usually follows the following workflow: + * + * 1. Stop the scheduler using drm_sched_stop(). 
This will park the + * scheduler thread and cancel the timeout work, guaranteeing that + * nothing is queued while we reset the hardware queue + * 2. Try to gracefully stop non-faulty jobs (optional) + * 3. Issue a GPU reset (driver-specific) + * 4. Re-submit jobs using drm_sched_resubmit_jobs() + * 5. Restart the scheduler using drm_sched_start(). At that point, new + * jobs can be queued, and the scheduler thread is unblocked + * + * Note that some GPUs have distinct hardware queues but need to reset + * the GPU globally, which requires extra synchronization between the + * timeout handler of the different &drm_gpu_scheduler. One way to + * achieve this synchronization is to create an ordered workqueue + * (using alloc_ordered_workqueue()) at the driver level, and pass this + * queue to drm_sched_init(), to guarantee that timeout handlers are + * executed sequentially. The above workflow needs to be slightly + * adjusted in that case: + * + * 1. Stop all schedulers impacted by the reset using drm_sched_stop() + * 2. Try to gracefully stop non-faulty jobs on all queues impacted by + * the reset (optional) + * 3. Issue a GPU reset on all faulty queues (driver-specific) + * 4. Re-submit jobs on all schedulers impacted by the reset using + * drm_sched_resubmit_jobs() + * 5. Restart all schedulers that were stopped in step #1 using + * drm_sched_start() + * * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, * and the underlying driver has started or completed recovery. * @@ -269,13 +301,14 @@ struct drm_sched_backend_ops { * finished. * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. + * @timeout_wq: workqueue used to queue @work_tdr * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the * timeout interval is over. * @thread: the kthread on which the scheduler which run. * @pending_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the pending_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked - * guilty and it will be considered for scheduling further. + * guilty and it will no longer be considered for scheduling. 
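Condensed into code, the single-scheduler workflow documented above corresponds to a &drm_sched_backend_ops.timedout_job implementation along these lines (a sketch; my_gpu_reset() and to_my_gpu() are placeholders, and the optional step 2 is skipped):

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched, bad);		/* 1. park thread, cancel tdr */
	my_gpu_reset(to_my_gpu(sched));		/* 3. driver-specific reset */
	drm_sched_resubmit_jobs(sched);		/* 4. re-queue pending jobs */
	drm_sched_start(sched, true);		/* 5. unblock the scheduler */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}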
* @score: score to help loadbalancer pick a idle sched * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work @@ -293,6 +326,7 @@ struct drm_gpu_scheduler { wait_queue_head_t job_scheduled; atomic_t hw_rq_count; atomic64_t job_id_count; + struct workqueue_struct *timeout_wq; struct delayed_work work_tdr; struct task_struct *thread; struct list_head pending_list; @@ -306,7 +340,8 @@ struct drm_gpu_scheduler { int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, long timeout, + uint32_t hw_submission, unsigned hang_limit, + long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index ebd0dd1c35b3..eee18fa53b54 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -640,9 +640,32 @@ INTEL_VGA_DEVICE(0x4681, info), \ INTEL_VGA_DEVICE(0x4682, info), \ INTEL_VGA_DEVICE(0x4683, info), \ + INTEL_VGA_DEVICE(0x4688, info), \ + INTEL_VGA_DEVICE(0x4689, info), \ INTEL_VGA_DEVICE(0x4690, info), \ INTEL_VGA_DEVICE(0x4691, info), \ INTEL_VGA_DEVICE(0x4692, info), \ INTEL_VGA_DEVICE(0x4693, info) +/* ADL-P */ +#define INTEL_ADLP_IDS(info) \ + INTEL_VGA_DEVICE(0x46A0, info), \ + INTEL_VGA_DEVICE(0x46A1, info), \ + INTEL_VGA_DEVICE(0x46A2, info), \ + INTEL_VGA_DEVICE(0x46A3, info), \ + INTEL_VGA_DEVICE(0x46A6, info), \ + INTEL_VGA_DEVICE(0x46A8, info), \ + INTEL_VGA_DEVICE(0x46AA, info), \ + INTEL_VGA_DEVICE(0x462A, info), \ + INTEL_VGA_DEVICE(0x4626, info), \ + INTEL_VGA_DEVICE(0x4628, info), \ + INTEL_VGA_DEVICE(0x46B0, info), \ + INTEL_VGA_DEVICE(0x46B1, info), \ + INTEL_VGA_DEVICE(0x46B2, info), \ + INTEL_VGA_DEVICE(0x46B3, info), \ + INTEL_VGA_DEVICE(0x46C0, info), \ + INTEL_VGA_DEVICE(0x46C1, info), \ + INTEL_VGA_DEVICE(0x46C2, info), \ + INTEL_VGA_DEVICE(0x46C3, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 2155e2e38aec..f681bbdbc698 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -86,6 +86,7 @@ struct ttm_tt; * @base: drm_gem_object superclass data. * @bdev: Pointer to the buffer object device structure. * @type: The bo type. + * @page_alignment: Page alignment. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. * @kref: Reference count of this buffer object. When this refcount reaches @@ -123,6 +124,7 @@ struct ttm_buffer_object { struct ttm_device *bdev; enum ttm_bo_type type; + uint32_t page_alignment; void (*destroy) (struct ttm_buffer_object *); /** @@ -134,7 +136,7 @@ struct ttm_buffer_object { * Members protected by the bo::resv::reserved lock. */ - struct ttm_resource mem; + struct ttm_resource *resource; struct ttm_tt *ttm; bool deleted; @@ -523,19 +525,6 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); /** - * ttm_bo_mmap - mmap out of the ttm device address space. - * - * @filp: filp as input from the mmap method. - * @vma: vma as input from the mmap method. - * @bdev: Pointer to the ttm_device with the address space manager. - * - * This function is intended to be called by the device mmap method. - * if the device address space is to be backed by the bo manager. 
- */ -int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_device *bdev); - -/** * ttm_bo_io * * @bdev: Pointer to the struct ttm_device. @@ -563,25 +552,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, gfp_t gfp_flags); /** - * ttm_bo_uses_embedded_gem_object - check if the given bo uses the - * embedded drm_gem_object. - * - * Most ttm drivers are using gem too, so the embedded - * ttm_buffer_object.base will be initialized by the driver (before - * calling ttm_bo_init). It is also possible to use ttm without gem - * though (vmwgfx does that). - * - * This helper will figure whenever a given ttm bo is a gem object too - * or not. - * - * @bo: The bo to check. - */ -static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) -{ - return bo->base.dev != NULL; -} - -/** * ttm_bo_pin - Pin the buffer object. * @bo: The buffer object to pin * @@ -637,4 +607,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); +vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot); + #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index dbccac957f8f..68d6069572aa 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -40,6 +40,7 @@ #include <drm/ttm/ttm_device.h> #include "ttm_bo_api.h" +#include "ttm_kmap_iter.h" #include "ttm_placement.h" #include "ttm_tt.h" #include "ttm_pool.h" @@ -96,7 +97,7 @@ struct ttm_lru_bulk_move { */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_resource *mem, + struct ttm_resource **mem, struct ttm_operation_ctx *ctx); /** @@ -181,15 +182,15 @@ static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { spin_lock(&bo->bdev->lru_lock); - ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); spin_unlock(&bo->bdev->lru_lock); } static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, struct ttm_resource *new_mem) { - bo->mem = *new_mem; - new_mem->mm_node = NULL; + WARN_ON(bo->resource); + bo->resource = new_mem; } /** @@ -202,9 +203,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, struct ttm_resource *new_mem) { - struct ttm_resource *old_mem = &bo->mem; - - WARN_ON(old_mem->mm_node != NULL); + ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, new_mem); } @@ -273,6 +272,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_resource *new_mem); /** + * ttm_bo_move_sync_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @new_mem: struct ttm_resource indicating where to move. + * + * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed + * by the caller to be idle. Typically used after memcpy buffer moves. + */ +static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem); + + WARN_ON(ret); +} + +/** * ttm_bo_pipeline_gutting. * * @bo: A pointer to a struct ttm_buffer_object.
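Since bo->mem is now a bo->resource pointer, a driver's synchronous move path combines ttm_bo_move_null() for no-op moves with ttm_bo_move_sync_cleanup() after a real copy. A minimal sketch, assuming a hypothetical foo driver (the iomap/sg_table plumbing describing the VRAM aperture is an assumption for illustration; ttm_move_memcpy() and the kmap iterator initializers are declared in the hunks that follow):

#include <drm/ttm/ttm_bo_driver.h>

/* Sketch of a synchronous system-memory -> VRAM copy under the new API. */
static int foo_move_memcpy(struct ttm_buffer_object *bo,
			   struct ttm_resource *new_mem,
			   struct io_mapping *vram_iomap,
			   struct sg_table *st,
			   resource_size_t vram_start)
{
	struct ttm_kmap_iter_tt tt_iter;
	struct ttm_kmap_iter_iomap io_iter;
	struct ttm_kmap_iter *src, *dst;

	/* Same placement: just free the old resource and assign the new. */
	if (bo->resource->mem_type == new_mem->mem_type) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	src = ttm_kmap_iter_tt_init(&tt_iter, bo->ttm);
	dst = ttm_kmap_iter_iomap_init(&io_iter, vram_iomap, st, vram_start);

	/* Copy PAGE_SIZE chunks through the iterators' map_local() hooks. */
	ttm_move_memcpy(bo, new_mem->num_pages, dst, src);

	/* The copy is synchronous, so the idle-BO cleanup helper applies. */
	ttm_bo_move_sync_cleanup(bo, new_mem);
	return 0;
}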
@@ -306,30 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); */ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); -/** - * ttm_range_man_init - * - * @bdev: ttm device - * @type: memory manager type - * @use_tt: if the memory manager uses tt - * @p_size: size of area to be managed in pages. - * - * Initialise a generic range manager for the selected memory type. - * The range manager is installed for this device in the type slot. - */ -int ttm_range_man_init(struct ttm_device *bdev, - unsigned type, bool use_tt, - unsigned long p_size); - -/** - * ttm_range_man_fini - * - * @bdev: ttm device - * @type: memory manager type - * - * Remove the generic range manager from a slot and tear it down. - */ -int ttm_range_man_fini(struct ttm_device *bdev, - unsigned type); +void ttm_move_memcpy(struct ttm_buffer_object *bo, + u32 num_pages, + struct ttm_kmap_iter *dst_iter, + struct ttm_kmap_iter *src_iter); +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); #endif diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index a0b4a49fa432..3c9dd65f5aaf 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -33,4 +33,6 @@ enum ttm_caching { ttm_cached }; +pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp); + #endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index 7c8f87bd52d3..cd592f8e941b 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -162,21 +162,6 @@ struct ttm_device_funcs { struct ttm_place *hop); /** - * struct ttm_bo_driver_member verify_access - * - * @bo: Pointer to a buffer object. - * @filp: Pointer to a struct file trying to access the object. - * - * Called from the map / write / read methods to verify that the - * caller is permitted to access the buffer object. - * This member may be set to NULL, which will refuse this kind of - * access for all buffer objects. - * This function should return 0 if access is granted, -EPERM otherwise. - */ - int (*verify_access)(struct ttm_buffer_object *bo, - struct file *filp); - - /** * Hook to notify driver about a resource delete. */ void (*delete_mem_notify)(struct ttm_buffer_object *bo); diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h new file mode 100644 index 000000000000..8bb00fd39d6c --- /dev/null +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef __TTM_KMAP_ITER_H__ +#define __TTM_KMAP_ITER_H__ + +#include <linux/types.h> + +struct ttm_kmap_iter; +struct dma_buf_map; + +/** + * struct ttm_kmap_iter_ops - Ops structure for a struct + * ttm_kmap_iter. + * @maps_tt: Whether the iterator maps TT memory directly, as opposed to + * mapping a TT through an aperture. Both these modes have + * struct ttm_resource_manager::use_tt set, but the latter typically + * returns is_iomem == true from ttm_mem_io_reserve. + */ +struct ttm_kmap_iter_ops { + /** + * map_local() - Map a PAGE_SIZE part of the resource using + * kmap_local semantics. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct dma_buf_map holding the virtual address after + * the operation. + * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */ + void (*map_local)(struct ttm_kmap_iter *res_iter, + struct dma_buf_map *dmap, pgoff_t i); + /** + * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * mapped using kmap_local. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct dma_buf_map holding the virtual address after + * the operation. + */ + void (*unmap_local)(struct ttm_kmap_iter *res_iter, + struct dma_buf_map *dmap); + bool maps_tt; +}; + +/** + * struct ttm_kmap_iter - Iterator for kmap_local type operations on a + * resource. + * @ops: Pointer to the operations struct. + * + * This struct is intended to be embedded in a resource-specific specialization + * implementing operations for the resource. + * + * Nothing stops us from extending the operations to vmap, vmap_pfn etc, + * replacing some or all of the ttm_bo_util cpu-map functionality. + */ +struct ttm_kmap_iter { + const struct ttm_kmap_iter_ops *ops; +}; + +#endif /* __TTM_KMAP_ITER_H__ */ diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index aa6ba4d0cf78..8995c9e4ec1b 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -47,8 +47,11 @@ * top of the memory area, instead of the bottom. */ -#define TTM_PL_FLAG_CONTIGUOUS (1 << 19) -#define TTM_PL_FLAG_TOPDOWN (1 << 22) +#define TTM_PL_FLAG_CONTIGUOUS (1 << 0) +#define TTM_PL_FLAG_TOPDOWN (1 << 1) + +/* For multihop handling */ +#define TTM_PL_FLAG_TEMPORARY (1 << 2) /** * struct ttm_place diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h new file mode 100644 index 000000000000..22b6fa42ac20 --- /dev/null +++ b/include/drm/ttm/ttm_range_manager.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _TTM_RANGE_MANAGER_H_ +#define _TTM_RANGE_MANAGER_H_ + +#include <drm/ttm/ttm_resource.h> +#include <drm/drm_mm.h> + +/** + * struct ttm_range_mgr_node + * + * @base: base class we extend + * @mm_nodes: MM nodes, usually 1 + * + * Extending the ttm_resource object to manage an address space allocation with + * one or more drm_mm_nodes. + */ +struct ttm_range_mgr_node { + struct ttm_resource base; + struct drm_mm_node mm_nodes[]; +}; + +/** + * to_ttm_range_mgr_node + * + * @res: the resource to upcast + * + * Upcast the ttm_resource object into a ttm_range_mgr_node object. + */ +static inline struct ttm_range_mgr_node * +to_ttm_range_mgr_node(struct ttm_resource *res) +{ + return container_of(res, struct ttm_range_mgr_node, base); +} + +int ttm_range_man_init(struct ttm_device *bdev, + unsigned type, bool use_tt, + unsigned long p_size); +int ttm_range_man_fini(struct ttm_device *bdev, + unsigned type); + +#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 6164ccf4f308..140b6b9a8bbe 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -27,9 +27,11 @@ #include <linux/types.h> #include <linux/mutex.h> +#include <linux/dma-buf-map.h> #include <linux/dma-fence.h> #include <drm/drm_print.h> #include <drm/ttm/ttm_caching.h> +#include <drm/ttm/ttm_kmap_iter.h> #define TTM_MAX_BO_PRIORITY 4U @@ -38,6 +40,10 @@ struct ttm_resource_manager; struct ttm_resource; struct ttm_place; struct ttm_buffer_object; +struct dma_buf_map; +struct io_mapping; +struct sg_table; +struct scatterlist; struct ttm_resource_manager_func { /** @@ -45,46 +51,38 @@ struct ttm_resource_manager_func { * * @man: Pointer to a memory type manager.
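A specialization only has to fill in the two hooks and the maps_tt flag above. As a sketch, assuming a hypothetical foo iterator over a driver-private page array (system pages, hence maps_tt = true), the ops could look like this:

#include <linux/dma-buf-map.h>
#include <linux/highmem.h>
#include <drm/ttm/ttm_kmap_iter.h>

struct foo_kmap_iter {
	struct ttm_kmap_iter base;
	struct page **pages;	/* hypothetical driver-private page array */
};

static void foo_kmap_iter_map_local(struct ttm_kmap_iter *iter,
				    struct dma_buf_map *dmap, pgoff_t i)
{
	struct foo_kmap_iter *fiter =
		container_of(iter, struct foo_kmap_iter, base);

	/* System pages, so hand back a regular kernel virtual address. */
	dma_buf_map_set_vaddr(dmap, kmap_local_page(fiter->pages[i]));
}

static void foo_kmap_iter_unmap_local(struct ttm_kmap_iter *iter,
				      struct dma_buf_map *dmap)
{
	kunmap_local(dmap->vaddr);
}

static const struct ttm_kmap_iter_ops foo_kmap_iter_ops = {
	.map_local	= foo_kmap_iter_map_local,
	.unmap_local	= foo_kmap_iter_unmap_local,
	.maps_tt	= true,
};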
* @bo: Pointer to the buffer object we're allocating space for. - * @placement: Placement details. - * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_resource to be filled in. + * @place: Placement details. + * @res: Resulting pointer to the ttm_resource. * * This function should allocate space in the memory type managed - * by @man. Placement details if - * applicable are given by @placement. If successful, - * @mem::mm_node should be set to a non-null value, and - * @mem::start should be set to a value identifying the beginning + * by @man. Placement details if applicable are given by @place. If + * successful, a filled in ttm_resource object should be returned in + * @res. @res::start should be set to a value identifying the beginning * of the range allocated, and the function should return zero. - * If the memory region accommodate the buffer object, @mem::mm_node - * should be set to NULL, and the function should return 0. + * If the manager can't fulfill the request -ENOSPC should be returned. * If a system error occurred, preventing the request to be fulfilled, * the function should return a negative error code. * - * Note that @mem::mm_node will only be dereferenced by - * struct ttm_resource_manager functions and optionally by the driver, - * which has knowledge of the underlying type. - * - * This function may not be called from within atomic context, so - * an implementation can and must use either a mutex or a spinlock to - * protect any data structures managing the space. + * This function may not be called from within atomic context and needs + * to take care of its own locking to protect any data structures + * managing the space. */ int (*alloc)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource *mem); + struct ttm_resource **res); /** * struct ttm_resource_manager_func member free * * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_resource to be filled in. + * @res: Pointer to a struct ttm_resource to be freed. * - * This function frees memory type resources previously allocated - * and that are identified by @mem::mm_node and @mem::start. May not - * be called from within atomic context. + * This function frees memory type resources previously allocated. + * May not be called from within atomic context. */ void (*free)(struct ttm_resource_manager *man, - struct ttm_resource *mem); + struct ttm_resource *res); /** * struct ttm_resource_manager_func member debug @@ -158,10 +156,9 @@ struct ttm_bus_placement { /** * struct ttm_resource * - * @mm_node: Memory manager node. - * @size: Requested size of memory region. - * @num_pages: Actual size of memory region in pages. - * @page_alignment: Page alignment. + * @start: Start of the allocation. + * @num_pages: Actual size of resource in pages. + * @mem_type: Resource type of the allocation. * @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU * @@ -169,16 +166,53 @@ struct ttm_bus_placement { * buffer object. */ struct ttm_resource { - void *mm_node; unsigned long start; unsigned long num_pages; - uint32_t page_alignment; uint32_t mem_type; uint32_t placement; struct ttm_bus_placement bus; }; /** + * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping + + * struct sg_table backed struct ttm_resource. + * @base: Embedded struct ttm_kmap_iter providing the usage interface. + * @iomap: struct io_mapping representing the underlying linear io_memory. 
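Under the reworked contract documented above, alloc() hands the resource back through @res and reports a full manager with -ENOSPC. A minimal sketch of a conforming alloc()/free() pair, reusing the ttm_range_mgr_node pattern introduced earlier; struct foo_mgr with its embedded drm_mm and spinlock is an assumption for illustration:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical manager wrapping a drm_mm address-space allocator. */
struct foo_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static int foo_mgr_alloc(struct ttm_resource_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_resource **res)
{
	struct foo_mgr *mgr = container_of(man, struct foo_mgr, manager);
	struct ttm_range_mgr_node *node;
	int ret;

	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);

	spin_lock(&mgr->lock);
	/* drm_mm reports an exhausted range with -ENOSPC, as required. */
	ret = drm_mm_insert_node(&mgr->mm, &node->mm_nodes[0],
				 node->base.num_pages);
	spin_unlock(&mgr->lock);
	if (ret) {
		kfree(node);
		return ret;
	}

	node->base.start = node->mm_nodes[0].start;
	*res = &node->base;
	return 0;
}

static void foo_mgr_free(struct ttm_resource_manager *man,
			 struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct foo_mgr *mgr = container_of(man, struct foo_mgr, manager);

	spin_lock(&mgr->lock);
	drm_mm_remove_node(&node->mm_nodes[0]);
	spin_unlock(&mgr->lock);

	kfree(node);
}

static const struct ttm_resource_manager_func foo_mgr_func = {
	.alloc = foo_mgr_alloc,
	.free = foo_mgr_free,
};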
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource. + * @start: Offset that needs to be subtracted from @st to make + * sg_dma_address(st->sgl) - @start == 0 for @iomap start. + * @cache: Scatterlist traversal cache for fast lookups. + * @cache.sg: Pointer to the currently cached scatterlist segment. + * @cache.i: First index of @sg. PAGE_SIZE granularity. + * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity. + * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity. + */ +struct ttm_kmap_iter_iomap { + struct ttm_kmap_iter base; + struct io_mapping *iomap; + struct sg_table *st; + resource_size_t start; + struct { + struct scatterlist *sg; + pgoff_t i; + pgoff_t end; + pgoff_t offs; + } cache; +}; + +/** + * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io + * @base: The base iterator + * @dmap: Points to the starting address of the region + * @needs_unmap: Whether we need to unmap on fini + */ +struct ttm_kmap_iter_linear_io { + struct ttm_kmap_iter base; + struct dma_buf_map dmap; + bool needs_unmap; +}; + +/** * ttm_resource_manager_set_used * * @man: A memory manager object. @@ -225,10 +259,13 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) man->move = NULL; } +void ttm_resource_init(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res); int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource *res); -void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); + struct ttm_resource **res); +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res); void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); @@ -239,4 +276,20 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); + +struct ttm_kmap_iter_linear_io; + +struct ttm_kmap_iter * +ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); + +void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); #endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 134d09ef7766..b20e89d321b0 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -27,10 +27,12 @@ #ifndef _TTM_TT_H_ #define _TTM_TT_H_ +#include <linux/pagemap.h> #include <linux/types.h> #include <drm/ttm/ttm_caching.h> +#include <drm/ttm/ttm_kmap_iter.h> -struct ttm_bo_device; +struct ttm_device; struct ttm_tt; struct ttm_resource; struct ttm_buffer_object; @@ -69,6 +71,18 @@ struct ttm_tt { enum ttm_caching caching; }; +/** + * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt. + * @base: Embedded struct ttm_kmap_iter providing the usage interface + * @tt: Cached struct ttm_tt. + * @prot: Cached page protection for mapping.
+ */ +struct ttm_kmap_iter_tt { + struct ttm_kmap_iter base; + struct ttm_tt *tt; + pgprot_t prot; +}; + static inline bool ttm_tt_is_populated(struct ttm_tt *tt) { return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED; @@ -157,8 +171,24 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper */ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm); +/** + * ttm_tt_mark_for_clear - Mark pages for clearing on populate. + * + * @ttm: Pointer to the ttm_tt structure + * + * Marks pages for clearing so that the next time the page vector is + * populated, the pages will be cleared. + */ +static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm) +{ + ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; +} + void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); +struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, + struct ttm_tt *tt); + #if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h index a250a52a6192..a237eb26accb 100644 --- a/include/dt-bindings/clock/actions,s500-cmu.h +++ b/include/dt-bindings/clock/actions,s500-cmu.h @@ -74,10 +74,12 @@ #define CLK_RMII_REF 54 #define CLK_GPIO 55 -/* system clock (part 2) */ +/* additional clocks */ #define CLK_APB 56 #define CLK_DMAC 57 +#define CLK_NIC 58 +#define CLK_ETHERNET 59 -#define CLK_NR_CLKS (CLK_DMAC + 1) +#define CLK_NR_CLKS (CLK_ETHERNET + 1) #endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */ diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h new file mode 100644 index 000000000000..5fe7689010a0 --- /dev/null +++ b/include/dt-bindings/clock/hi3559av100-clock.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later or BSD-2-Clause */ +/* + * Copyright (c) 2019-2020, Huawei Tech. Co., Ltd. 
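A short usage sketch for the clear-on-populate flag above (foo_populate_cleared() is a hypothetical helper; ttm_tt_mark_for_clear() and ttm_tt_populate() are the calls from this header):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical helper: populate a BO's page vector with zeroed pages. */
static int foo_populate_cleared(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {};

	/* Request zero-filled pages for the next populate. */
	ttm_tt_mark_for_clear(bo->ttm);

	return ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
}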
+ * + * Author: Dongjiu Geng <gengdongjiu@huawei.com> + */ + +#ifndef __DTS_HI3559AV100_CLOCK_H +#define __DTS_HI3559AV100_CLOCK_H + +/* fixed rate */ +#define HI3559AV100_FIXED_1188M 1 +#define HI3559AV100_FIXED_1000M 2 +#define HI3559AV100_FIXED_842M 3 +#define HI3559AV100_FIXED_792M 4 +#define HI3559AV100_FIXED_750M 5 +#define HI3559AV100_FIXED_710M 6 +#define HI3559AV100_FIXED_680M 7 +#define HI3559AV100_FIXED_667M 8 +#define HI3559AV100_FIXED_631M 9 +#define HI3559AV100_FIXED_600M 10 +#define HI3559AV100_FIXED_568M 11 +#define HI3559AV100_FIXED_500M 12 +#define HI3559AV100_FIXED_475M 13 +#define HI3559AV100_FIXED_428M 14 +#define HI3559AV100_FIXED_400M 15 +#define HI3559AV100_FIXED_396M 16 +#define HI3559AV100_FIXED_300M 17 +#define HI3559AV100_FIXED_250M 18 +#define HI3559AV100_FIXED_198M 19 +#define HI3559AV100_FIXED_187p5M 20 +#define HI3559AV100_FIXED_150M 21 +#define HI3559AV100_FIXED_148p5M 22 +#define HI3559AV100_FIXED_125M 23 +#define HI3559AV100_FIXED_107M 24 +#define HI3559AV100_FIXED_100M 25 +#define HI3559AV100_FIXED_99M 26 +#define HI3559AV100_FIXED_74p25M 27 +#define HI3559AV100_FIXED_72M 28 +#define HI3559AV100_FIXED_60M 29 +#define HI3559AV100_FIXED_54M 30 +#define HI3559AV100_FIXED_50M 31 +#define HI3559AV100_FIXED_49p5M 32 +#define HI3559AV100_FIXED_37p125M 33 +#define HI3559AV100_FIXED_36M 34 +#define HI3559AV100_FIXED_32p4M 35 +#define HI3559AV100_FIXED_27M 36 +#define HI3559AV100_FIXED_25M 37 +#define HI3559AV100_FIXED_24M 38 +#define HI3559AV100_FIXED_12M 39 +#define HI3559AV100_FIXED_3M 40 +#define HI3559AV100_FIXED_1p6M 41 +#define HI3559AV100_FIXED_400K 42 +#define HI3559AV100_FIXED_100K 43 +#define HI3559AV100_FIXED_200M 44 +#define HI3559AV100_FIXED_75M 75 + +#define HI3559AV100_I2C0_CLK 50 +#define HI3559AV100_I2C1_CLK 51 +#define HI3559AV100_I2C2_CLK 52 +#define HI3559AV100_I2C3_CLK 53 +#define HI3559AV100_I2C4_CLK 54 +#define HI3559AV100_I2C5_CLK 55 +#define HI3559AV100_I2C6_CLK 56 +#define HI3559AV100_I2C7_CLK 57 +#define HI3559AV100_I2C8_CLK 58 +#define HI3559AV100_I2C9_CLK 59 +#define HI3559AV100_I2C10_CLK 60 +#define HI3559AV100_I2C11_CLK 61 + +#define HI3559AV100_SPI0_CLK 62 +#define HI3559AV100_SPI1_CLK 63 +#define HI3559AV100_SPI2_CLK 64 +#define HI3559AV100_SPI3_CLK 65 +#define HI3559AV100_SPI4_CLK 66 +#define HI3559AV100_SPI5_CLK 67 +#define HI3559AV100_SPI6_CLK 68 + +#define HI3559AV100_EDMAC_CLK 69 +#define HI3559AV100_EDMAC_AXICLK 70 +#define HI3559AV100_EDMAC1_CLK 71 +#define HI3559AV100_EDMAC1_AXICLK 72 +#define HI3559AV100_VDMAC_CLK 73 + +/* mux clocks */ +#define HI3559AV100_FMC_MUX 80 +#define HI3559AV100_SYSAPB_MUX 81 +#define HI3559AV100_UART_MUX 82 +#define HI3559AV100_SYSBUS_MUX 83 +#define HI3559AV100_A73_MUX 84 +#define HI3559AV100_MMC0_MUX 85 +#define HI3559AV100_MMC1_MUX 86 +#define HI3559AV100_MMC2_MUX 87 +#define HI3559AV100_MMC3_MUX 88 + +/* gate clocks */ +#define HI3559AV100_FMC_CLK 90 +#define HI3559AV100_UART0_CLK 91 +#define HI3559AV100_UART1_CLK 92 +#define HI3559AV100_UART2_CLK 93 +#define HI3559AV100_UART3_CLK 94 +#define HI3559AV100_UART4_CLK 95 +#define HI3559AV100_MMC0_CLK 96 +#define HI3559AV100_MMC1_CLK 97 +#define HI3559AV100_MMC2_CLK 98 +#define HI3559AV100_MMC3_CLK 99 + +#define HI3559AV100_ETH_CLK 100 +#define HI3559AV100_ETH_MACIF_CLK 101 +#define HI3559AV100_ETH1_CLK 102 +#define HI3559AV100_ETH1_MACIF_CLK 103 + +/* complex */ +#define HI3559AV100_MAC0_CLK 110 +#define HI3559AV100_MAC1_CLK 111 +#define HI3559AV100_SATA_CLK 112 +#define HI3559AV100_USB_CLK 113 +#define HI3559AV100_USB1_CLK 114 + +/* pll clocks 
*/ +#define HI3559AV100_APLL_CLK 250 +#define HI3559AV100_GPLL_CLK 251 + +#define HI3559AV100_CRG_NR_CLKS 256 + +#define HI3559AV100_SHUB_SOURCE_SOC_24M 0 +#define HI3559AV100_SHUB_SOURCE_SOC_200M 1 +#define HI3559AV100_SHUB_SOURCE_SOC_300M 2 +#define HI3559AV100_SHUB_SOURCE_PLL 3 +#define HI3559AV100_SHUB_SOURCE_CLK 4 + +#define HI3559AV100_SHUB_I2C0_CLK 10 +#define HI3559AV100_SHUB_I2C1_CLK 11 +#define HI3559AV100_SHUB_I2C2_CLK 12 +#define HI3559AV100_SHUB_I2C3_CLK 13 +#define HI3559AV100_SHUB_I2C4_CLK 14 +#define HI3559AV100_SHUB_I2C5_CLK 15 +#define HI3559AV100_SHUB_I2C6_CLK 16 +#define HI3559AV100_SHUB_I2C7_CLK 17 + +#define HI3559AV100_SHUB_SPI_SOURCE_CLK 20 +#define HI3559AV100_SHUB_SPI4_SOURCE_CLK 21 +#define HI3559AV100_SHUB_SPI0_CLK 22 +#define HI3559AV100_SHUB_SPI1_CLK 23 +#define HI3559AV100_SHUB_SPI2_CLK 24 +#define HI3559AV100_SHUB_SPI3_CLK 25 +#define HI3559AV100_SHUB_SPI4_CLK 26 + +#define HI3559AV100_SHUB_UART_CLK_32K 30 +#define HI3559AV100_SHUB_UART_SOURCE_CLK 31 +#define HI3559AV100_SHUB_UART_DIV_CLK 32 +#define HI3559AV100_SHUB_UART0_CLK 33 +#define HI3559AV100_SHUB_UART1_CLK 34 +#define HI3559AV100_SHUB_UART2_CLK 35 +#define HI3559AV100_SHUB_UART3_CLK 36 +#define HI3559AV100_SHUB_UART4_CLK 37 +#define HI3559AV100_SHUB_UART5_CLK 38 +#define HI3559AV100_SHUB_UART6_CLK 39 + +#define HI3559AV100_SHUB_EDMAC_CLK 40 + +#define HI3559AV100_SHUB_NR_CLKS 50 + +#endif /* __DTS_HI3559AV100_CLOCK_H */ + diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h index 82b1fc8d1ee0..2e60ce4d2622 100644 --- a/include/dt-bindings/clock/imx8-clock.h +++ b/include/dt-bindings/clock/imx8-clock.h @@ -7,134 +7,6 @@ #ifndef __DT_BINDINGS_CLOCK_IMX_H #define __DT_BINDINGS_CLOCK_IMX_H -/* SCU Clocks */ - -#define IMX_CLK_DUMMY 0 - -/* CPU */ -#define IMX_A35_CLK 1 - -/* LSIO SS */ -#define IMX_LSIO_MEM_CLK 2 -#define IMX_LSIO_BUS_CLK 3 -#define IMX_LSIO_PWM0_CLK 10 -#define IMX_LSIO_PWM1_CLK 11 -#define IMX_LSIO_PWM2_CLK 12 -#define IMX_LSIO_PWM3_CLK 13 -#define IMX_LSIO_PWM4_CLK 14 -#define IMX_LSIO_PWM5_CLK 15 -#define IMX_LSIO_PWM6_CLK 16 -#define IMX_LSIO_PWM7_CLK 17 -#define IMX_LSIO_GPT0_CLK 18 -#define IMX_LSIO_GPT1_CLK 19 -#define IMX_LSIO_GPT2_CLK 20 -#define IMX_LSIO_GPT3_CLK 21 -#define IMX_LSIO_GPT4_CLK 22 -#define IMX_LSIO_FSPI0_CLK 23 -#define IMX_LSIO_FSPI1_CLK 24 - -/* Connectivity SS */ -#define IMX_CONN_AXI_CLK_ROOT 30 -#define IMX_CONN_AHB_CLK_ROOT 31 -#define IMX_CONN_IPG_CLK_ROOT 32 -#define IMX_CONN_SDHC0_CLK 40 -#define IMX_CONN_SDHC1_CLK 41 -#define IMX_CONN_SDHC2_CLK 42 -#define IMX_CONN_ENET0_ROOT_CLK 43 -#define IMX_CONN_ENET0_BYPASS_CLK 44 -#define IMX_CONN_ENET0_RGMII_CLK 45 -#define IMX_CONN_ENET1_ROOT_CLK 46 -#define IMX_CONN_ENET1_BYPASS_CLK 47 -#define IMX_CONN_ENET1_RGMII_CLK 48 -#define IMX_CONN_GPMI_BCH_IO_CLK 49 -#define IMX_CONN_GPMI_BCH_CLK 50 -#define IMX_CONN_USB2_ACLK 51 -#define IMX_CONN_USB2_BUS_CLK 52 -#define IMX_CONN_USB2_LPM_CLK 53 - -/* HSIO SS */ -#define IMX_HSIO_AXI_CLK 60 -#define IMX_HSIO_PER_CLK 61 - -/* Display controller SS */ -#define IMX_DC_AXI_EXT_CLK 70 -#define IMX_DC_AXI_INT_CLK 71 -#define IMX_DC_CFG_CLK 72 -#define IMX_DC0_PLL0_CLK 80 -#define IMX_DC0_PLL1_CLK 81 -#define IMX_DC0_DISP0_CLK 82 -#define IMX_DC0_DISP1_CLK 83 -#define IMX_DC0_BYPASS0_CLK 84 -#define IMX_DC0_BYPASS1_CLK 85 - -/* MIPI-LVDS SS */ -#define IMX_MIPI_IPG_CLK 90 -#define IMX_MIPI0_PIXEL_CLK 100 -#define IMX_MIPI0_BYPASS_CLK 101 -#define IMX_MIPI0_LVDS_PIXEL_CLK 102 -#define IMX_MIPI0_LVDS_BYPASS_CLK 103 -#define 
IMX_MIPI0_LVDS_PHY_CLK 104 -#define IMX_MIPI0_I2C0_CLK 105 -#define IMX_MIPI0_I2C1_CLK 106 -#define IMX_MIPI0_PWM0_CLK 107 -#define IMX_MIPI1_PIXEL_CLK 108 -#define IMX_MIPI1_BYPASS_CLK 109 -#define IMX_MIPI1_LVDS_PIXEL_CLK 110 -#define IMX_MIPI1_LVDS_BYPASS_CLK 111 -#define IMX_MIPI1_LVDS_PHY_CLK 112 -#define IMX_MIPI1_I2C0_CLK 113 -#define IMX_MIPI1_I2C1_CLK 114 -#define IMX_MIPI1_PWM0_CLK 115 - -/* IMG SS */ -#define IMX_IMG_AXI_CLK 120 -#define IMX_IMG_IPG_CLK 121 -#define IMX_IMG_PXL_CLK 122 - -/* MIPI-CSI SS */ -#define IMX_CSI0_CORE_CLK 130 -#define IMX_CSI0_ESC_CLK 131 -#define IMX_CSI0_PWM0_CLK 132 -#define IMX_CSI0_I2C0_CLK 133 - -/* PARALLER CSI SS */ -#define IMX_PARALLEL_CSI_DPLL_CLK 140 -#define IMX_PARALLEL_CSI_PIXEL_CLK 141 -#define IMX_PARALLEL_CSI_MCLK_CLK 142 - -/* VPU SS */ -#define IMX_VPU_ENC_CLK 150 -#define IMX_VPU_DEC_CLK 151 - -/* GPU SS */ -#define IMX_GPU0_CORE_CLK 160 -#define IMX_GPU0_SHADER_CLK 161 - -/* ADMA SS */ -#define IMX_ADMA_IPG_CLK_ROOT 165 -#define IMX_ADMA_UART0_CLK 170 -#define IMX_ADMA_UART1_CLK 171 -#define IMX_ADMA_UART2_CLK 172 -#define IMX_ADMA_UART3_CLK 173 -#define IMX_ADMA_SPI0_CLK 174 -#define IMX_ADMA_SPI1_CLK 175 -#define IMX_ADMA_SPI2_CLK 176 -#define IMX_ADMA_SPI3_CLK 177 -#define IMX_ADMA_CAN0_CLK 178 -#define IMX_ADMA_CAN1_CLK 179 -#define IMX_ADMA_CAN2_CLK 180 -#define IMX_ADMA_I2C0_CLK 181 -#define IMX_ADMA_I2C1_CLK 182 -#define IMX_ADMA_I2C2_CLK 183 -#define IMX_ADMA_I2C3_CLK 184 -#define IMX_ADMA_FTM0_CLK 185 -#define IMX_ADMA_FTM1_CLK 186 -#define IMX_ADMA_ADC0_CLK 187 -#define IMX_ADMA_PWM_CLK 188 -#define IMX_ADMA_LCD_CLK 189 - -#define IMX_SCU_CLK_END 190 - /* LPCG clocks */ /* LSIO SS LPCG */ diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index d24b627cb2e7..01e8bab1d767 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -241,6 +241,8 @@ #define IMX8MN_CLK_CLKOUT2_DIV 219 #define IMX8MN_CLK_CLKOUT2 220 -#define IMX8MN_CLK_END 221 +#define IMX8MN_CLK_M7_CORE 221 + +#define IMX8MN_CLK_END 222 #endif diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index 82e907ce7bdd..afa74d7ba100 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -405,25 +405,6 @@ #define IMX8MQ_VIDEO2_PLL1_REF_SEL 266 -#define IMX8MQ_SYS1_PLL_40M_CG 267 -#define IMX8MQ_SYS1_PLL_80M_CG 268 -#define IMX8MQ_SYS1_PLL_100M_CG 269 -#define IMX8MQ_SYS1_PLL_133M_CG 270 -#define IMX8MQ_SYS1_PLL_160M_CG 271 -#define IMX8MQ_SYS1_PLL_200M_CG 272 -#define IMX8MQ_SYS1_PLL_266M_CG 273 -#define IMX8MQ_SYS1_PLL_400M_CG 274 -#define IMX8MQ_SYS1_PLL_800M_CG 275 -#define IMX8MQ_SYS2_PLL_50M_CG 276 -#define IMX8MQ_SYS2_PLL_100M_CG 277 -#define IMX8MQ_SYS2_PLL_125M_CG 278 -#define IMX8MQ_SYS2_PLL_166M_CG 279 -#define IMX8MQ_SYS2_PLL_200M_CG 280 -#define IMX8MQ_SYS2_PLL_250M_CG 281 -#define IMX8MQ_SYS2_PLL_333M_CG 282 -#define IMX8MQ_SYS2_PLL_500M_CG 283 -#define IMX8MQ_SYS2_PLL_1000M_CG 284 - #define IMX8MQ_CLK_GPU_CORE 285 #define IMX8MQ_CLK_GPU_SHADER 286 #define IMX8MQ_CLK_M4_CORE 287 diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h index 063791b01ab3..d7aa42c08ded 100644 --- a/include/dt-bindings/clock/ingenic,sysost.h +++ b/include/dt-bindings/clock/ingenic,sysost.h @@ -13,4 +13,23 @@ #define OST_CLK_PERCPU_TIMER2 3 #define OST_CLK_PERCPU_TIMER3 4 +#define OST_CLK_EVENT_TIMER 1 + +#define OST_CLK_EVENT_TIMER0 0 +#define 
OST_CLK_EVENT_TIMER1 1 +#define OST_CLK_EVENT_TIMER2 2 +#define OST_CLK_EVENT_TIMER3 3 +#define OST_CLK_EVENT_TIMER4 4 +#define OST_CLK_EVENT_TIMER5 5 +#define OST_CLK_EVENT_TIMER6 6 +#define OST_CLK_EVENT_TIMER7 7 +#define OST_CLK_EVENT_TIMER8 8 +#define OST_CLK_EVENT_TIMER9 9 +#define OST_CLK_EVENT_TIMER10 10 +#define OST_CLK_EVENT_TIMER11 11 +#define OST_CLK_EVENT_TIMER12 12 +#define OST_CLK_EVENT_TIMER13 13 +#define OST_CLK_EVENT_TIMER14 14 +#define OST_CLK_EVENT_TIMER15 15 + #endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */ diff --git a/include/dt-bindings/clock/jz4760-cgu.h b/include/dt-bindings/clock/jz4760-cgu.h new file mode 100644 index 000000000000..4bb2e19c4743 --- /dev/null +++ b/include/dt-bindings/clock/jz4760-cgu.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4760-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ + +#define JZ4760_CLK_EXT 0 +#define JZ4760_CLK_OSC32K 1 +#define JZ4760_CLK_PLL0 2 +#define JZ4760_CLK_PLL0_HALF 3 +#define JZ4760_CLK_PLL1 4 +#define JZ4760_CLK_CCLK 5 +#define JZ4760_CLK_HCLK 6 +#define JZ4760_CLK_SCLK 7 +#define JZ4760_CLK_H2CLK 8 +#define JZ4760_CLK_MCLK 9 +#define JZ4760_CLK_PCLK 10 +#define JZ4760_CLK_MMC_MUX 11 +#define JZ4760_CLK_MMC0 12 +#define JZ4760_CLK_MMC1 13 +#define JZ4760_CLK_MMC2 14 +#define JZ4760_CLK_CIM 15 +#define JZ4760_CLK_UHC 16 +#define JZ4760_CLK_GPU 17 +#define JZ4760_CLK_GPS 18 +#define JZ4760_CLK_SSI_MUX 19 +#define JZ4760_CLK_PCM 20 +#define JZ4760_CLK_I2S 21 +#define JZ4760_CLK_OTG 22 +#define JZ4760_CLK_SSI0 23 +#define JZ4760_CLK_SSI1 24 +#define JZ4760_CLK_SSI2 25 +#define JZ4760_CLK_DMA 26 +#define JZ4760_CLK_I2C0 27 +#define JZ4760_CLK_I2C1 28 +#define JZ4760_CLK_UART0 29 +#define JZ4760_CLK_UART1 30 +#define JZ4760_CLK_UART2 31 +#define JZ4760_CLK_UART3 32 +#define JZ4760_CLK_IPU 33 +#define JZ4760_CLK_ADC 34 +#define JZ4760_CLK_AIC 35 +#define JZ4760_CLK_VPU 36 +#define JZ4760_CLK_UHC_PHY 37 +#define JZ4760_CLK_OTG_PHY 38 +#define JZ4760_CLK_EXT512 39 +#define JZ4760_CLK_RTC 40 +#define JZ4760_CLK_LPCLK_DIV 41 +#define JZ4760_CLK_TVE 42 +#define JZ4760_CLK_LPCLK 43 + +#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h index 3acebe937bfc..3d00c98b9654 100644 --- a/include/dt-bindings/clock/mt8173-clk.h +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -186,7 +186,6 @@ #define CLK_INFRA_PMICWRAP 11 #define CLK_INFRA_CLK_13M 12 #define CLK_INFRA_CA53SEL 13 -#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */ #define CLK_INFRA_CA72SEL 14 #define CLK_INFRA_NR_CLK 15 diff --git a/include/dt-bindings/clock/mt8192-clk.h b/include/dt-bindings/clock/mt8192-clk.h new file mode 100644 index 000000000000..5ab68f15a256 --- /dev/null +++ b/include/dt-bindings/clock/mt8192-clk.h @@ -0,0 +1,585 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + */ + +#ifndef _DT_BINDINGS_CLK_MT8192_H +#define _DT_BINDINGS_CLK_MT8192_H + +/* TOPCKGEN */ + +#define CLK_TOP_AXI_SEL 0 +#define CLK_TOP_SPM_SEL 1 +#define CLK_TOP_SCP_SEL 2 +#define CLK_TOP_BUS_AXIMEM_SEL 3 +#define CLK_TOP_DISP_SEL 4 +#define CLK_TOP_MDP_SEL 5 +#define CLK_TOP_IMG1_SEL 6 +#define CLK_TOP_IMG2_SEL 7 +#define CLK_TOP_IPE_SEL 8 +#define CLK_TOP_DPE_SEL 9 +#define CLK_TOP_CAM_SEL 10 +#define CLK_TOP_CCU_SEL 11 +#define CLK_TOP_DSP7_SEL 12 +#define CLK_TOP_MFG_REF_SEL 13 +#define CLK_TOP_MFG_PLL_SEL 14 +#define CLK_TOP_CAMTG_SEL 15 +#define CLK_TOP_CAMTG2_SEL 16 +#define CLK_TOP_CAMTG3_SEL 17 +#define CLK_TOP_CAMTG4_SEL 18 +#define CLK_TOP_CAMTG5_SEL 19 +#define CLK_TOP_CAMTG6_SEL 20 +#define CLK_TOP_UART_SEL 21 +#define CLK_TOP_SPI_SEL 22 +#define CLK_TOP_MSDC50_0_H_SEL 23 +#define CLK_TOP_MSDC50_0_SEL 24 +#define CLK_TOP_MSDC30_1_SEL 25 +#define CLK_TOP_MSDC30_2_SEL 26 +#define CLK_TOP_AUDIO_SEL 27 +#define CLK_TOP_AUD_INTBUS_SEL 28 +#define CLK_TOP_PWRAP_ULPOSC_SEL 29 +#define CLK_TOP_ATB_SEL 30 +#define CLK_TOP_DPI_SEL 31 +#define CLK_TOP_SCAM_SEL 32 +#define CLK_TOP_DISP_PWM_SEL 33 +#define CLK_TOP_USB_TOP_SEL 34 +#define CLK_TOP_SSUSB_XHCI_SEL 35 +#define CLK_TOP_I2C_SEL 36 +#define CLK_TOP_SENINF_SEL 37 +#define CLK_TOP_SENINF1_SEL 38 +#define CLK_TOP_SENINF2_SEL 39 +#define CLK_TOP_SENINF3_SEL 40 +#define CLK_TOP_TL_SEL 41 +#define CLK_TOP_DXCC_SEL 42 +#define CLK_TOP_AUD_ENGEN1_SEL 43 +#define CLK_TOP_AUD_ENGEN2_SEL 44 +#define CLK_TOP_AES_UFSFDE_SEL 45 +#define CLK_TOP_UFS_SEL 46 +#define CLK_TOP_AUD_1_SEL 47 +#define CLK_TOP_AUD_2_SEL 48 +#define CLK_TOP_ADSP_SEL 49 +#define CLK_TOP_DPMAIF_MAIN_SEL 50 +#define CLK_TOP_VENC_SEL 51 +#define CLK_TOP_VDEC_SEL 52 +#define CLK_TOP_CAMTM_SEL 53 +#define CLK_TOP_PWM_SEL 54 +#define CLK_TOP_AUDIO_H_SEL 55 +#define CLK_TOP_SPMI_MST_SEL 56 +#define CLK_TOP_AES_MSDCFDE_SEL 57 +#define CLK_TOP_SFLASH_SEL 58 +#define CLK_TOP_APLL_I2S0_M_SEL 59 +#define CLK_TOP_APLL_I2S1_M_SEL 60 +#define CLK_TOP_APLL_I2S2_M_SEL 61 +#define CLK_TOP_APLL_I2S3_M_SEL 62 +#define CLK_TOP_APLL_I2S4_M_SEL 63 +#define CLK_TOP_APLL_I2S5_M_SEL 64 +#define CLK_TOP_APLL_I2S6_M_SEL 65 +#define CLK_TOP_APLL_I2S7_M_SEL 66 +#define CLK_TOP_APLL_I2S8_M_SEL 67 +#define CLK_TOP_APLL_I2S9_M_SEL 68 +#define CLK_TOP_MAINPLL_D3 69 +#define CLK_TOP_MAINPLL_D4 70 +#define CLK_TOP_MAINPLL_D4_D2 71 +#define CLK_TOP_MAINPLL_D4_D4 72 +#define CLK_TOP_MAINPLL_D4_D8 73 +#define CLK_TOP_MAINPLL_D4_D16 74 +#define CLK_TOP_MAINPLL_D5 75 +#define CLK_TOP_MAINPLL_D5_D2 76 +#define CLK_TOP_MAINPLL_D5_D4 77 +#define CLK_TOP_MAINPLL_D5_D8 78 +#define CLK_TOP_MAINPLL_D6 79 +#define CLK_TOP_MAINPLL_D6_D2 80 +#define CLK_TOP_MAINPLL_D6_D4 81 +#define CLK_TOP_MAINPLL_D7 82 +#define CLK_TOP_MAINPLL_D7_D2 83 +#define CLK_TOP_MAINPLL_D7_D4 84 +#define CLK_TOP_MAINPLL_D7_D8 85 +#define CLK_TOP_UNIVPLL_D3 86 +#define CLK_TOP_UNIVPLL_D4 87 +#define CLK_TOP_UNIVPLL_D4_D2 88 +#define CLK_TOP_UNIVPLL_D4_D4 89 +#define CLK_TOP_UNIVPLL_D4_D8 90 +#define CLK_TOP_UNIVPLL_D5 91 +#define CLK_TOP_UNIVPLL_D5_D2 92 +#define CLK_TOP_UNIVPLL_D5_D4 93 +#define CLK_TOP_UNIVPLL_D5_D8 94 +#define CLK_TOP_UNIVPLL_D6 95 +#define CLK_TOP_UNIVPLL_D6_D2 96 +#define CLK_TOP_UNIVPLL_D6_D4 97 +#define CLK_TOP_UNIVPLL_D6_D8 98 +#define CLK_TOP_UNIVPLL_D6_D16 99 +#define CLK_TOP_UNIVPLL_D7 100 +#define CLK_TOP_APLL1 101 +#define CLK_TOP_APLL1_D2 102 +#define CLK_TOP_APLL1_D4 103 +#define CLK_TOP_APLL1_D8 104 +#define CLK_TOP_APLL2 105 +#define CLK_TOP_APLL2_D2 
106 +#define CLK_TOP_APLL2_D4 107 +#define CLK_TOP_APLL2_D8 108 +#define CLK_TOP_MMPLL_D4 109 +#define CLK_TOP_MMPLL_D4_D2 110 +#define CLK_TOP_MMPLL_D5 111 +#define CLK_TOP_MMPLL_D5_D2 112 +#define CLK_TOP_MMPLL_D6 113 +#define CLK_TOP_MMPLL_D6_D2 114 +#define CLK_TOP_MMPLL_D7 115 +#define CLK_TOP_MMPLL_D9 116 +#define CLK_TOP_APUPLL 117 +#define CLK_TOP_NPUPLL 118 +#define CLK_TOP_TVDPLL 119 +#define CLK_TOP_TVDPLL_D2 120 +#define CLK_TOP_TVDPLL_D4 121 +#define CLK_TOP_TVDPLL_D8 122 +#define CLK_TOP_TVDPLL_D16 123 +#define CLK_TOP_MSDCPLL 124 +#define CLK_TOP_MSDCPLL_D2 125 +#define CLK_TOP_MSDCPLL_D4 126 +#define CLK_TOP_ULPOSC 127 +#define CLK_TOP_OSC_D2 128 +#define CLK_TOP_OSC_D4 129 +#define CLK_TOP_OSC_D8 130 +#define CLK_TOP_OSC_D10 131 +#define CLK_TOP_OSC_D16 132 +#define CLK_TOP_OSC_D20 133 +#define CLK_TOP_CSW_F26M_D2 134 +#define CLK_TOP_ADSPPLL 135 +#define CLK_TOP_UNIVPLL_192M 136 +#define CLK_TOP_UNIVPLL_192M_D2 137 +#define CLK_TOP_UNIVPLL_192M_D4 138 +#define CLK_TOP_UNIVPLL_192M_D8 139 +#define CLK_TOP_UNIVPLL_192M_D16 140 +#define CLK_TOP_UNIVPLL_192M_D32 141 +#define CLK_TOP_APLL12_DIV0 142 +#define CLK_TOP_APLL12_DIV1 143 +#define CLK_TOP_APLL12_DIV2 144 +#define CLK_TOP_APLL12_DIV3 145 +#define CLK_TOP_APLL12_DIV4 146 +#define CLK_TOP_APLL12_DIVB 147 +#define CLK_TOP_APLL12_DIV5 148 +#define CLK_TOP_APLL12_DIV6 149 +#define CLK_TOP_APLL12_DIV7 150 +#define CLK_TOP_APLL12_DIV8 151 +#define CLK_TOP_APLL12_DIV9 152 +#define CLK_TOP_SSUSB_TOP_REF 153 +#define CLK_TOP_SSUSB_PHY_REF 154 +#define CLK_TOP_NR_CLK 155 + +/* INFRACFG */ + +#define CLK_INFRA_PMIC_TMR 0 +#define CLK_INFRA_PMIC_AP 1 +#define CLK_INFRA_PMIC_MD 2 +#define CLK_INFRA_PMIC_CONN 3 +#define CLK_INFRA_SCPSYS 4 +#define CLK_INFRA_SEJ 5 +#define CLK_INFRA_APXGPT 6 +#define CLK_INFRA_GCE 7 +#define CLK_INFRA_GCE2 8 +#define CLK_INFRA_THERM 9 +#define CLK_INFRA_I2C0 10 +#define CLK_INFRA_AP_DMA_PSEUDO 11 +#define CLK_INFRA_I2C2 12 +#define CLK_INFRA_I2C3 13 +#define CLK_INFRA_PWM_H 14 +#define CLK_INFRA_PWM1 15 +#define CLK_INFRA_PWM2 16 +#define CLK_INFRA_PWM3 17 +#define CLK_INFRA_PWM4 18 +#define CLK_INFRA_PWM 19 +#define CLK_INFRA_UART0 20 +#define CLK_INFRA_UART1 21 +#define CLK_INFRA_UART2 22 +#define CLK_INFRA_UART3 23 +#define CLK_INFRA_GCE_26M 24 +#define CLK_INFRA_CQ_DMA_FPC 25 +#define CLK_INFRA_BTIF 26 +#define CLK_INFRA_SPI0 27 +#define CLK_INFRA_MSDC0 28 +#define CLK_INFRA_MSDC1 29 +#define CLK_INFRA_MSDC2 30 +#define CLK_INFRA_MSDC0_SRC 31 +#define CLK_INFRA_GCPU 32 +#define CLK_INFRA_TRNG 33 +#define CLK_INFRA_AUXADC 34 +#define CLK_INFRA_CPUM 35 +#define CLK_INFRA_CCIF1_AP 36 +#define CLK_INFRA_CCIF1_MD 37 +#define CLK_INFRA_AUXADC_MD 38 +#define CLK_INFRA_PCIE_TL_26M 39 +#define CLK_INFRA_MSDC1_SRC 40 +#define CLK_INFRA_MSDC2_SRC 41 +#define CLK_INFRA_PCIE_TL_96M 42 +#define CLK_INFRA_PCIE_PL_P_250M 43 +#define CLK_INFRA_DEVICE_APC 44 +#define CLK_INFRA_CCIF_AP 45 +#define CLK_INFRA_DEBUGSYS 46 +#define CLK_INFRA_AUDIO 47 +#define CLK_INFRA_CCIF_MD 48 +#define CLK_INFRA_DXCC_SEC_CORE 49 +#define CLK_INFRA_DXCC_AO 50 +#define CLK_INFRA_DBG_TRACE 51 +#define CLK_INFRA_DEVMPU_B 52 +#define CLK_INFRA_DRAMC_F26M 53 +#define CLK_INFRA_IRTX 54 +#define CLK_INFRA_SSUSB 55 +#define CLK_INFRA_DISP_PWM 56 +#define CLK_INFRA_CLDMA_B 57 +#define CLK_INFRA_AUDIO_26M_B 58 +#define CLK_INFRA_MODEM_TEMP_SHARE 59 +#define CLK_INFRA_SPI1 60 +#define CLK_INFRA_I2C4 61 +#define CLK_INFRA_SPI2 62 +#define CLK_INFRA_SPI3 63 +#define CLK_INFRA_UNIPRO_SYS 64 +#define CLK_INFRA_UNIPRO_TICK 65 +#define 
CLK_INFRA_UFS_MP_SAP_B 66 +#define CLK_INFRA_MD32_B 67 +#define CLK_INFRA_UNIPRO_MBIST 68 +#define CLK_INFRA_I2C5 69 +#define CLK_INFRA_I2C5_ARBITER 70 +#define CLK_INFRA_I2C5_IMM 71 +#define CLK_INFRA_I2C1_ARBITER 72 +#define CLK_INFRA_I2C1_IMM 73 +#define CLK_INFRA_I2C2_ARBITER 74 +#define CLK_INFRA_I2C2_IMM 75 +#define CLK_INFRA_SPI4 76 +#define CLK_INFRA_SPI5 77 +#define CLK_INFRA_CQ_DMA 78 +#define CLK_INFRA_UFS 79 +#define CLK_INFRA_AES_UFSFDE 80 +#define CLK_INFRA_UFS_TICK 81 +#define CLK_INFRA_SSUSB_XHCI 82 +#define CLK_INFRA_MSDC0_SELF 83 +#define CLK_INFRA_MSDC1_SELF 84 +#define CLK_INFRA_MSDC2_SELF 85 +#define CLK_INFRA_UFS_AXI 86 +#define CLK_INFRA_I2C6 87 +#define CLK_INFRA_AP_MSDC0 88 +#define CLK_INFRA_MD_MSDC0 89 +#define CLK_INFRA_CCIF5_AP 90 +#define CLK_INFRA_CCIF5_MD 91 +#define CLK_INFRA_PCIE_TOP_H_133M 92 +#define CLK_INFRA_FLASHIF_TOP_H_133M 93 +#define CLK_INFRA_PCIE_PERI_26M 94 +#define CLK_INFRA_CCIF2_AP 95 +#define CLK_INFRA_CCIF2_MD 96 +#define CLK_INFRA_CCIF3_AP 97 +#define CLK_INFRA_CCIF3_MD 98 +#define CLK_INFRA_SEJ_F13M 99 +#define CLK_INFRA_AES 100 +#define CLK_INFRA_I2C7 101 +#define CLK_INFRA_I2C8 102 +#define CLK_INFRA_FBIST2FPC 103 +#define CLK_INFRA_DEVICE_APC_SYNC 104 +#define CLK_INFRA_DPMAIF_MAIN 105 +#define CLK_INFRA_PCIE_TL_32K 106 +#define CLK_INFRA_CCIF4_AP 107 +#define CLK_INFRA_CCIF4_MD 108 +#define CLK_INFRA_SPI6 109 +#define CLK_INFRA_SPI7 110 +#define CLK_INFRA_133M 111 +#define CLK_INFRA_66M 112 +#define CLK_INFRA_66M_PERI_BUS 113 +#define CLK_INFRA_FREE_DCM_133M 114 +#define CLK_INFRA_FREE_DCM_66M 115 +#define CLK_INFRA_PERI_BUS_DCM_133M 116 +#define CLK_INFRA_PERI_BUS_DCM_66M 117 +#define CLK_INFRA_FLASHIF_PERI_26M 118 +#define CLK_INFRA_FLASHIF_SFLASH 119 +#define CLK_INFRA_AP_DMA 120 +#define CLK_INFRA_NR_CLK 121 + +/* PERICFG */ + +#define CLK_PERI_PERIAXI 0 +#define CLK_PERI_NR_CLK 1 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_MAINPLL 0 +#define CLK_APMIXED_UNIVPLL 1 +#define CLK_APMIXED_USBPLL 2 +#define CLK_APMIXED_MSDCPLL 3 +#define CLK_APMIXED_MMPLL 4 +#define CLK_APMIXED_ADSPPLL 5 +#define CLK_APMIXED_MFGPLL 6 +#define CLK_APMIXED_TVDPLL 7 +#define CLK_APMIXED_APLL1 8 +#define CLK_APMIXED_APLL2 9 +#define CLK_APMIXED_MIPID26M 10 +#define CLK_APMIXED_NR_CLK 11 + +/* SCP_ADSP */ + +#define CLK_SCP_ADSP_AUDIODSP 0 +#define CLK_SCP_ADSP_NR_CLK 1 + +/* IMP_IIC_WRAP_C */ + +#define CLK_IMP_IIC_WRAP_C_I2C10 0 +#define CLK_IMP_IIC_WRAP_C_I2C11 1 +#define CLK_IMP_IIC_WRAP_C_I2C12 2 +#define CLK_IMP_IIC_WRAP_C_I2C13 3 +#define CLK_IMP_IIC_WRAP_C_NR_CLK 4 + +/* AUDSYS */ + +#define CLK_AUD_AFE 0 +#define CLK_AUD_22M 1 +#define CLK_AUD_24M 2 +#define CLK_AUD_APLL2_TUNER 3 +#define CLK_AUD_APLL_TUNER 4 +#define CLK_AUD_TDM 5 +#define CLK_AUD_ADC 6 +#define CLK_AUD_DAC 7 +#define CLK_AUD_DAC_PREDIS 8 +#define CLK_AUD_TML 9 +#define CLK_AUD_NLE 10 +#define CLK_AUD_I2S1_B 11 +#define CLK_AUD_I2S2_B 12 +#define CLK_AUD_I2S3_B 13 +#define CLK_AUD_I2S4_B 14 +#define CLK_AUD_CONNSYS_I2S_ASRC 15 +#define CLK_AUD_GENERAL1_ASRC 16 +#define CLK_AUD_GENERAL2_ASRC 17 +#define CLK_AUD_DAC_HIRES 18 +#define CLK_AUD_ADC_HIRES 19 +#define CLK_AUD_ADC_HIRES_TML 20 +#define CLK_AUD_ADDA6_ADC 21 +#define CLK_AUD_ADDA6_ADC_HIRES 22 +#define CLK_AUD_3RD_DAC 23 +#define CLK_AUD_3RD_DAC_PREDIS 24 +#define CLK_AUD_3RD_DAC_TML 25 +#define CLK_AUD_3RD_DAC_HIRES 26 +#define CLK_AUD_I2S5_B 27 +#define CLK_AUD_I2S6_B 28 +#define CLK_AUD_I2S7_B 29 +#define CLK_AUD_I2S8_B 30 +#define CLK_AUD_I2S9_B 31 +#define CLK_AUD_NR_CLK 32 + +/* IMP_IIC_WRAP_E */ + +#define 
CLK_IMP_IIC_WRAP_E_I2C3 0 +#define CLK_IMP_IIC_WRAP_E_NR_CLK 1 + +/* IMP_IIC_WRAP_S */ + +#define CLK_IMP_IIC_WRAP_S_I2C7 0 +#define CLK_IMP_IIC_WRAP_S_I2C8 1 +#define CLK_IMP_IIC_WRAP_S_I2C9 2 +#define CLK_IMP_IIC_WRAP_S_NR_CLK 3 + +/* IMP_IIC_WRAP_WS */ + +#define CLK_IMP_IIC_WRAP_WS_I2C1 0 +#define CLK_IMP_IIC_WRAP_WS_I2C2 1 +#define CLK_IMP_IIC_WRAP_WS_I2C4 2 +#define CLK_IMP_IIC_WRAP_WS_NR_CLK 3 + +/* IMP_IIC_WRAP_W */ + +#define CLK_IMP_IIC_WRAP_W_I2C5 0 +#define CLK_IMP_IIC_WRAP_W_NR_CLK 1 + +/* IMP_IIC_WRAP_N */ + +#define CLK_IMP_IIC_WRAP_N_I2C0 0 +#define CLK_IMP_IIC_WRAP_N_I2C6 1 +#define CLK_IMP_IIC_WRAP_N_NR_CLK 2 + +/* MSDC_TOP */ + +#define CLK_MSDC_TOP_AES_0P 0 +#define CLK_MSDC_TOP_SRC_0P 1 +#define CLK_MSDC_TOP_SRC_1P 2 +#define CLK_MSDC_TOP_SRC_2P 3 +#define CLK_MSDC_TOP_P_MSDC0 4 +#define CLK_MSDC_TOP_P_MSDC1 5 +#define CLK_MSDC_TOP_P_MSDC2 6 +#define CLK_MSDC_TOP_P_CFG 7 +#define CLK_MSDC_TOP_AXI 8 +#define CLK_MSDC_TOP_H_MST_0P 9 +#define CLK_MSDC_TOP_H_MST_1P 10 +#define CLK_MSDC_TOP_H_MST_2P 11 +#define CLK_MSDC_TOP_MEM_OFF_DLY_26M 12 +#define CLK_MSDC_TOP_32K 13 +#define CLK_MSDC_TOP_AHB2AXI_BRG_AXI 14 +#define CLK_MSDC_TOP_NR_CLK 15 + +/* MSDC */ + +#define CLK_MSDC_AXI_WRAP 0 +#define CLK_MSDC_NR_CLK 1 + +/* MFGCFG */ + +#define CLK_MFG_BG3D 0 +#define CLK_MFG_NR_CLK 1 + +/* MMSYS */ + +#define CLK_MM_DISP_MUTEX0 0 +#define CLK_MM_DISP_CONFIG 1 +#define CLK_MM_DISP_OVL0 2 +#define CLK_MM_DISP_RDMA0 3 +#define CLK_MM_DISP_OVL0_2L 4 +#define CLK_MM_DISP_WDMA0 5 +#define CLK_MM_DISP_UFBC_WDMA0 6 +#define CLK_MM_DISP_RSZ0 7 +#define CLK_MM_DISP_AAL0 8 +#define CLK_MM_DISP_CCORR0 9 +#define CLK_MM_DISP_DITHER0 10 +#define CLK_MM_SMI_INFRA 11 +#define CLK_MM_DISP_GAMMA0 12 +#define CLK_MM_DISP_POSTMASK0 13 +#define CLK_MM_DISP_DSC_WRAP0 14 +#define CLK_MM_DSI0 15 +#define CLK_MM_DISP_COLOR0 16 +#define CLK_MM_SMI_COMMON 17 +#define CLK_MM_DISP_FAKE_ENG0 18 +#define CLK_MM_DISP_FAKE_ENG1 19 +#define CLK_MM_MDP_TDSHP4 20 +#define CLK_MM_MDP_RSZ4 21 +#define CLK_MM_MDP_AAL4 22 +#define CLK_MM_MDP_HDR4 23 +#define CLK_MM_MDP_RDMA4 24 +#define CLK_MM_MDP_COLOR4 25 +#define CLK_MM_DISP_Y2R0 26 +#define CLK_MM_SMI_GALS 27 +#define CLK_MM_DISP_OVL2_2L 28 +#define CLK_MM_DISP_RDMA4 29 +#define CLK_MM_DISP_DPI0 30 +#define CLK_MM_SMI_IOMMU 31 +#define CLK_MM_DSI_DSI0 32 +#define CLK_MM_DPI_DPI0 33 +#define CLK_MM_26MHZ 34 +#define CLK_MM_32KHZ 35 +#define CLK_MM_NR_CLK 36 + +/* IMGSYS */ + +#define CLK_IMG_LARB9 0 +#define CLK_IMG_LARB10 1 +#define CLK_IMG_DIP 2 +#define CLK_IMG_GALS 3 +#define CLK_IMG_NR_CLK 4 + +/* IMGSYS2 */ + +#define CLK_IMG2_LARB11 0 +#define CLK_IMG2_LARB12 1 +#define CLK_IMG2_MFB 2 +#define CLK_IMG2_WPE 3 +#define CLK_IMG2_MSS 4 +#define CLK_IMG2_GALS 5 +#define CLK_IMG2_NR_CLK 6 + +/* VDECSYS_SOC */ + +#define CLK_VDEC_SOC_LARB1 0 +#define CLK_VDEC_SOC_LAT 1 +#define CLK_VDEC_SOC_LAT_ACTIVE 2 +#define CLK_VDEC_SOC_VDEC 3 +#define CLK_VDEC_SOC_VDEC_ACTIVE 4 +#define CLK_VDEC_SOC_NR_CLK 5 + +/* VDECSYS */ + +#define CLK_VDEC_LARB1 0 +#define CLK_VDEC_LAT 1 +#define CLK_VDEC_LAT_ACTIVE 2 +#define CLK_VDEC_VDEC 3 +#define CLK_VDEC_ACTIVE 4 +#define CLK_VDEC_NR_CLK 5 + +/* VENCSYS */ + +#define CLK_VENC_SET0_LARB 0 +#define CLK_VENC_SET1_VENC 1 +#define CLK_VENC_SET2_JPGENC 2 +#define CLK_VENC_SET5_GALS 3 +#define CLK_VENC_NR_CLK 4 + +/* CAMSYS */ + +#define CLK_CAM_LARB13 0 +#define CLK_CAM_DFP_VAD 1 +#define CLK_CAM_LARB14 2 +#define CLK_CAM_CAM 3 +#define CLK_CAM_CAMTG 4 +#define CLK_CAM_SENINF 5 +#define CLK_CAM_CAMSV0 6 +#define CLK_CAM_CAMSV1 7 
+#define CLK_CAM_CAMSV2 8 +#define CLK_CAM_CAMSV3 9 +#define CLK_CAM_CCU0 10 +#define CLK_CAM_CCU1 11 +#define CLK_CAM_MRAW0 12 +#define CLK_CAM_FAKE_ENG 13 +#define CLK_CAM_CCU_GALS 14 +#define CLK_CAM_CAM2MM_GALS 15 +#define CLK_CAM_NR_CLK 16 + +/* CAMSYS_RAWA */ + +#define CLK_CAM_RAWA_LARBX 0 +#define CLK_CAM_RAWA_CAM 1 +#define CLK_CAM_RAWA_CAMTG 2 +#define CLK_CAM_RAWA_NR_CLK 3 + +/* CAMSYS_RAWB */ + +#define CLK_CAM_RAWB_LARBX 0 +#define CLK_CAM_RAWB_CAM 1 +#define CLK_CAM_RAWB_CAMTG 2 +#define CLK_CAM_RAWB_NR_CLK 3 + +/* CAMSYS_RAWC */ + +#define CLK_CAM_RAWC_LARBX 0 +#define CLK_CAM_RAWC_CAM 1 +#define CLK_CAM_RAWC_CAMTG 2 +#define CLK_CAM_RAWC_NR_CLK 3 + +/* IPESYS */ + +#define CLK_IPE_LARB19 0 +#define CLK_IPE_LARB20 1 +#define CLK_IPE_SMI_SUBCOM 2 +#define CLK_IPE_FD 3 +#define CLK_IPE_FE 4 +#define CLK_IPE_RSC 5 +#define CLK_IPE_DPE 6 +#define CLK_IPE_GALS 7 +#define CLK_IPE_NR_CLK 8 + +/* MDPSYS */ + +#define CLK_MDP_RDMA0 0 +#define CLK_MDP_TDSHP0 1 +#define CLK_MDP_IMG_DL_ASYNC0 2 +#define CLK_MDP_IMG_DL_ASYNC1 3 +#define CLK_MDP_RDMA1 4 +#define CLK_MDP_TDSHP1 5 +#define CLK_MDP_SMI0 6 +#define CLK_MDP_APB_BUS 7 +#define CLK_MDP_WROT0 8 +#define CLK_MDP_RSZ0 9 +#define CLK_MDP_HDR0 10 +#define CLK_MDP_MUTEX0 11 +#define CLK_MDP_WROT1 12 +#define CLK_MDP_RSZ1 13 +#define CLK_MDP_HDR1 14 +#define CLK_MDP_FAKE_ENG0 15 +#define CLK_MDP_AAL0 16 +#define CLK_MDP_AAL1 17 +#define CLK_MDP_COLOR0 18 +#define CLK_MDP_COLOR1 19 +#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 20 +#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 21 +#define CLK_MDP_NR_CLK 22 + +#endif /* _DT_BINDINGS_CLK_MT8192_H */ diff --git a/include/dt-bindings/clock/qcom,camcc-sm8250.h b/include/dt-bindings/clock/qcom,camcc-sm8250.h new file mode 100644 index 000000000000..383ead17608d --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sm8250.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_AXI_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 6 +#define CAM_CC_CAMNOC_DCD_XO_CLK 7 +#define CAM_CC_CCI_0_CLK 8 +#define CAM_CC_CCI_0_CLK_SRC 9 +#define CAM_CC_CCI_1_CLK 10 +#define CAM_CC_CCI_1_CLK_SRC 11 +#define CAM_CC_CORE_AHB_CLK 12 +#define CAM_CC_CPAS_AHB_CLK 13 +#define CAM_CC_CPHY_RX_CLK_SRC 14 +#define CAM_CC_CSI0PHYTIMER_CLK 15 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16 +#define CAM_CC_CSI1PHYTIMER_CLK 17 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18 +#define CAM_CC_CSI2PHYTIMER_CLK 19 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20 +#define CAM_CC_CSI3PHYTIMER_CLK 21 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22 +#define CAM_CC_CSI4PHYTIMER_CLK 23 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 24 +#define CAM_CC_CSI5PHYTIMER_CLK 25 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 26 +#define CAM_CC_CSIPHY0_CLK 27 +#define CAM_CC_CSIPHY1_CLK 28 +#define CAM_CC_CSIPHY2_CLK 29 +#define CAM_CC_CSIPHY3_CLK 30 +#define CAM_CC_CSIPHY4_CLK 31 +#define CAM_CC_CSIPHY5_CLK 32 +#define CAM_CC_FAST_AHB_CLK_SRC 33 +#define CAM_CC_FD_CORE_CLK 34 +#define CAM_CC_FD_CORE_CLK_SRC 35 +#define CAM_CC_FD_CORE_UAR_CLK 36 +#define CAM_CC_GDSC_CLK 37 +#define CAM_CC_ICP_AHB_CLK 38 +#define CAM_CC_ICP_CLK 39 +#define CAM_CC_ICP_CLK_SRC 40 +#define CAM_CC_IFE_0_AHB_CLK 41 +#define CAM_CC_IFE_0_AREG_CLK 42 +#define CAM_CC_IFE_0_AXI_CLK 43 +#define CAM_CC_IFE_0_CLK 44 +#define CAM_CC_IFE_0_CLK_SRC 45 +#define CAM_CC_IFE_0_CPHY_RX_CLK 46 +#define CAM_CC_IFE_0_CSID_CLK 47 +#define CAM_CC_IFE_0_CSID_CLK_SRC 48 +#define CAM_CC_IFE_0_DSP_CLK 49 +#define CAM_CC_IFE_1_AHB_CLK 50 +#define CAM_CC_IFE_1_AREG_CLK 51 +#define CAM_CC_IFE_1_AXI_CLK 52 +#define CAM_CC_IFE_1_CLK 53 +#define CAM_CC_IFE_1_CLK_SRC 54 +#define CAM_CC_IFE_1_CPHY_RX_CLK 55 +#define CAM_CC_IFE_1_CSID_CLK 56 +#define CAM_CC_IFE_1_CSID_CLK_SRC 57 +#define CAM_CC_IFE_1_DSP_CLK 58 +#define CAM_CC_IFE_LITE_AHB_CLK 59 +#define CAM_CC_IFE_LITE_AXI_CLK 60 +#define CAM_CC_IFE_LITE_CLK 61 +#define CAM_CC_IFE_LITE_CLK_SRC 62 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 63 +#define CAM_CC_IFE_LITE_CSID_CLK 64 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 65 +#define CAM_CC_IPE_0_AHB_CLK 66 +#define CAM_CC_IPE_0_AREG_CLK 67 +#define CAM_CC_IPE_0_AXI_CLK 68 +#define CAM_CC_IPE_0_CLK 69 +#define CAM_CC_IPE_0_CLK_SRC 70 +#define CAM_CC_JPEG_CLK 71 +#define CAM_CC_JPEG_CLK_SRC 72 +#define CAM_CC_MCLK0_CLK 73 +#define CAM_CC_MCLK0_CLK_SRC 74 +#define CAM_CC_MCLK1_CLK 75 +#define CAM_CC_MCLK1_CLK_SRC 76 +#define CAM_CC_MCLK2_CLK 77 +#define CAM_CC_MCLK2_CLK_SRC 78 +#define CAM_CC_MCLK3_CLK 79 +#define CAM_CC_MCLK3_CLK_SRC 80 +#define CAM_CC_MCLK4_CLK 81 +#define CAM_CC_MCLK4_CLK_SRC 82 +#define CAM_CC_MCLK5_CLK 83 +#define CAM_CC_MCLK5_CLK_SRC 84 +#define CAM_CC_MCLK6_CLK 85 +#define CAM_CC_MCLK6_CLK_SRC 86 +#define CAM_CC_PLL0 87 +#define CAM_CC_PLL0_OUT_EVEN 88 +#define CAM_CC_PLL0_OUT_ODD 89 +#define CAM_CC_PLL1 90 +#define CAM_CC_PLL1_OUT_EVEN 91 +#define CAM_CC_PLL2 92 +#define CAM_CC_PLL2_OUT_MAIN 93 +#define CAM_CC_PLL3 94 +#define CAM_CC_PLL3_OUT_EVEN 95 +#define CAM_CC_PLL4 96 +#define CAM_CC_PLL4_OUT_EVEN 97 +#define CAM_CC_SBI_AHB_CLK 98 +#define CAM_CC_SBI_AXI_CLK 99 +#define CAM_CC_SBI_CLK 100 +#define CAM_CC_SBI_CPHY_RX_CLK 101 +#define CAM_CC_SBI_CSID_CLK 102 +#define CAM_CC_SBI_CSID_CLK_SRC 103 +#define 
CAM_CC_SBI_DIV_CLK_SRC 104 +#define CAM_CC_SBI_IFE_0_CLK 105 +#define CAM_CC_SBI_IFE_1_CLK 106 +#define CAM_CC_SLEEP_CLK 107 +#define CAM_CC_SLEEP_CLK_SRC 108 +#define CAM_CC_SLOW_AHB_CLK_SRC 109 +#define CAM_CC_XO_CLK_SRC 110 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_ICP_BCR 1 +#define CAM_CC_IFE_0_BCR 2 +#define CAM_CC_IFE_1_BCR 3 +#define CAM_CC_IPE_0_BCR 4 +#define CAM_CC_SBI_BCR 5 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define SBI_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define TITAN_TOP_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h new file mode 100644 index 000000000000..a4a692c20acf --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H + +/* DISP_CC clocks */ +#define DISP_CC_PLL0 0 +#define DISP_CC_MDSS_AHB_CLK 1 +#define DISP_CC_MDSS_AHB_CLK_SRC 2 +#define DISP_CC_MDSS_BYTE0_CLK 3 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 6 +#define DISP_CC_MDSS_DP_AUX_CLK 7 +#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8 +#define DISP_CC_MDSS_DP_CRYPTO_CLK 9 +#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10 +#define DISP_CC_MDSS_DP_LINK_CLK 11 +#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12 +#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13 +#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14 +#define DISP_CC_MDSS_DP_PIXEL_CLK 15 +#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16 +#define DISP_CC_MDSS_EDP_AUX_CLK 17 +#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 18 +#define DISP_CC_MDSS_EDP_LINK_CLK 19 +#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 20 +#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 21 +#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 22 +#define DISP_CC_MDSS_EDP_PIXEL_CLK 23 +#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 24 +#define DISP_CC_MDSS_ESC0_CLK 25 +#define DISP_CC_MDSS_ESC0_CLK_SRC 26 +#define DISP_CC_MDSS_MDP_CLK 27 +#define DISP_CC_MDSS_MDP_CLK_SRC 28 +#define DISP_CC_MDSS_MDP_LUT_CLK 29 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 30 +#define DISP_CC_MDSS_PCLK0_CLK 31 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 32 +#define DISP_CC_MDSS_ROT_CLK 33 +#define DISP_CC_MDSS_ROT_CLK_SRC 34 +#define DISP_CC_MDSS_RSCC_AHB_CLK 35 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36 +#define DISP_CC_MDSS_VSYNC_CLK 37 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 38 +#define DISP_CC_SLEEP_CLK 39 +#define DISP_CC_XO_CLK 40 + +/* DISP_CC power domains */ +#define DISP_CC_MDSS_CORE_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h index fdaca6ad5c85..ce001cbbc27f 100644 --- a/include/dt-bindings/clock/qcom,dispcc-sm8250.h +++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h @@ -55,6 +55,15 @@ #define DISP_CC_MDSS_VSYNC_CLK_SRC 45 #define DISP_CC_PLL0 46 #define DISP_CC_PLL1 47 +#define DISP_CC_MDSS_EDP_AUX_CLK 48 +#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49 +#define DISP_CC_MDSS_EDP_GTC_CLK 50 +#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51 +#define DISP_CC_MDSS_EDP_LINK_CLK 52 +#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53 +#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54 +#define DISP_CC_MDSS_EDP_PIXEL_CLK 55 +#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56 /* DISP_CC Reset */ #define DISP_CC_MDSS_CORE_BCR 0 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9607.h 
b/include/dt-bindings/clock/qcom,gcc-mdm9607.h new file mode 100644 index 000000000000..357a680a40da --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-mdm9607.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org> + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_9607_H +#define _DT_BINDINGS_CLK_MSM_GCC_9607_H + +#define GPLL0 0 +#define GPLL0_EARLY 1 +#define GPLL1 2 +#define GPLL1_VOTE 3 +#define GPLL2 4 +#define GPLL2_EARLY 5 +#define PCNOC_BFDCD_CLK_SRC 6 +#define SYSTEM_NOC_BFDCD_CLK_SRC 7 +#define GCC_SMMU_CFG_CLK 8 +#define APSS_AHB_CLK_SRC 9 +#define GCC_QDSS_DAP_CLK 10 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22 +#define BLSP1_UART1_APPS_CLK_SRC 23 +#define BLSP1_UART2_APPS_CLK_SRC 24 +#define CRYPTO_CLK_SRC 25 +#define GP1_CLK_SRC 26 +#define GP2_CLK_SRC 27 +#define GP3_CLK_SRC 28 +#define PDM2_CLK_SRC 29 +#define SDCC1_APPS_CLK_SRC 30 +#define SDCC2_APPS_CLK_SRC 31 +#define APSS_TCU_CLK_SRC 32 +#define USB_HS_SYSTEM_CLK_SRC 33 +#define GCC_BLSP1_AHB_CLK 34 +#define GCC_BLSP1_SLEEP_CLK 35 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 44 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 45 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 46 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 47 +#define GCC_BLSP1_UART1_APPS_CLK 48 +#define GCC_BLSP1_UART2_APPS_CLK 49 +#define GCC_BOOT_ROM_AHB_CLK 50 +#define GCC_CRYPTO_AHB_CLK 51 +#define GCC_CRYPTO_AXI_CLK 52 +#define GCC_CRYPTO_CLK 53 +#define GCC_GP1_CLK 54 +#define GCC_GP2_CLK 55 +#define GCC_GP3_CLK 56 +#define GCC_MSS_CFG_AHB_CLK 57 +#define GCC_PDM2_CLK 58 +#define GCC_PDM_AHB_CLK 59 +#define GCC_PRNG_AHB_CLK 60 +#define GCC_SDCC1_AHB_CLK 61 +#define GCC_SDCC1_APPS_CLK 62 +#define GCC_SDCC2_AHB_CLK 63 +#define GCC_SDCC2_APPS_CLK 64 +#define GCC_USB2A_PHY_SLEEP_CLK 65 +#define GCC_USB_HS_AHB_CLK 66 +#define GCC_USB_HS_SYSTEM_CLK 67 +#define GCC_APSS_TCU_CLK 68 +#define GCC_MSS_Q6_BIMC_AXI_CLK 69 +#define BIMC_PLL 70 +#define BIMC_PLL_VOTE 71 +#define BIMC_DDR_CLK_SRC 72 +#define BLSP1_UART3_APPS_CLK_SRC 73 +#define BLSP1_UART4_APPS_CLK_SRC 74 +#define BLSP1_UART5_APPS_CLK_SRC 75 +#define BLSP1_UART6_APPS_CLK_SRC 76 +#define GCC_BLSP1_UART3_APPS_CLK 77 +#define GCC_BLSP1_UART4_APPS_CLK 78 +#define GCC_BLSP1_UART5_APPS_CLK 79 +#define GCC_BLSP1_UART6_APPS_CLK 80 +#define GCC_APSS_AHB_CLK 81 +#define GCC_APSS_AXI_CLK 82 +#define GCC_USB_HS_PHY_CFG_AHB_CLK 83 +#define GCC_USB_HSIC_CLK_SRC 84 +#define GCC_USB_HSIC_IO_CAL_CLK_SRC 85 +#define GCC_USB_HSIC_SYSTEM_CLK_SRC 86 + +/* Resets */ +#define USB2_HS_PHY_ONLY_BCR 0 +#define QUSB2_PHY_BCR 1 +#define GCC_MSS_RESTART 2 +#define USB_HS_HSIC_BCR 3 +#define USB_HS_BCR 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8953.h b/include/dt-bindings/clock/qcom,gcc-msm8953.h new 
file mode 100644 index 000000000000..783162da6148 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8953.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8953_H +#define _DT_BINDINGS_CLK_MSM_GCC_8953_H + +/* Clocks */ +#define APC0_DROOP_DETECTOR_CLK_SRC 0 +#define APC1_DROOP_DETECTOR_CLK_SRC 1 +#define APSS_AHB_CLK_SRC 2 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10 +#define BLSP1_UART1_APPS_CLK_SRC 11 +#define BLSP1_UART2_APPS_CLK_SRC 12 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 13 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 14 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 15 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 16 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 17 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 18 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 19 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 20 +#define BLSP2_UART1_APPS_CLK_SRC 21 +#define BLSP2_UART2_APPS_CLK_SRC 22 +#define BYTE0_CLK_SRC 23 +#define BYTE1_CLK_SRC 24 +#define CAMSS_GP0_CLK_SRC 25 +#define CAMSS_GP1_CLK_SRC 26 +#define CAMSS_TOP_AHB_CLK_SRC 27 +#define CCI_CLK_SRC 28 +#define CPP_CLK_SRC 29 +#define CRYPTO_CLK_SRC 30 +#define CSI0PHYTIMER_CLK_SRC 31 +#define CSI0P_CLK_SRC 32 +#define CSI0_CLK_SRC 33 +#define CSI1PHYTIMER_CLK_SRC 34 +#define CSI1P_CLK_SRC 35 +#define CSI1_CLK_SRC 36 +#define CSI2PHYTIMER_CLK_SRC 37 +#define CSI2P_CLK_SRC 38 +#define CSI2_CLK_SRC 39 +#define ESC0_CLK_SRC 40 +#define ESC1_CLK_SRC 41 +#define GCC_APC0_DROOP_DETECTOR_GPLL0_CLK 42 +#define GCC_APC1_DROOP_DETECTOR_GPLL0_CLK 43 +#define GCC_APSS_AHB_CLK 44 +#define GCC_APSS_AXI_CLK 45 +#define GCC_APSS_TCU_ASYNC_CLK 46 +#define GCC_BIMC_GFX_CLK 47 +#define GCC_BIMC_GPU_CLK 48 +#define GCC_BLSP1_AHB_CLK 49 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 50 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 51 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 52 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 53 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 54 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 55 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 56 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 57 +#define GCC_BLSP1_UART1_APPS_CLK 58 +#define GCC_BLSP1_UART2_APPS_CLK 59 +#define GCC_BLSP2_AHB_CLK 60 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 61 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 62 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 63 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 64 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 65 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 66 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 67 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 68 +#define GCC_BLSP2_UART1_APPS_CLK 69 +#define GCC_BLSP2_UART2_APPS_CLK 70 +#define GCC_BOOT_ROM_AHB_CLK 71 +#define GCC_CAMSS_AHB_CLK 72 +#define GCC_CAMSS_CCI_AHB_CLK 73 +#define GCC_CAMSS_CCI_CLK 74 +#define GCC_CAMSS_CPP_AHB_CLK 75 +#define GCC_CAMSS_CPP_AXI_CLK 76 +#define GCC_CAMSS_CPP_CLK 77 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 78 +#define GCC_CAMSS_CSI0PHY_CLK 79 +#define GCC_CAMSS_CSI0PIX_CLK 80 +#define GCC_CAMSS_CSI0RDI_CLK 81 +#define GCC_CAMSS_CSI0_AHB_CLK 82 +#define GCC_CAMSS_CSI0_CLK 83 +#define GCC_CAMSS_CSI0_CSIPHY_3P_CLK 84 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 85 +#define GCC_CAMSS_CSI1PHY_CLK 86 +#define GCC_CAMSS_CSI1PIX_CLK 87 +#define GCC_CAMSS_CSI1RDI_CLK 88 +#define GCC_CAMSS_CSI1_AHB_CLK 89 +#define GCC_CAMSS_CSI1_CLK 90 +#define GCC_CAMSS_CSI1_CSIPHY_3P_CLK 91 +#define GCC_CAMSS_CSI2PHYTIMER_CLK 
92 +#define GCC_CAMSS_CSI2PHY_CLK 93 +#define GCC_CAMSS_CSI2PIX_CLK 94 +#define GCC_CAMSS_CSI2RDI_CLK 95 +#define GCC_CAMSS_CSI2_AHB_CLK 96 +#define GCC_CAMSS_CSI2_CLK 97 +#define GCC_CAMSS_CSI2_CSIPHY_3P_CLK 98 +#define GCC_CAMSS_CSI_VFE0_CLK 99 +#define GCC_CAMSS_CSI_VFE1_CLK 100 +#define GCC_CAMSS_GP0_CLK 101 +#define GCC_CAMSS_GP1_CLK 102 +#define GCC_CAMSS_ISPIF_AHB_CLK 103 +#define GCC_CAMSS_JPEG0_CLK 104 +#define GCC_CAMSS_JPEG_AHB_CLK 105 +#define GCC_CAMSS_JPEG_AXI_CLK 106 +#define GCC_CAMSS_MCLK0_CLK 107 +#define GCC_CAMSS_MCLK1_CLK 108 +#define GCC_CAMSS_MCLK2_CLK 109 +#define GCC_CAMSS_MCLK3_CLK 110 +#define GCC_CAMSS_MICRO_AHB_CLK 111 +#define GCC_CAMSS_TOP_AHB_CLK 112 +#define GCC_CAMSS_VFE0_AHB_CLK 113 +#define GCC_CAMSS_VFE0_AXI_CLK 114 +#define GCC_CAMSS_VFE0_CLK 115 +#define GCC_CAMSS_VFE1_AHB_CLK 116 +#define GCC_CAMSS_VFE1_AXI_CLK 117 +#define GCC_CAMSS_VFE1_CLK 118 +#define GCC_CPP_TBU_CLK 119 +#define GCC_CRYPTO_AHB_CLK 120 +#define GCC_CRYPTO_AXI_CLK 121 +#define GCC_CRYPTO_CLK 122 +#define GCC_DCC_CLK 123 +#define GCC_GP1_CLK 124 +#define GCC_GP2_CLK 125 +#define GCC_GP3_CLK 126 +#define GCC_JPEG_TBU_CLK 127 +#define GCC_MDP_TBU_CLK 128 +#define GCC_MDSS_AHB_CLK 129 +#define GCC_MDSS_AXI_CLK 130 +#define GCC_MDSS_BYTE0_CLK 131 +#define GCC_MDSS_BYTE1_CLK 132 +#define GCC_MDSS_ESC0_CLK 133 +#define GCC_MDSS_ESC1_CLK 134 +#define GCC_MDSS_MDP_CLK 135 +#define GCC_MDSS_PCLK0_CLK 136 +#define GCC_MDSS_PCLK1_CLK 137 +#define GCC_MDSS_VSYNC_CLK 138 +#define GCC_MSS_CFG_AHB_CLK 139 +#define GCC_MSS_Q6_BIMC_AXI_CLK 140 +#define GCC_OXILI_AHB_CLK 141 +#define GCC_OXILI_AON_CLK 142 +#define GCC_OXILI_GFX3D_CLK 143 +#define GCC_OXILI_TIMER_CLK 144 +#define GCC_PCNOC_USB3_AXI_CLK 145 +#define GCC_PDM2_CLK 146 +#define GCC_PDM_AHB_CLK 147 +#define GCC_PRNG_AHB_CLK 148 +#define GCC_QDSS_DAP_CLK 149 +#define GCC_QUSB_REF_CLK 150 +#define GCC_RBCPR_GFX_CLK 151 +#define GCC_SDCC1_AHB_CLK 152 +#define GCC_SDCC1_APPS_CLK 153 +#define GCC_SDCC1_ICE_CORE_CLK 154 +#define GCC_SDCC2_AHB_CLK 155 +#define GCC_SDCC2_APPS_CLK 156 +#define GCC_SMMU_CFG_CLK 157 +#define GCC_USB30_MASTER_CLK 158 +#define GCC_USB30_MOCK_UTMI_CLK 159 +#define GCC_USB30_SLEEP_CLK 160 +#define GCC_USB3_AUX_CLK 161 +#define GCC_USB3_PIPE_CLK 162 +#define GCC_USB_PHY_CFG_AHB_CLK 163 +#define GCC_USB_SS_REF_CLK 164 +#define GCC_VENUS0_AHB_CLK 165 +#define GCC_VENUS0_AXI_CLK 166 +#define GCC_VENUS0_CORE0_VCODEC0_CLK 167 +#define GCC_VENUS0_VCODEC0_CLK 168 +#define GCC_VENUS_TBU_CLK 169 +#define GCC_VFE1_TBU_CLK 170 +#define GCC_VFE_TBU_CLK 171 +#define GFX3D_CLK_SRC 172 +#define GP1_CLK_SRC 173 +#define GP2_CLK_SRC 174 +#define GP3_CLK_SRC 175 +#define GPLL0 176 +#define GPLL0_EARLY 177 +#define GPLL2 178 +#define GPLL2_EARLY 179 +#define GPLL3 180 +#define GPLL3_EARLY 181 +#define GPLL4 182 +#define GPLL4_EARLY 183 +#define GPLL6 184 +#define GPLL6_EARLY 185 +#define JPEG0_CLK_SRC 186 +#define MCLK0_CLK_SRC 187 +#define MCLK1_CLK_SRC 188 +#define MCLK2_CLK_SRC 189 +#define MCLK3_CLK_SRC 190 +#define MDP_CLK_SRC 191 +#define PCLK0_CLK_SRC 192 +#define PCLK1_CLK_SRC 193 +#define PDM2_CLK_SRC 194 +#define RBCPR_GFX_CLK_SRC 195 +#define SDCC1_APPS_CLK_SRC 196 +#define SDCC1_ICE_CORE_CLK_SRC 197 +#define SDCC2_APPS_CLK_SRC 198 +#define USB30_MASTER_CLK_SRC 199 +#define USB30_MOCK_UTMI_CLK_SRC 200 +#define USB3_AUX_CLK_SRC 201 +#define VCODEC0_CLK_SRC 202 +#define VFE0_CLK_SRC 203 +#define VFE1_CLK_SRC 204 +#define VSYNC_CLK_SRC 205 + +/* GCC block resets */ +#define GCC_CAMSS_MICRO_BCR 0 +#define GCC_MSS_BCR 1 +#define 
GCC_QUSB2_PHY_BCR 2 +#define GCC_USB3PHY_PHY_BCR 3 +#define GCC_USB3_PHY_BCR 4 +#define GCC_USB_30_BCR 5 + +/* GDSCs */ +#define CPP_GDSC 0 +#define JPEG_GDSC 1 +#define MDSS_GDSC 2 +#define OXILI_CX_GDSC 3 +#define OXILI_GX_GDSC 4 +#define USB30_GDSC 5 +#define VENUS_CORE0_GDSC 6 +#define VENUS_GDSC 7 +#define VFE0_GDSC 8 +#define VFE1_GDSC 9 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sc7280.h b/include/dt-bindings/clock/qcom,gcc-sc7280.h index 4394f15111c6..3d5724b79bff 100644 --- a/include/dt-bindings/clock/qcom,gcc-sc7280.h +++ b/include/dt-bindings/clock/qcom,gcc-sc7280.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */ diff --git a/include/dt-bindings/clock/qcom,gcc-sm6115.h b/include/dt-bindings/clock/qcom,gcc-sm6115.h new file mode 100644 index 000000000000..b91a7b460433 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm6115.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H + +/* GCC clocks */ +#define GPLL0 0 +#define GPLL0_OUT_AUX2 1 +#define GPLL0_OUT_MAIN 2 +#define GPLL10 3 +#define GPLL10_OUT_MAIN 4 +#define GPLL11 5 +#define GPLL11_OUT_MAIN 6 +#define GPLL3 7 +#define GPLL4 8 +#define GPLL4_OUT_MAIN 9 +#define GPLL6 10 +#define GPLL6_OUT_MAIN 11 +#define GPLL7 12 +#define GPLL7_OUT_MAIN 13 +#define GPLL8 14 +#define GPLL8_OUT_MAIN 15 +#define GPLL9 16 +#define GPLL9_OUT_MAIN 17 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 18 +#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 19 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 20 +#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 21 +#define GCC_CAMSS_CSI2PHYTIMER_CLK 22 +#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 23 +#define GCC_CAMSS_MCLK0_CLK 24 +#define GCC_CAMSS_MCLK0_CLK_SRC 25 +#define GCC_CAMSS_MCLK1_CLK 26 +#define GCC_CAMSS_MCLK1_CLK_SRC 27 +#define GCC_CAMSS_MCLK2_CLK 28 +#define GCC_CAMSS_MCLK2_CLK_SRC 29 +#define GCC_CAMSS_MCLK3_CLK 30 +#define GCC_CAMSS_MCLK3_CLK_SRC 31 +#define GCC_CAMSS_NRT_AXI_CLK 32 +#define GCC_CAMSS_OPE_AHB_CLK 33 +#define GCC_CAMSS_OPE_AHB_CLK_SRC 34 +#define GCC_CAMSS_OPE_CLK 35 +#define GCC_CAMSS_OPE_CLK_SRC 36 +#define GCC_CAMSS_RT_AXI_CLK 37 +#define GCC_CAMSS_TFE_0_CLK 38 +#define GCC_CAMSS_TFE_0_CLK_SRC 39 +#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 40 +#define GCC_CAMSS_TFE_0_CSID_CLK 41 +#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 42 +#define GCC_CAMSS_TFE_1_CLK 43 +#define GCC_CAMSS_TFE_1_CLK_SRC 44 +#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 45 +#define GCC_CAMSS_TFE_1_CSID_CLK 46 +#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 47 +#define GCC_CAMSS_TFE_2_CLK 48 +#define GCC_CAMSS_TFE_2_CLK_SRC 49 +#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 50 +#define GCC_CAMSS_TFE_2_CSID_CLK 51 +#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 52 +#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 53 +#define GCC_CAMSS_TOP_AHB_CLK 54 +#define GCC_CAMSS_TOP_AHB_CLK_SRC 55 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 56 +#define GCC_CPUSS_AHB_CLK 57 +#define GCC_CPUSS_GNOC_CLK 60 +#define GCC_DISP_AHB_CLK 61 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 62 +#define GCC_DISP_HF_AXI_CLK 63 +#define GCC_DISP_THROTTLE_CORE_CLK 64 +#define GCC_DISP_XO_CLK 65 +#define GCC_GP1_CLK 66 +#define GCC_GP1_CLK_SRC 67 +#define GCC_GP2_CLK 68 +#define GCC_GP2_CLK_SRC 69 +#define GCC_GP3_CLK 70 +#define GCC_GP3_CLK_SRC 71 +#define GCC_GPU_CFG_AHB_CLK 72 +#define 
GCC_GPU_GPLL0_CLK_SRC 73 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 74 +#define GCC_GPU_IREF_CLK 75 +#define GCC_GPU_MEMNOC_GFX_CLK 76 +#define GCC_GPU_SNOC_DVM_GFX_CLK 77 +#define GCC_GPU_THROTTLE_CORE_CLK 78 +#define GCC_GPU_THROTTLE_XO_CLK 79 +#define GCC_PDM2_CLK 80 +#define GCC_PDM2_CLK_SRC 81 +#define GCC_PDM_AHB_CLK 82 +#define GCC_PDM_XO4_CLK 83 +#define GCC_PRNG_AHB_CLK 84 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 86 +#define GCC_QMIP_DISP_AHB_CLK 87 +#define GCC_QMIP_GPU_CFG_AHB_CLK 88 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 90 +#define GCC_QUPV3_WRAP0_CORE_CLK 91 +#define GCC_QUPV3_WRAP0_S0_CLK 92 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 93 +#define GCC_QUPV3_WRAP0_S1_CLK 94 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 95 +#define GCC_QUPV3_WRAP0_S2_CLK 96 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 97 +#define GCC_QUPV3_WRAP0_S3_CLK 98 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 99 +#define GCC_QUPV3_WRAP0_S4_CLK 100 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 101 +#define GCC_QUPV3_WRAP0_S5_CLK 102 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 103 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105 +#define GCC_SDCC1_AHB_CLK 106 +#define GCC_SDCC1_APPS_CLK 107 +#define GCC_SDCC1_APPS_CLK_SRC 108 +#define GCC_SDCC1_ICE_CORE_CLK 109 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 110 +#define GCC_SDCC2_AHB_CLK 111 +#define GCC_SDCC2_APPS_CLK 112 +#define GCC_SDCC2_APPS_CLK_SRC 113 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 114 +#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 115 +#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 116 +#define GCC_UFS_PHY_AHB_CLK 117 +#define GCC_UFS_PHY_AXI_CLK 118 +#define GCC_UFS_PHY_AXI_CLK_SRC 119 +#define GCC_UFS_PHY_ICE_CORE_CLK 120 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 121 +#define GCC_UFS_PHY_PHY_AUX_CLK 122 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 123 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127 +#define GCC_USB30_PRIM_MASTER_CLK 128 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 129 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 132 +#define GCC_USB30_PRIM_SLEEP_CLK 133 +#define GCC_USB3_PRIM_CLKREF_CLK 134 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 137 +#define GCC_VCODEC0_AXI_CLK 138 +#define GCC_VENUS_AHB_CLK 139 +#define GCC_VENUS_CTL_AXI_CLK 140 +#define GCC_VIDEO_AHB_CLK 141 +#define GCC_VIDEO_AXI0_CLK 142 +#define GCC_VIDEO_THROTTLE_CORE_CLK 143 +#define GCC_VIDEO_VCODEC0_SYS_CLK 144 +#define GCC_VIDEO_VENUS_CLK_SRC 145 +#define GCC_VIDEO_VENUS_CTL_CLK 146 +#define GCC_VIDEO_XO_CLK 147 +#define GCC_AHB2PHY_CSI_CLK 148 +#define GCC_AHB2PHY_USB_CLK 149 +#define GCC_BIMC_GPU_AXI_CLK 150 +#define GCC_BOOT_ROM_AHB_CLK 151 +#define GCC_CAM_THROTTLE_NRT_CLK 152 +#define GCC_CAM_THROTTLE_RT_CLK 153 +#define GCC_CAMERA_AHB_CLK 154 +#define GCC_CAMERA_XO_CLK 155 +#define GCC_CAMSS_AXI_CLK 156 +#define GCC_CAMSS_AXI_CLK_SRC 157 +#define GCC_CAMSS_CAMNOC_ATB_CLK 158 +#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 159 +#define GCC_CAMSS_CCI_0_CLK 160 +#define GCC_CAMSS_CCI_CLK_SRC 161 +#define GCC_CAMSS_CPHY_0_CLK 162 +#define GCC_CAMSS_CPHY_1_CLK 163 +#define GCC_CAMSS_CPHY_2_CLK 164 +#define GCC_UFS_CLKREF_CLK 165 +#define GCC_DISP_GPLL0_CLK_SRC 166 + +/* GCC resets */ +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_SDCC1_BCR 2 +#define 
GCC_UFS_PHY_BCR 3 +#define GCC_USB30_PRIM_BCR 4 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 5 +#define GCC_VCODEC0_BCR 6 +#define GCC_VENUS_BCR 7 +#define GCC_VIDEO_INTERFACE_BCR 8 +#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 9 +#define GCC_USB3_PHY_PRIM_SP0_BCR 10 +#define GCC_SDCC2_BCR 11 + +/* Indexes for GDSCs */ +#define GCC_CAMSS_TOP_GDSC 0 +#define GCC_UFS_PHY_GDSC 1 +#define GCC_USB30_PRIM_GDSC 2 +#define GCC_VCODEC0_GDSC 3 +#define GCC_VENUS_GDSC 4 +#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 5 +#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 6 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6125.h b/include/dt-bindings/clock/qcom,gcc-sm6125.h new file mode 100644 index 000000000000..08ea18086824 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm6125.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H + +#define GPLL0_OUT_AUX2 0 +#define GPLL0_OUT_MAIN 1 +#define GPLL6_OUT_MAIN 2 +#define GPLL7_OUT_MAIN 3 +#define GPLL8_OUT_MAIN 4 +#define GPLL9_OUT_MAIN 5 +#define GPLL0_OUT_EARLY 6 +#define GPLL3_OUT_EARLY 7 +#define GPLL4_OUT_MAIN 8 +#define GPLL5_OUT_MAIN 9 +#define GPLL6_OUT_EARLY 10 +#define GPLL7_OUT_EARLY 11 +#define GPLL8_OUT_EARLY 12 +#define GPLL9_OUT_EARLY 13 +#define GCC_AHB2PHY_CSI_CLK 14 +#define GCC_AHB2PHY_USB_CLK 15 +#define GCC_APC_VS_CLK 16 +#define GCC_BOOT_ROM_AHB_CLK 17 +#define GCC_CAMERA_AHB_CLK 18 +#define GCC_CAMERA_XO_CLK 19 +#define GCC_CAMSS_AHB_CLK_SRC 20 +#define GCC_CAMSS_CCI_AHB_CLK 21 +#define GCC_CAMSS_CCI_CLK 22 +#define GCC_CAMSS_CCI_CLK_SRC 23 +#define GCC_CAMSS_CPHY_CSID0_CLK 24 +#define GCC_CAMSS_CPHY_CSID1_CLK 25 +#define GCC_CAMSS_CPHY_CSID2_CLK 26 +#define GCC_CAMSS_CPHY_CSID3_CLK 27 +#define GCC_CAMSS_CPP_AHB_CLK 28 +#define GCC_CAMSS_CPP_AXI_CLK 29 +#define GCC_CAMSS_CPP_CLK 30 +#define GCC_CAMSS_CPP_CLK_SRC 31 +#define GCC_CAMSS_CPP_VBIF_AHB_CLK 32 +#define GCC_CAMSS_CSI0_AHB_CLK 33 +#define GCC_CAMSS_CSI0_CLK 34 +#define GCC_CAMSS_CSI0_CLK_SRC 35 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 36 +#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 37 +#define GCC_CAMSS_CSI0PIX_CLK 38 +#define GCC_CAMSS_CSI0RDI_CLK 39 +#define GCC_CAMSS_CSI1_AHB_CLK 40 +#define GCC_CAMSS_CSI1_CLK 41 +#define GCC_CAMSS_CSI1_CLK_SRC 42 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 43 +#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 44 +#define GCC_CAMSS_CSI1PIX_CLK 45 +#define GCC_CAMSS_CSI1RDI_CLK 46 +#define GCC_CAMSS_CSI2_AHB_CLK 47 +#define GCC_CAMSS_CSI2_CLK 48 +#define GCC_CAMSS_CSI2_CLK_SRC 49 +#define GCC_CAMSS_CSI2PHYTIMER_CLK 50 +#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 51 +#define GCC_CAMSS_CSI2PIX_CLK 52 +#define GCC_CAMSS_CSI2RDI_CLK 53 +#define GCC_CAMSS_CSI3_AHB_CLK 54 +#define GCC_CAMSS_CSI3_CLK 55 +#define GCC_CAMSS_CSI3_CLK_SRC 56 +#define GCC_CAMSS_CSI3PIX_CLK 57 +#define GCC_CAMSS_CSI3RDI_CLK 58 +#define GCC_CAMSS_CSI_VFE0_CLK 59 +#define GCC_CAMSS_CSI_VFE1_CLK 60 +#define GCC_CAMSS_CSIPHY0_CLK 61 +#define GCC_CAMSS_CSIPHY1_CLK 62 +#define GCC_CAMSS_CSIPHY2_CLK 63 +#define GCC_CAMSS_CSIPHY_CLK_SRC 64 +#define GCC_CAMSS_GP0_CLK 65 +#define GCC_CAMSS_GP0_CLK_SRC 66 +#define GCC_CAMSS_GP1_CLK 67 +#define GCC_CAMSS_GP1_CLK_SRC 68 +#define GCC_CAMSS_ISPIF_AHB_CLK 69 +#define GCC_CAMSS_JPEG_AHB_CLK 70 +#define GCC_CAMSS_JPEG_AXI_CLK 71 +#define GCC_CAMSS_JPEG_CLK 72 +#define GCC_CAMSS_JPEG_CLK_SRC 73 
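A minimal consumer sketch for the new SM6115 GCC binding added above, assuming a hypothetical board DTS: the node names, unit addresses and the "qcom,gcc-sm6115" compatible are illustrative assumptions; only the clock, reset and GDSC indices come from the header.

    #include <dt-bindings/clock/qcom,gcc-sm6115.h>

    gcc: clock-controller@1400000 {		/* hypothetical unit address */
        compatible = "qcom,gcc-sm6115";	/* assumed compatible string */
        #clock-cells = <1>;
        #reset-cells = <1>;
        #power-domain-cells = <1>;
    };

    ufshc@4804000 {				/* hypothetical UFS consumer node */
        clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
                 <&gcc GCC_UFS_PHY_AHB_CLK>,
                 <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
        resets = <&gcc GCC_UFS_PHY_BCR>;
        power-domains = <&gcc GCC_UFS_PHY_GDSC>;
    };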
+#define GCC_CAMSS_MCLK0_CLK 74 +#define GCC_CAMSS_MCLK0_CLK_SRC 75 +#define GCC_CAMSS_MCLK1_CLK 76 +#define GCC_CAMSS_MCLK1_CLK_SRC 77 +#define GCC_CAMSS_MCLK2_CLK 78 +#define GCC_CAMSS_MCLK2_CLK_SRC 79 +#define GCC_CAMSS_MCLK3_CLK 80 +#define GCC_CAMSS_MCLK3_CLK_SRC 81 +#define GCC_CAMSS_MICRO_AHB_CLK 82 +#define GCC_CAMSS_THROTTLE_NRT_AXI_CLK 83 +#define GCC_CAMSS_THROTTLE_RT_AXI_CLK 84 +#define GCC_CAMSS_TOP_AHB_CLK 85 +#define GCC_CAMSS_VFE0_AHB_CLK 86 +#define GCC_CAMSS_VFE0_CLK 87 +#define GCC_CAMSS_VFE0_CLK_SRC 88 +#define GCC_CAMSS_VFE0_STREAM_CLK 89 +#define GCC_CAMSS_VFE1_AHB_CLK 90 +#define GCC_CAMSS_VFE1_CLK 91 +#define GCC_CAMSS_VFE1_CLK_SRC 92 +#define GCC_CAMSS_VFE1_STREAM_CLK 93 +#define GCC_CAMSS_VFE_TSCTR_CLK 94 +#define GCC_CAMSS_VFE_VBIF_AHB_CLK 95 +#define GCC_CAMSS_VFE_VBIF_AXI_CLK 96 +#define GCC_CE1_AHB_CLK 97 +#define GCC_CE1_AXI_CLK 98 +#define GCC_CE1_CLK 99 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 100 +#define GCC_CPUSS_GNOC_CLK 101 +#define GCC_DISP_AHB_CLK 102 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 103 +#define GCC_DISP_HF_AXI_CLK 104 +#define GCC_DISP_THROTTLE_CORE_CLK 105 +#define GCC_DISP_XO_CLK 106 +#define GCC_GP1_CLK 107 +#define GCC_GP1_CLK_SRC 108 +#define GCC_GP2_CLK 109 +#define GCC_GP2_CLK_SRC 110 +#define GCC_GP3_CLK 111 +#define GCC_GP3_CLK_SRC 112 +#define GCC_GPU_CFG_AHB_CLK 113 +#define GCC_GPU_GPLL0_CLK_SRC 114 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 115 +#define GCC_GPU_MEMNOC_GFX_CLK 116 +#define GCC_GPU_SNOC_DVM_GFX_CLK 117 +#define GCC_GPU_THROTTLE_CORE_CLK 118 +#define GCC_GPU_THROTTLE_XO_CLK 119 +#define GCC_MSS_VS_CLK 120 +#define GCC_PDM2_CLK 121 +#define GCC_PDM2_CLK_SRC 122 +#define GCC_PDM_AHB_CLK 123 +#define GCC_PDM_XO4_CLK 124 +#define GCC_PRNG_AHB_CLK 125 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 126 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 127 +#define GCC_QMIP_DISP_AHB_CLK 128 +#define GCC_QMIP_GPU_CFG_AHB_CLK 129 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 130 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 131 +#define GCC_QUPV3_WRAP0_CORE_CLK 132 +#define GCC_QUPV3_WRAP0_S0_CLK 133 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 134 +#define GCC_QUPV3_WRAP0_S1_CLK 135 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 136 +#define GCC_QUPV3_WRAP0_S2_CLK 137 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 138 +#define GCC_QUPV3_WRAP0_S3_CLK 139 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 140 +#define GCC_QUPV3_WRAP0_S4_CLK 141 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 142 +#define GCC_QUPV3_WRAP0_S5_CLK 143 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 144 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 145 +#define GCC_QUPV3_WRAP1_CORE_CLK 146 +#define GCC_QUPV3_WRAP1_S0_CLK 147 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 148 +#define GCC_QUPV3_WRAP1_S1_CLK 149 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 150 +#define GCC_QUPV3_WRAP1_S2_CLK 151 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 152 +#define GCC_QUPV3_WRAP1_S3_CLK 153 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 154 +#define GCC_QUPV3_WRAP1_S4_CLK 155 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 156 +#define GCC_QUPV3_WRAP1_S5_CLK 157 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 158 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 159 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 160 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 161 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 162 +#define GCC_SDCC1_AHB_CLK 163 +#define GCC_SDCC1_APPS_CLK 164 +#define GCC_SDCC1_APPS_CLK_SRC 165 +#define GCC_SDCC1_ICE_CORE_CLK 166 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 167 +#define GCC_SDCC2_AHB_CLK 168 +#define GCC_SDCC2_APPS_CLK 169 +#define GCC_SDCC2_APPS_CLK_SRC 170 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 171 +#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 172 +#define 
GCC_SYS_NOC_USB3_PRIM_AXI_CLK 173 +#define GCC_UFS_PHY_AHB_CLK 174 +#define GCC_UFS_PHY_AXI_CLK 175 +#define GCC_UFS_PHY_AXI_CLK_SRC 176 +#define GCC_UFS_PHY_ICE_CORE_CLK 177 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 178 +#define GCC_UFS_PHY_PHY_AUX_CLK 179 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 180 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 182 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 183 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 184 +#define GCC_USB30_PRIM_MASTER_CLK 185 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 186 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188 +#define GCC_USB30_PRIM_SLEEP_CLK 189 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 190 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 191 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 192 +#define GCC_VDDA_VS_CLK 193 +#define GCC_VDDCX_VS_CLK 194 +#define GCC_VDDMX_VS_CLK 195 +#define GCC_VIDEO_AHB_CLK 196 +#define GCC_VIDEO_AXI0_CLK 197 +#define GCC_VIDEO_THROTTLE_CORE_CLK 198 +#define GCC_VIDEO_XO_CLK 199 +#define GCC_VS_CTRL_AHB_CLK 200 +#define GCC_VS_CTRL_CLK 201 +#define GCC_VS_CTRL_CLK_SRC 202 +#define GCC_VSENSOR_CLK_SRC 203 +#define GCC_WCSS_VS_CLK 204 +#define GCC_USB3_PRIM_CLKREF_CLK 205 +#define GCC_SYS_NOC_COMPUTE_SF_AXI_CLK 206 +#define GCC_BIMC_GPU_AXI_CLK 207 +#define GCC_UFS_MEM_CLKREF_CLK 208 + +/* GDSCs */ +#define USB30_PRIM_GDSC 0 +#define UFS_PHY_GDSC 1 +#define CAMSS_VFE0_GDSC 2 +#define CAMSS_VFE1_GDSC 3 +#define CAMSS_TOP_GDSC 4 +#define CAM_CPP_GDSC 5 +#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 6 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8 +#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 9 + +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_UFS_PHY_BCR 2 +#define GCC_USB30_PRIM_BCR 3 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 4 +#define GCC_USB3_PHY_PRIM_SP0_BCR 5 +#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6 +#define GCC_CAMSS_MICRO_BCR 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6350.h b/include/dt-bindings/clock/qcom,gcc-sm6350.h new file mode 100644 index 000000000000..ba584ca33c39 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm6350.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H + +/* GCC clocks */ +#define GPLL0 0 +#define GPLL0_OUT_EVEN 1 +#define GPLL0_OUT_ODD 2 +#define GPLL6 3 +#define GPLL6_OUT_EVEN 4 +#define GPLL7 5 +#define GCC_AGGRE_CNOC_PERIPH_CENTER_AHB_CLK 6 +#define GCC_AGGRE_NOC_CENTER_AHB_CLK 7 +#define GCC_AGGRE_NOC_PCIE_SF_AXI_CLK 8 +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 9 +#define GCC_AGGRE_NOC_WLAN_AXI_CLK 10 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 11 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12 +#define GCC_BOOT_ROM_AHB_CLK 13 +#define GCC_CAMERA_AHB_CLK 14 +#define GCC_CAMERA_AXI_CLK 15 +#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 16 +#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 17 +#define GCC_CAMERA_XO_CLK 18 +#define GCC_CE1_AHB_CLK 19 +#define GCC_CE1_AXI_CLK 20 +#define GCC_CE1_CLK 21 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 22 +#define GCC_CPUSS_AHB_CLK 23 +#define GCC_CPUSS_AHB_CLK_SRC 24 +#define GCC_CPUSS_AHB_DIV_CLK_SRC 25 +#define GCC_CPUSS_GNOC_CLK 26 +#define GCC_CPUSS_RBCPR_CLK 27 +#define GCC_DDRSS_GPU_AXI_CLK 28 +#define GCC_DISP_AHB_CLK 29 +#define GCC_DISP_AXI_CLK 30 +#define GCC_DISP_CC_SLEEP_CLK 31 +#define GCC_DISP_CC_XO_CLK 32 +#define GCC_DISP_GPLL0_CLK 33 +#define GCC_DISP_THROTTLE_AXI_CLK 34 +#define GCC_DISP_XO_CLK 35 +#define GCC_GP1_CLK 36 +#define GCC_GP1_CLK_SRC 37 +#define GCC_GP2_CLK 38 +#define GCC_GP2_CLK_SRC 39 +#define GCC_GP3_CLK 40 +#define GCC_GP3_CLK_SRC 41 +#define GCC_GPU_CFG_AHB_CLK 42 +#define GCC_GPU_GPLL0_CLK 43 +#define GCC_GPU_GPLL0_DIV_CLK 44 +#define GCC_GPU_MEMNOC_GFX_CLK 45 +#define GCC_GPU_SNOC_DVM_GFX_CLK 46 +#define GCC_NPU_AXI_CLK 47 +#define GCC_NPU_BWMON_AXI_CLK 48 +#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 49 +#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 50 +#define GCC_NPU_CFG_AHB_CLK 51 +#define GCC_NPU_DMA_CLK 52 +#define GCC_NPU_GPLL0_CLK 53 +#define GCC_NPU_GPLL0_DIV_CLK 54 +#define GCC_PCIE_0_AUX_CLK 55 +#define GCC_PCIE_0_AUX_CLK_SRC 56 +#define GCC_PCIE_0_CFG_AHB_CLK 57 +#define GCC_PCIE_0_MSTR_AXI_CLK 58 +#define GCC_PCIE_0_PIPE_CLK 59 +#define GCC_PCIE_0_SLV_AXI_CLK 60 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 61 +#define GCC_PCIE_PHY_RCHNG_CLK 62 +#define GCC_PCIE_PHY_RCHNG_CLK_SRC 63 +#define GCC_PDM2_CLK 64 +#define GCC_PDM2_CLK_SRC 65 +#define GCC_PDM_AHB_CLK 66 +#define GCC_PDM_XO4_CLK 67 +#define GCC_PRNG_AHB_CLK 68 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 69 +#define GCC_QUPV3_WRAP0_CORE_CLK 70 +#define GCC_QUPV3_WRAP0_S0_CLK 71 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72 +#define GCC_QUPV3_WRAP0_S1_CLK 73 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74 +#define GCC_QUPV3_WRAP0_S2_CLK 75 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76 +#define GCC_QUPV3_WRAP0_S3_CLK 77 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78 +#define GCC_QUPV3_WRAP0_S4_CLK 79 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S5_CLK 81 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 83 +#define GCC_QUPV3_WRAP1_CORE_CLK 84 +#define GCC_QUPV3_WRAP1_S0_CLK 85 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 86 +#define GCC_QUPV3_WRAP1_S1_CLK 87 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 88 +#define GCC_QUPV3_WRAP1_S2_CLK 89 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 90 +#define GCC_QUPV3_WRAP1_S3_CLK 91 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 92 +#define GCC_QUPV3_WRAP1_S4_CLK 93 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 94 +#define GCC_QUPV3_WRAP1_S5_CLK 95 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 96 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 97 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 98 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 
99 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 100 +#define GCC_SDCC1_AHB_CLK 101 +#define GCC_SDCC1_APPS_CLK 102 +#define GCC_SDCC1_APPS_CLK_SRC 103 +#define GCC_SDCC1_ICE_CORE_CLK 104 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 105 +#define GCC_SDCC2_AHB_CLK 106 +#define GCC_SDCC2_APPS_CLK 107 +#define GCC_SDCC2_APPS_CLK_SRC 108 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 109 +#define GCC_UFS_MEM_CLKREF_CLK 110 +#define GCC_UFS_PHY_AHB_CLK 111 +#define GCC_UFS_PHY_AXI_CLK 112 +#define GCC_UFS_PHY_AXI_CLK_SRC 113 +#define GCC_UFS_PHY_ICE_CORE_CLK 114 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 115 +#define GCC_UFS_PHY_PHY_AUX_CLK 116 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 117 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 118 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 119 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 121 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 122 +#define GCC_USB30_PRIM_MASTER_CLK 123 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 124 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 125 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 126 +#define GCC_USB30_PRIM_MOCK_UTMI_DIV_CLK_SRC 127 +#define GCC_USB3_PRIM_CLKREF_CLK 128 +#define GCC_USB30_PRIM_SLEEP_CLK 129 +#define GCC_USB3_PRIM_PHY_AUX_CLK 130 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 131 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 132 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 133 +#define GCC_VIDEO_AHB_CLK 134 +#define GCC_VIDEO_AXI_CLK 135 +#define GCC_VIDEO_THROTTLE_AXI_CLK 136 +#define GCC_VIDEO_XO_CLK 137 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 138 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 139 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 140 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 141 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 142 +#define GCC_RX5_PCIE_CLKREF_CLK 143 +#define GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC 144 +#define GCC_NPU_PLL0_MAIN_DIV_CLK_SRC 145 + +/* GCC resets */ +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_SDCC1_BCR 2 +#define GCC_SDCC2_BCR 3 +#define GCC_UFS_PHY_BCR 4 +#define GCC_USB30_PRIM_BCR 5 +#define GCC_PCIE_0_BCR 6 +#define GCC_PCIE_0_PHY_BCR 7 +#define GCC_QUPV3_WRAPPER_0_BCR 8 +#define GCC_QUPV3_WRAPPER_1_BCR 9 +#define GCC_USB3_PHY_PRIM_BCR 10 +#define GCC_USB3_DP_PHY_PRIM_BCR 11 + +/* GCC GDSCs */ +#define USB30_PRIM_GDSC 0 +#define UFS_PHY_GDSC 1 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7280.h b/include/dt-bindings/clock/qcom,gpucc-sc7280.h new file mode 100644 index 000000000000..669b23b606ba --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sc7280.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H + +/* GPU_CC clocks */ +#define GPU_CC_PLL0 0 +#define GPU_CC_PLL1 1 +#define GPU_CC_AHB_CLK 2 +#define GPU_CC_CB_CLK 3 +#define GPU_CC_CRC_AHB_CLK 4 +#define GPU_CC_CX_GMU_CLK 5 +#define GPU_CC_CX_SNOC_DVM_CLK 6 +#define GPU_CC_CXO_AON_CLK 7 +#define GPU_CC_CXO_CLK 8 +#define GPU_CC_GMU_CLK_SRC 9 +#define GPU_CC_GX_GMU_CLK 10 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11 +#define GPU_CC_HUB_AHB_DIV_CLK_SRC 12 +#define GPU_CC_HUB_AON_CLK 13 +#define GPU_CC_HUB_CLK_SRC 14 +#define GPU_CC_HUB_CX_INT_CLK 15 +#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 16 +#define GPU_CC_MND1X_0_GFX3D_CLK 17 +#define GPU_CC_MND1X_1_GFX3D_CLK 18 +#define GPU_CC_SLEEP_CLK 19 + +/* GPU_CC power domains */ +#define GPU_CC_CX_GDSC 0 +#define GPU_CC_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8994.h b/include/dt-bindings/clock/qcom,mmcc-msm8994.h new file mode 100644 index 000000000000..4b289092f5a2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8994.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, Konrad Dybcio + */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8994_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8994_H + +/* Clocks */ +#define MMPLL0_EARLY 0 +#define MMPLL0_PLL 1 +#define MMPLL1_EARLY 2 +#define MMPLL1_PLL 3 +#define MMPLL3_EARLY 4 +#define MMPLL3_PLL 5 +#define MMPLL4_EARLY 6 +#define MMPLL4_PLL 7 +#define MMPLL5_EARLY 8 +#define MMPLL5_PLL 9 +#define AXI_CLK_SRC 10 +#define RBBMTIMER_CLK_SRC 11 +#define PCLK0_CLK_SRC 12 +#define PCLK1_CLK_SRC 13 +#define MDP_CLK_SRC 14 +#define VSYNC_CLK_SRC 15 +#define BYTE0_CLK_SRC 16 +#define BYTE1_CLK_SRC 17 +#define ESC0_CLK_SRC 18 +#define ESC1_CLK_SRC 19 +#define MDSS_AHB_CLK 20 +#define MDSS_PCLK0_CLK 21 +#define MDSS_PCLK1_CLK 22 +#define MDSS_VSYNC_CLK 23 +#define MDSS_BYTE0_CLK 24 +#define MDSS_BYTE1_CLK 25 +#define MDSS_ESC0_CLK 26 +#define MDSS_ESC1_CLK 27 +#define CSI0_CLK_SRC 28 +#define CSI1_CLK_SRC 29 +#define CSI2_CLK_SRC 30 +#define CSI3_CLK_SRC 31 +#define VFE0_CLK_SRC 32 +#define VFE1_CLK_SRC 33 +#define CPP_CLK_SRC 34 +#define JPEG0_CLK_SRC 35 +#define JPEG1_CLK_SRC 36 +#define JPEG2_CLK_SRC 37 +#define CSI2PHYTIMER_CLK_SRC 38 +#define FD_CORE_CLK_SRC 39 +#define OCMEMNOC_CLK_SRC 40 +#define CCI_CLK_SRC 41 +#define MMSS_GP0_CLK_SRC 42 +#define MMSS_GP1_CLK_SRC 43 +#define JPEG_DMA_CLK_SRC 44 +#define MCLK0_CLK_SRC 45 +#define MCLK1_CLK_SRC 46 +#define MCLK2_CLK_SRC 47 +#define MCLK3_CLK_SRC 48 +#define CSI0PHYTIMER_CLK_SRC 49 +#define CSI1PHYTIMER_CLK_SRC 50 +#define EXTPCLK_CLK_SRC 51 +#define HDMI_CLK_SRC 52 +#define CAMSS_AHB_CLK 53 +#define CAMSS_CCI_CCI_AHB_CLK 54 +#define CAMSS_CCI_CCI_CLK 55 +#define CAMSS_VFE_CPP_AHB_CLK 56 +#define CAMSS_VFE_CPP_AXI_CLK 57 +#define CAMSS_VFE_CPP_CLK 58 +#define CAMSS_CSI0_AHB_CLK 59 +#define CAMSS_CSI0_CLK 60 +#define CAMSS_CSI0PHY_CLK 61 +#define CAMSS_CSI0PIX_CLK 62 +#define CAMSS_CSI0RDI_CLK 63 +#define CAMSS_CSI1_AHB_CLK 64 +#define CAMSS_CSI1_CLK 65 +#define CAMSS_CSI1PHY_CLK 66 +#define CAMSS_CSI1PIX_CLK 67 +#define CAMSS_CSI1RDI_CLK 68 +#define CAMSS_CSI2_AHB_CLK 69 +#define CAMSS_CSI2_CLK 70 +#define CAMSS_CSI2PHY_CLK 71 +#define CAMSS_CSI2PIX_CLK 72 +#define CAMSS_CSI2RDI_CLK 73 +#define CAMSS_CSI3_AHB_CLK 74 +#define CAMSS_CSI3_CLK 75 +#define CAMSS_CSI3PHY_CLK 76 +#define CAMSS_CSI3PIX_CLK 77 +#define CAMSS_CSI3RDI_CLK 78 +#define CAMSS_CSI_VFE0_CLK 79 +#define CAMSS_CSI_VFE1_CLK 80 +#define CAMSS_GP0_CLK 81 +#define CAMSS_GP1_CLK 82 
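A similar sketch for the SC7280 GPU_CC header above; the unit addresses, node names and the "qcom,sc7280-gpucc" compatible are assumptions for illustration. Note the header defines clocks and power domains but no resets, so no #reset-cells is advertised.

    #include <dt-bindings/clock/qcom,gpucc-sc7280.h>

    gpucc: clock-controller@3d90000 {	/* hypothetical unit address */
        compatible = "qcom,sc7280-gpucc";	/* assumed compatible string */
        #clock-cells = <1>;
        #power-domain-cells = <1>;
    };

    gmu@3d6a000 {				/* hypothetical GMU consumer node */
        clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
                 <&gpucc GPU_CC_AHB_CLK>;
        power-domains = <&gpucc GPU_CC_CX_GDSC>;
    };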
+#define CAMSS_ISPIF_AHB_CLK 83 +#define CAMSS_JPEG_DMA_CLK 84 +#define CAMSS_JPEG_JPEG0_CLK 85 +#define CAMSS_JPEG_JPEG1_CLK 86 +#define CAMSS_JPEG_JPEG2_CLK 87 +#define CAMSS_JPEG_JPEG_AHB_CLK 88 +#define CAMSS_JPEG_JPEG_AXI_CLK 89 +#define CAMSS_MCLK0_CLK 90 +#define CAMSS_MCLK1_CLK 91 +#define CAMSS_MCLK2_CLK 92 +#define CAMSS_MCLK3_CLK 93 +#define CAMSS_MICRO_AHB_CLK 94 +#define CAMSS_PHY0_CSI0PHYTIMER_CLK 95 +#define CAMSS_PHY1_CSI1PHYTIMER_CLK 96 +#define CAMSS_PHY2_CSI2PHYTIMER_CLK 97 +#define CAMSS_TOP_AHB_CLK 98 +#define CAMSS_VFE_VFE0_CLK 99 +#define CAMSS_VFE_VFE1_CLK 100 +#define CAMSS_VFE_VFE_AHB_CLK 101 +#define CAMSS_VFE_VFE_AXI_CLK 102 +#define FD_AXI_CLK 103 +#define FD_CORE_CLK 104 +#define FD_CORE_UAR_CLK 105 +#define MDSS_AXI_CLK 106 +#define MDSS_EXTPCLK_CLK 107 +#define MDSS_HDMI_AHB_CLK 108 +#define MDSS_HDMI_CLK 109 +#define MDSS_MDP_CLK 110 +#define MMSS_MISC_AHB_CLK 111 +#define MMSS_MMSSNOC_AXI_CLK 112 +#define MMSS_S0_AXI_CLK 113 +#define OCMEMCX_OCMEMNOC_CLK 114 +#define OXILI_GFX3D_CLK 115 +#define OXILI_RBBMTIMER_CLK 116 +#define OXILICX_AHB_CLK 117 +#define VENUS0_AHB_CLK 118 +#define VENUS0_AXI_CLK 119 +#define VENUS0_OCMEMNOC_CLK 120 +#define VENUS0_VCODEC0_CLK 121 +#define VENUS0_CORE0_VCODEC_CLK 122 +#define VENUS0_CORE1_VCODEC_CLK 123 +#define VENUS0_CORE2_VCODEC_CLK 124 +#define AHB_CLK_SRC 125 +#define FD_AHB_CLK 126 + +/* GDSCs */ +#define VENUS_GDSC 0 +#define VENUS_CORE0_GDSC 1 +#define VENUS_CORE1_GDSC 2 +#define VENUS_CORE2_GDSC 3 +#define CAMSS_TOP_GDSC 4 +#define MDSS_GDSC 5 +#define JPEG_GDSC 6 +#define VFE_GDSC 7 +#define CPP_GDSC 8 +#define OXILI_GX_GDSC 9 +#define OXILI_CX_GDSC 10 +#define FD_GDSC 11 + +/* Resets */ +#define CAMSS_MICRO_BCR 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 8aaba7cd9589..aa834d516234 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -149,5 +149,15 @@ #define RPM_SMD_CE2_A_CLK 103 #define RPM_SMD_CE3_CLK 104 #define RPM_SMD_CE3_A_CLK 105 +#define RPM_SMD_QUP_CLK 106 +#define RPM_SMD_QUP_A_CLK 107 +#define RPM_SMD_MMRT_CLK 108 +#define RPM_SMD_MMRT_A_CLK 109 +#define RPM_SMD_MMNRT_CLK 110 +#define RPM_SMD_MMNRT_A_CLK 111 +#define RPM_SMD_SNOC_PERIPH_CLK 112 +#define RPM_SMD_SNOC_PERIPH_A_CLK 113 +#define RPM_SMD_SNOC_LPASS_CLK 114 +#define RPM_SMD_SNOC_LPASS_A_CLK 115 #endif diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h index 583a99161aaa..0a7d1be0d124 100644 --- a/include/dt-bindings/clock/qcom,rpmh.h +++ b/include/dt-bindings/clock/qcom,rpmh.h @@ -31,5 +31,7 @@ #define RPMH_RF_CLK5_A 22 #define RPMH_PKA_CLK 23 #define RPMH_HWKM_CLK 24 +#define RPMH_QLINK_CLK 25 +#define RPMH_QLINK_CLK_A 26 #endif diff --git a/include/dt-bindings/clock/qcom,videocc-sc7280.h b/include/dt-bindings/clock/qcom,videocc-sc7280.h new file mode 100644 index 000000000000..9e00c3a5f75e --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sc7280.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H + +/* VIDEO_CC clocks */ +#define VIDEO_PLL0 0 +#define VIDEO_CC_IRIS_AHB_CLK 1 +#define VIDEO_CC_IRIS_CLK_SRC 2 +#define VIDEO_CC_MVS0_AXI_CLK 3 +#define VIDEO_CC_MVS0_CORE_CLK 4 +#define VIDEO_CC_MVSC_CORE_CLK 5 +#define VIDEO_CC_MVSC_CTL_AXI_CLK 6 +#define VIDEO_CC_SLEEP_CLK 7 +#define VIDEO_CC_SLEEP_CLK_SRC 8 +#define VIDEO_CC_VENUS_AHB_CLK 9 +#define VIDEO_CC_XO_CLK 10 +#define VIDEO_CC_XO_CLK_SRC 11 + +/* VIDEO_CC power domains */ +#define MVS0_GDSC 0 +#define MVSC_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h new file mode 100644 index 000000000000..0bb17ff1a01a --- /dev/null +++ b/include/dt-bindings/clock/r9a07g044-cpg.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2021 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A07G044 CPG Core Clocks */ +#define R9A07G044_CLK_I 0 +#define R9A07G044_CLK_I2 1 +#define R9A07G044_CLK_G 2 +#define R9A07G044_CLK_S0 3 +#define R9A07G044_CLK_S1 4 +#define R9A07G044_CLK_SPI0 5 +#define R9A07G044_CLK_SPI1 6 +#define R9A07G044_CLK_SD0 7 +#define R9A07G044_CLK_SD1 8 +#define R9A07G044_CLK_M0 9 +#define R9A07G044_CLK_M1 10 +#define R9A07G044_CLK_M2 11 +#define R9A07G044_CLK_M3 12 +#define R9A07G044_CLK_M4 13 +#define R9A07G044_CLK_HP 14 +#define R9A07G044_CLK_TSU 15 +#define R9A07G044_CLK_ZT 16 +#define R9A07G044_CLK_P0 17 +#define R9A07G044_CLK_P1 18 +#define R9A07G044_CLK_P2 19 +#define R9A07G044_CLK_AT 20 +#define R9A07G044_OSCCLK 21 +#define R9A07G044_CLK_P0_DIV2 22 + +/* R9A07G044 Module Clocks */ +#define R9A07G044_CA55_SCLK 0 +#define R9A07G044_CA55_PCLK 1 +#define R9A07G044_CA55_ATCLK 2 +#define R9A07G044_CA55_GICCLK 3 +#define R9A07G044_CA55_PERICLK 4 +#define R9A07G044_CA55_ACLK 5 +#define R9A07G044_CA55_TSCLK 6 +#define R9A07G044_GIC600_GICCLK 7 +#define R9A07G044_IA55_CLK 8 +#define R9A07G044_IA55_PCLK 9 +#define R9A07G044_MHU_PCLK 10 +#define R9A07G044_SYC_CNT_CLK 11 +#define R9A07G044_DMAC_ACLK 12 +#define R9A07G044_DMAC_PCLK 13 +#define R9A07G044_OSTM0_PCLK 14 +#define R9A07G044_OSTM1_PCLK 15 +#define R9A07G044_OSTM2_PCLK 16 +#define R9A07G044_MTU_X_MCK_MTU3 17 +#define R9A07G044_POE3_CLKM_POE 18 +#define R9A07G044_GPT_PCLK 19 +#define R9A07G044_POEG_A_CLKP 20 +#define R9A07G044_POEG_B_CLKP 21 +#define R9A07G044_POEG_C_CLKP 22 +#define R9A07G044_POEG_D_CLKP 23 +#define R9A07G044_WDT0_PCLK 24 +#define R9A07G044_WDT0_CLK 25 +#define R9A07G044_WDT1_PCLK 26 +#define R9A07G044_WDT1_CLK 27 +#define R9A07G044_WDT2_PCLK 28 +#define R9A07G044_WDT2_CLK 29 +#define R9A07G044_SPI_CLK2 30 +#define R9A07G044_SPI_CLK 31 +#define R9A07G044_SDHI0_IMCLK 32 +#define R9A07G044_SDHI0_IMCLK2 33 +#define R9A07G044_SDHI0_CLK_HS 34 +#define R9A07G044_SDHI0_ACLK 35 +#define R9A07G044_SDHI1_IMCLK 36 +#define R9A07G044_SDHI1_IMCLK2 37 +#define R9A07G044_SDHI1_CLK_HS 38 +#define R9A07G044_SDHI1_ACLK 39 +#define R9A07G044_GPU_CLK 40 +#define R9A07G044_GPU_AXI_CLK 41 +#define R9A07G044_GPU_ACE_CLK 42 +#define R9A07G044_ISU_ACLK 43 +#define R9A07G044_ISU_PCLK 44 +#define R9A07G044_H264_CLK_A 45 +#define R9A07G044_H264_CLK_P 46 +#define R9A07G044_CRU_SYSCLK 47 +#define R9A07G044_CRU_VCLK 48 +#define R9A07G044_CRU_PCLK 49 +#define R9A07G044_CRU_ACLK 50 +#define R9A07G044_MIPI_DSI_PLLCLK 51 +#define 
R9A07G044_MIPI_DSI_SYSCLK 52 +#define R9A07G044_MIPI_DSI_ACLK 53 +#define R9A07G044_MIPI_DSI_PCLK 54 +#define R9A07G044_MIPI_DSI_VCLK 55 +#define R9A07G044_MIPI_DSI_LPCLK 56 +#define R9A07G044_LCDC_CLK_A 57 +#define R9A07G044_LCDC_CLK_P 58 +#define R9A07G044_LCDC_CLK_D 59 +#define R9A07G044_SSI0_PCLK2 60 +#define R9A07G044_SSI0_PCLK_SFR 61 +#define R9A07G044_SSI1_PCLK2 62 +#define R9A07G044_SSI1_PCLK_SFR 63 +#define R9A07G044_SSI2_PCLK2 64 +#define R9A07G044_SSI2_PCLK_SFR 65 +#define R9A07G044_SSI3_PCLK2 66 +#define R9A07G044_SSI3_PCLK_SFR 67 +#define R9A07G044_SRC_CLKP 68 +#define R9A07G044_USB_U2H0_HCLK 69 +#define R9A07G044_USB_U2H1_HCLK 70 +#define R9A07G044_USB_U2P_EXR_CPUCLK 71 +#define R9A07G044_USB_PCLK 72 +#define R9A07G044_ETH0_CLK_AXI 73 +#define R9A07G044_ETH0_CLK_CHI 74 +#define R9A07G044_ETH1_CLK_AXI 75 +#define R9A07G044_ETH1_CLK_CHI 76 +#define R9A07G044_I2C0_PCLK 77 +#define R9A07G044_I2C1_PCLK 78 +#define R9A07G044_I2C2_PCLK 79 +#define R9A07G044_I2C3_PCLK 80 +#define R9A07G044_SCIF0_CLK_PCK 81 +#define R9A07G044_SCIF1_CLK_PCK 82 +#define R9A07G044_SCIF2_CLK_PCK 83 +#define R9A07G044_SCIF3_CLK_PCK 84 +#define R9A07G044_SCIF4_CLK_PCK 85 +#define R9A07G044_SCI0_CLKP 86 +#define R9A07G044_SCI1_CLKP 87 +#define R9A07G044_IRDA_CLKP 88 +#define R9A07G044_RSPI0_CLKB 89 +#define R9A07G044_RSPI1_CLKB 90 +#define R9A07G044_RSPI2_CLKB 91 +#define R9A07G044_CANFD_PCLK 92 +#define R9A07G044_GPIO_HCLK 93 +#define R9A07G044_ADC_ADCLK 94 +#define R9A07G044_ADC_PCLK 95 +#define R9A07G044_TSU_PCLK 96 + +/* R9A07G044 Resets */ +#define R9A07G044_CA55_RST_1_0 0 +#define R9A07G044_CA55_RST_1_1 1 +#define R9A07G044_CA55_RST_3_0 2 +#define R9A07G044_CA55_RST_3_1 3 +#define R9A07G044_CA55_RST_4 4 +#define R9A07G044_CA55_RST_5 5 +#define R9A07G044_CA55_RST_6 6 +#define R9A07G044_CA55_RST_7 7 +#define R9A07G044_CA55_RST_8 8 +#define R9A07G044_CA55_RST_9 9 +#define R9A07G044_CA55_RST_10 10 +#define R9A07G044_CA55_RST_11 11 +#define R9A07G044_CA55_RST_12 12 +#define R9A07G044_GIC600_GICRESET_N 13 +#define R9A07G044_GIC600_DBG_GICRESET_N 14 +#define R9A07G044_IA55_RESETN 15 +#define R9A07G044_MHU_RESETN 16 +#define R9A07G044_DMAC_ARESETN 17 +#define R9A07G044_DMAC_RST_ASYNC 18 +#define R9A07G044_SYC_RESETN 19 +#define R9A07G044_OSTM0_PRESETZ 20 +#define R9A07G044_OSTM1_PRESETZ 21 +#define R9A07G044_OSTM2_PRESETZ 22 +#define R9A07G044_MTU_X_PRESET_MTU3 23 +#define R9A07G044_POE3_RST_M_REG 24 +#define R9A07G044_GPT_RST_C 25 +#define R9A07G044_POEG_A_RST 26 +#define R9A07G044_POEG_B_RST 27 +#define R9A07G044_POEG_C_RST 28 +#define R9A07G044_POEG_D_RST 29 +#define R9A07G044_WDT0_PRESETN 30 +#define R9A07G044_WDT1_PRESETN 31 +#define R9A07G044_WDT2_PRESETN 32 +#define R9A07G044_SPI_RST 33 +#define R9A07G044_SDHI0_IXRST 34 +#define R9A07G044_SDHI1_IXRST 35 +#define R9A07G044_GPU_RESETN 36 +#define R9A07G044_GPU_AXI_RESETN 37 +#define R9A07G044_GPU_ACE_RESETN 38 +#define R9A07G044_ISU_ARESETN 39 +#define R9A07G044_ISU_PRESETN 40 +#define R9A07G044_H264_X_RESET_VCP 41 +#define R9A07G044_H264_CP_PRESET_P 42 +#define R9A07G044_CRU_CMN_RSTB 43 +#define R9A07G044_CRU_PRESETN 44 +#define R9A07G044_CRU_ARESETN 45 +#define R9A07G044_MIPI_DSI_CMN_RSTB 46 +#define R9A07G044_MIPI_DSI_ARESET_N 47 +#define R9A07G044_MIPI_DSI_PRESET_N 48 +#define R9A07G044_LCDC_RESET_N 49 +#define R9A07G044_SSI0_RST_M2_REG 50 +#define R9A07G044_SSI1_RST_M2_REG 51 +#define R9A07G044_SSI2_RST_M2_REG 52 +#define R9A07G044_SSI3_RST_M2_REG 53 +#define R9A07G044_SRC_RST 54 +#define R9A07G044_USB_U2H0_HRESETN 55 +#define 
R9A07G044_USB_U2H1_HRESETN 56 +#define R9A07G044_USB_U2P_EXL_SYSRST 57 +#define R9A07G044_USB_PRESETN 58 +#define R9A07G044_ETH0_RST_HW_N 59 +#define R9A07G044_ETH1_RST_HW_N 60 +#define R9A07G044_I2C0_MRST 61 +#define R9A07G044_I2C1_MRST 62 +#define R9A07G044_I2C2_MRST 63 +#define R9A07G044_I2C3_MRST 64 +#define R9A07G044_SCIF0_RST_SYSTEM_N 65 +#define R9A07G044_SCIF1_RST_SYSTEM_N 66 +#define R9A07G044_SCIF2_RST_SYSTEM_N 67 +#define R9A07G044_SCIF3_RST_SYSTEM_N 68 +#define R9A07G044_SCIF4_RST_SYSTEM_N 69 +#define R9A07G044_SCI0_RST 70 +#define R9A07G044_SCI1_RST 71 +#define R9A07G044_IRDA_RST 72 +#define R9A07G044_RSPI0_RST 73 +#define R9A07G044_RSPI1_RST 74 +#define R9A07G044_RSPI2_RST 75 +#define R9A07G044_CANFD_RSTP_N 76 +#define R9A07G044_CANFD_RSTC_N 77 +#define R9A07G044_GPIO_RSTN 78 +#define R9A07G044_GPIO_PORT_RESETN 79 +#define R9A07G044_GPIO_SPARE_RESETN 80 +#define R9A07G044_ADC_PRESETN 81 +#define R9A07G044_ADC_ADRST_N 82 +#define R9A07G044_TSU_PRESETN 83 + +#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */ diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h index 35a5a01f9697..a96a9870ad59 100644 --- a/include/dt-bindings/clock/rk3036-cru.h +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -81,6 +81,7 @@ #define HCLK_OTG0 449 #define HCLK_OTG1 450 #define HCLK_NANDC 453 +#define HCLK_SFC 454 #define HCLK_SDMMC 456 #define HCLK_SDIO 457 #define HCLK_EMMC 459 diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h index 4cdaf135829c..e02770b98e6c 100644 --- a/include/dt-bindings/clock/stm32mp1-clks.h +++ b/include/dt-bindings/clock/stm32mp1-clks.h @@ -248,4 +248,31 @@ #define STM32MP1_LAST_CLK 232 +/* SCMI clock identifiers */ +#define CK_SCMI0_HSE 0 +#define CK_SCMI0_HSI 1 +#define CK_SCMI0_CSI 2 +#define CK_SCMI0_LSE 3 +#define CK_SCMI0_LSI 4 +#define CK_SCMI0_PLL2_Q 5 +#define CK_SCMI0_PLL2_R 6 +#define CK_SCMI0_MPU 7 +#define CK_SCMI0_AXI 8 +#define CK_SCMI0_BSEC 9 +#define CK_SCMI0_CRYP1 10 +#define CK_SCMI0_GPIOZ 11 +#define CK_SCMI0_HASH1 12 +#define CK_SCMI0_I2C4 13 +#define CK_SCMI0_I2C6 14 +#define CK_SCMI0_IWDG1 15 +#define CK_SCMI0_RNG1 16 +#define CK_SCMI0_RTC 17 +#define CK_SCMI0_RTCAPB 18 +#define CK_SCMI0_SPI6 19 +#define CK_SCMI0_USART1 20 + +#define CK_SCMI1_PLL3_Q 0 +#define CK_SCMI1_PLL3_R 1 +#define CK_SCMI1_MCU 2 + #endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */ diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h deleted file mode 100644 index bf2ff6d2ee23..000000000000 --- a/include/dt-bindings/clock/zx296718-clock.h +++ /dev/null @@ -1,164 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2015 - 2016 ZTE Corporation. 
- */ -#ifndef __DT_BINDINGS_CLOCK_ZX296718_H -#define __DT_BINDINGS_CLOCK_ZX296718_H - -/* PLL */ -#define ZX296718_PLL_CPU 1 -#define ZX296718_PLL_MAC 2 -#define ZX296718_PLL_MM0 3 -#define ZX296718_PLL_MM1 4 -#define ZX296718_PLL_VGA 5 -#define ZX296718_PLL_DDR 6 -#define ZX296718_PLL_AUDIO 7 -#define ZX296718_PLL_HSIC 8 -#define CPU_DBG_GATE 9 -#define A72_GATE 10 -#define CPU_PERI_GATE 11 -#define A53_GATE 12 -#define DDR1_GATE 13 -#define DDR0_GATE 14 -#define SD1_WCLK 15 -#define SD1_AHB 16 -#define SD0_WCLK 17 -#define SD0_AHB 18 -#define EMMC_WCLK 19 -#define EMMC_NAND_AXI 20 -#define NAND_WCLK 21 -#define EMMC_NAND_AHB 22 -#define LSP1_148M5 23 -#define LSP1_99M 24 -#define LSP1_24M 25 -#define LSP0_74M25 26 -#define LSP0_32K 27 -#define LSP0_148M5 28 -#define LSP0_99M 29 -#define LSP0_24M 30 -#define DEMUX_AXI 31 -#define DEMUX_APB 32 -#define DEMUX_148M5 33 -#define DEMUX_108M 34 -#define AUDIO_APB 35 -#define AUDIO_99M 36 -#define AUDIO_24M 37 -#define AUDIO_16M384 38 -#define AUDIO_32K 39 -#define WDT_WCLK 40 -#define TIMER_WCLK 41 -#define VDE_ACLK 42 -#define VCE_ACLK 43 -#define HDE_ACLK 44 -#define GPU_ACLK 45 -#define SAPPU_ACLK 46 -#define SAPPU_WCLK 47 -#define VOU_ACLK 48 -#define VOU_MAIN_WCLK 49 -#define VOU_AUX_WCLK 50 -#define VOU_PPU_WCLK 51 -#define MIPI_CFG_CLK 52 -#define VGA_I2C_WCLK 53 -#define MIPI_REF_CLK 54 -#define HDMI_OSC_CEC 55 -#define HDMI_OSC_CLK 56 -#define HDMI_XCLK 57 -#define VIU_M0_ACLK 58 -#define VIU_M1_ACLK 59 -#define VIU_WCLK 60 -#define VIU_JPEG_WCLK 61 -#define VIU_CFG_CLK 62 -#define TS_SYS_WCLK 63 -#define TS_SYS_108M 64 -#define USB20_HCLK 65 -#define USB20_PHY_CLK 66 -#define USB21_HCLK 67 -#define USB21_PHY_CLK 68 -#define GMAC_RMIICLK 69 -#define GMAC_PCLK 70 -#define GMAC_ACLK 71 -#define GMAC_RFCLK 72 -#define TEMPSENSOR_GATE 73 - -#define TOP_NR_CLKS 74 - - -#define LSP0_TIMER3_PCLK 1 -#define LSP0_TIMER3_WCLK 2 -#define LSP0_TIMER4_PCLK 3 -#define LSP0_TIMER4_WCLK 4 -#define LSP0_TIMER5_PCLK 5 -#define LSP0_TIMER5_WCLK 6 -#define LSP0_UART3_PCLK 7 -#define LSP0_UART3_WCLK 8 -#define LSP0_UART1_PCLK 9 -#define LSP0_UART1_WCLK 10 -#define LSP0_UART2_PCLK 11 -#define LSP0_UART2_WCLK 12 -#define LSP0_SPIFC0_PCLK 13 -#define LSP0_SPIFC0_WCLK 14 -#define LSP0_I2C4_PCLK 15 -#define LSP0_I2C4_WCLK 16 -#define LSP0_I2C5_PCLK 17 -#define LSP0_I2C5_WCLK 18 -#define LSP0_SSP0_PCLK 19 -#define LSP0_SSP0_WCLK 20 -#define LSP0_SSP1_PCLK 21 -#define LSP0_SSP1_WCLK 22 -#define LSP0_USIM_PCLK 23 -#define LSP0_USIM_WCLK 24 -#define LSP0_GPIO_PCLK 25 -#define LSP0_GPIO_WCLK 26 -#define LSP0_I2C3_PCLK 27 -#define LSP0_I2C3_WCLK 28 - -#define LSP0_NR_CLKS 29 - - -#define LSP1_UART4_PCLK 1 -#define LSP1_UART4_WCLK 2 -#define LSP1_UART5_PCLK 3 -#define LSP1_UART5_WCLK 4 -#define LSP1_PWM_PCLK 5 -#define LSP1_PWM_WCLK 6 -#define LSP1_I2C2_PCLK 7 -#define LSP1_I2C2_WCLK 8 -#define LSP1_SSP2_PCLK 9 -#define LSP1_SSP2_WCLK 10 -#define LSP1_SSP3_PCLK 11 -#define LSP1_SSP3_WCLK 12 -#define LSP1_SSP4_PCLK 13 -#define LSP1_SSP4_WCLK 14 -#define LSP1_USIM1_PCLK 15 -#define LSP1_USIM1_WCLK 16 - -#define LSP1_NR_CLKS 17 - - -#define AUDIO_I2S0_WCLK 1 -#define AUDIO_I2S0_PCLK 2 -#define AUDIO_I2S1_WCLK 3 -#define AUDIO_I2S1_PCLK 4 -#define AUDIO_I2S2_WCLK 5 -#define AUDIO_I2S2_PCLK 6 -#define AUDIO_I2S3_WCLK 7 -#define AUDIO_I2S3_PCLK 8 -#define AUDIO_I2C0_WCLK 9 -#define AUDIO_I2C0_PCLK 10 -#define AUDIO_SPDIF0_WCLK 11 -#define AUDIO_SPDIF0_PCLK 12 -#define AUDIO_SPDIF1_WCLK 13 -#define AUDIO_SPDIF1_PCLK 14 -#define AUDIO_TIMER_WCLK 15 -#define AUDIO_TIMER_PCLK 
16 -#define AUDIO_TDM_WCLK 17 -#define AUDIO_TDM_PCLK 18 -#define AUDIO_TS_PCLK 19 -#define I2S0_WCLK_MUX 20 -#define I2S1_WCLK_MUX 21 -#define I2S2_WCLK_MUX 22 -#define I2S3_WCLK_MUX 23 - -#define AUDIO_NR_CLKS 24 - -#endif diff --git a/include/dt-bindings/gce/mt8192-gce.h b/include/dt-bindings/gce/mt8192-gce.h new file mode 100644 index 000000000000..9e5a0eb040a0 --- /dev/null +++ b/include/dt-bindings/gce/mt8192-gce.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + * Author: Yongqiang Niu <yongqiang.niu@mediatek.com> + */ + +#ifndef _DT_BINDINGS_GCE_MT8192_H +#define _DT_BINDINGS_GCE_MT8192_H + +/* assign timeout 0 also means default */ +#define CMDQ_NO_TIMEOUT 0xffffffff +#define CMDQ_TIMEOUT_DEFAULT 1000 + +/* GCE thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +/* CPR count in 32bit register */ +#define GCE_CPR_COUNT 1312 + +/* GCE subsys table */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1880XXXX 5 +#define SUBSYS_1881XXXX 6 +#define SUBSYS_1882XXXX 7 +#define SUBSYS_1883XXXX 8 +#define SUBSYS_1884XXXX 9 +#define SUBSYS_1000XXXX 10 +#define SUBSYS_1001XXXX 11 +#define SUBSYS_1002XXXX 12 +#define SUBSYS_1003XXXX 13 +#define SUBSYS_1004XXXX 14 +#define SUBSYS_1005XXXX 15 +#define SUBSYS_1020XXXX 16 +#define SUBSYS_1028XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1800XXXX 22 +#define SUBSYS_1801XXXX 23 +#define SUBSYS_1802XXXX 24 +#define SUBSYS_1804XXXX 25 +#define SUBSYS_1805XXXX 26 +#define SUBSYS_1808XXXX 27 +#define SUBSYS_180aXXXX 28 +#define SUBSYS_180bXXXX 29 + +#define CMDQ_EVENT_VDEC_LAT_SOF_0 0 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_0 1 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_1 2 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_2 3 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_3 4 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_4 5 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_5 6 +#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_6 7 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_0 8 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_1 9 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_2 10 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_3 11 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_4 12 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_5 13 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_6 14 +#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_7 15 + +#define CMDQ_EVENT_ISP_FRAME_DONE_A 65 +#define CMDQ_EVENT_ISP_FRAME_DONE_B 66 +#define CMDQ_EVENT_ISP_FRAME_DONE_C 67 +#define CMDQ_EVENT_CAMSV0_PASS1_DONE 68 +#define CMDQ_EVENT_CAMSV02_PASS1_DONE 69 +#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70 +#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71 +#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72 +#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73 +#define CMDQ_EVENT_MRAW_1_PASS1_DONE 74 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82 +#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83 +#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84 +#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85 
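The GCE thread priorities, subsys IDs and event numbers above are consumed through MediaTek mailbox cells; a hedged sketch, assuming a hypothetical MT8192 client node (the addresses and the client node itself are illustrative; the cell layouts follow the existing mediatek gce-mailbox binding conventions):

    #include <dt-bindings/gce/mt8192-gce.h>

    gce: mailbox@10228000 {			/* hypothetical unit address */
        compatible = "mediatek,mt8192-gce";
        #mbox-cells = <2>;			/* <channel, priority> */
    };

    mdp_rdma0: dma-controller@14001000 {	/* hypothetical GCE client */
        mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>;
        /* cells: <phandle, subsys, register offset, size> */
        mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
    };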
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86 +#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87 +#define CMDQ_EVENT_TG_OVRUN_A_INT 88 +#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89 +#define CMDQ_EVENT_TG_OVRUN_B_INT 90 +#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91 +#define CMDQ_EVENT_TG_OVRUN_C_INT 92 +#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 93 +#define CMDQ_EVENT_TG_OVRUN_M0_INT 94 +#define CMDQ_EVENT_DMA_R1_ERROR_M0_INT 95 +#define CMDQ_EVENT_TG_GRABERR_M0_INT 96 +#define CMDQ_EVENT_TG_GRABERR_M1_INT 97 +#define CMDQ_EVENT_TG_GRABERR_A_INT 98 +#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99 +#define CMDQ_EVENT_TG_GRABERR_B_INT 100 +#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101 +#define CMDQ_EVENT_TG_GRABERR_C_INT 102 +#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 103 + +#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129 +#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130 +#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131 +#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132 +#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133 +#define CMDQ_EVENT_VENC_C0_CMDQ_WP_2ND_STAGE_DONE 134 +#define CMDQ_EVENT_VENC_C0_CMDQ_WP_3RD_STAGE_DONE 135 +#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136 +#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137 +#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138 + +#define CMDQ_EVENT_VDEC_CORE0_SOF_0 160 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_0 161 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_1 162 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_2 163 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_3 164 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_4 165 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_5 166 +#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_6 167 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_0 168 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_1 169 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_2 170 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_3 171 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_4 172 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_5 173 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_6 174 +#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_7 175 +#define CMDQ_EVENT_FDVT_DONE 177 +#define CMDQ_EVENT_FE_DONE 178 +#define CMDQ_EVENT_RSC_DONE 179 +#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 180 +#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 181 + +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_0 193 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_1 194 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_2 195 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_3 196 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_4 197 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_5 198 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_6 199 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_7 200 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_8 201 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_9 202 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_10 203 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_11 204 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_12 205 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_13 206 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_14 207 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_15 208 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_16 209 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_17 210 +#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_18 211 +#define CMDQ_EVENT_IMG2_DIP_DMA_ERR_EVENT 212 +#define CMDQ_EVENT_IMG2_AMD_FRAME_DONE 213 +#define CMDQ_EVENT_IMG2_MFB_DONE_LINK_MISC 214 +#define CMDQ_EVENT_IMG2_WPE_A_DONE_LINK_MISC 215 +#define CMDQ_EVENT_IMG2_MSS_DONE_LINK_MISC 216 + +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_0 225 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_1 226 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_2 227 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_3 228 +#define 
CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_4 229 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_5 230 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_6 231 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_7 232 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_8 233 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_9 234 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_10 235 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_11 236 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_12 237 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_13 238 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_14 239 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_15 240 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_16 241 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_17 242 +#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_18 243 +#define CMDQ_EVENT_IMG1_DIP_DMA_ERR_EVENT 244 +#define CMDQ_EVENT_IMG1_AMD_FRAME_DONE 245 +#define CMDQ_EVENT_IMG1_MFB_DONE_LINK_MISC 246 +#define CMDQ_EVENT_IMG1_WPE_A_DONE_LINK_MISC 247 +#define CMDQ_EVENT_IMG1_MSS_DONE_LINK_MISC 248 + +#define CMDQ_EVENT_MDP_RDMA0_SOF 256 +#define CMDQ_EVENT_MDP_RDMA1_SOF 257 +#define CMDQ_EVENT_MDP_AAL0_SOF 258 +#define CMDQ_EVENT_MDP_AAL1_SOF 259 +#define CMDQ_EVENT_MDP_HDR0_SOF 260 +#define CMDQ_EVENT_MDP_HDR1_SOF 261 +#define CMDQ_EVENT_MDP_RSZ0_SOF 262 +#define CMDQ_EVENT_MDP_RSZ1_SOF 263 +#define CMDQ_EVENT_MDP_WROT0_SOF 264 +#define CMDQ_EVENT_MDP_WROT1_SOF 265 +#define CMDQ_EVENT_MDP_TDSHP0_SOF 266 +#define CMDQ_EVENT_MDP_TDSHP1_SOF 267 +#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 268 +#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 269 +#define CMDQ_EVENT_MDP_COLOR0_SOF 270 +#define CMDQ_EVENT_MDP_COLOR1_SOF 271 +#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290 +#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291 +#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294 +#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295 +#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 302 +#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 303 +#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 306 +#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 307 +#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 308 +#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 309 +#define CMDQ_EVENT_MDP_COLOR1_FRAME_DONE 312 +#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 313 +#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 316 +#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 317 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335 +#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338 +#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339 +#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342 +#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343 + +#define CMDQ_EVENT_DISP_OVL0_SOF 384 +#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385 +#define CMDQ_EVENT_DISP_RDMA0_SOF 386 +#define CMDQ_EVENT_DISP_RSZ0_SOF 387 +#define CMDQ_EVENT_DISP_COLOR0_SOF 388 +#define CMDQ_EVENT_DISP_CCORR0_SOF 389 +#define CMDQ_EVENT_DISP_AAL0_SOF 390 
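Every CMDQ_EVENT_* value in this header is an index into the GCE's hardware event table and so has to stay below the CMDQ_MAX_HW_EVENT bound defined at the end of the file (512 on MT8192). A consumer that wants to catch a bad ID at build time could use a plain C11 static assertion; a hedged example (not something the patch itself adds):

/* Sketch: GCE event IDs are table indices, so they must be smaller
 * than CMDQ_MAX_HW_EVENT (512 for MT8192).
 */
#include <dt-bindings/gce/mt8192-gce.h>

_Static_assert(CMDQ_EVENT_DISP_AAL0_SOF < CMDQ_MAX_HW_EVENT,
	       "GCE event ID out of range");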
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 391 +#define CMDQ_EVENT_DISP_POSTMASK0_SOF 392 +#define CMDQ_EVENT_DISP_DITHER0_SOF 393 +#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_SOF 394 +#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_SOF 395 +#define CMDQ_EVENT_DSI0_SOF 396 +#define CMDQ_EVENT_DISP_WDMA0_SOF 397 +#define CMDQ_EVENT_DISP_UFBC_WDMA0_SOF 398 +#define CMDQ_EVENT_DISP_PWM0_SOF 399 +#define CMDQ_EVENT_DISP_OVL2_2L_SOF 400 +#define CMDQ_EVENT_DISP_RDMA4_SOF 401 +#define CMDQ_EVENT_DISP_DPI0_SOF 402 +#define CMDQ_EVENT_MDP_RDMA4_SOF 403 +#define CMDQ_EVENT_MDP_HDR4_SOF 404 +#define CMDQ_EVENT_MDP_RSZ4_SOF 405 +#define CMDQ_EVENT_MDP_AAL4_SOF 406 +#define CMDQ_EVENT_MDP_TDSHP4_SOF 407 +#define CMDQ_EVENT_MDP_COLOR4_SOF 408 +#define CMDQ_EVENT_DISP_Y2R0_SOF 409 +#define CMDQ_EVENT_MDP_TDSHP4_FRAME_DONE 410 +#define CMDQ_EVENT_MDP_RSZ4_FRAME_DONE 411 +#define CMDQ_EVENT_MDP_RDMA4_FRAME_DONE 412 +#define CMDQ_EVENT_MDP_HDR4_FRAME_DONE 413 +#define CMDQ_EVENT_MDP_COLOR4_FRAME_DONE 414 +#define CMDQ_EVENT_MDP_AAL4_FRAME_DONE 415 +#define CMDQ_EVENT_DSI0_FRAME_DONE 416 +#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 417 +#define CMDQ_EVENT_DISP_UFBC_WDMA0_FRAME_DONE 418 +#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 419 +#define CMDQ_EVENT_DISP_RDMA4_FRAME_DONE 420 +#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 421 +#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 422 +#define CMDQ_EVENT_DISP_OVL2_2L_FRAME_DONE 423 +#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 424 +#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 425 +#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 426 +#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_FRAME_DONE 427 +#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 428 +#define CMDQ_EVENT_DISP_DPI0_FRAME_DONE 429 +#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 430 +#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 431 +#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 432 +#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 433 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449 +#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450 +#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451 +#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452 +#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453 +#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454 +#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455 +#define CMDQ_EVENT_DISP_OVL2_2L_RST_DONE_ENG_EVENT 456 +#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 457 +#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 458 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 459 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 460 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 461 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 462 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 463 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 464 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 465 +#define 
CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 466 +#define CMDQ_MAX_HW_EVENT 512 + +#endif diff --git a/include/dt-bindings/gce/mt8195-gce.h b/include/dt-bindings/gce/mt8195-gce.h new file mode 100644 index 000000000000..dcfb302b8a5b --- /dev/null +++ b/include/dt-bindings/gce/mt8195-gce.h @@ -0,0 +1,812 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Author: Jason-JH Lin <jason0jh.lin@mediatek.com> + */ + +#ifndef _DT_BINDINGS_GCE_MT8195_H +#define _DT_BINDINGS_GCE_MT8195_H + +/* assign timeout 0 also means default */ +#define CMDQ_NO_TIMEOUT 0xffffffff +#define CMDQ_TIMEOUT_DEFAULT 1000 + +/* GCE thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +/* CPR count in 32bit register */ +#define GCE_CPR_COUNT 1312 + +/* GCE subsys table */ +#define SUBSYS_1400XXXX 0 +#define SUBSYS_1401XXXX 1 +#define SUBSYS_1402XXXX 2 +#define SUBSYS_1c00XXXX 3 +#define SUBSYS_1c01XXXX 4 +#define SUBSYS_1c02XXXX 5 +#define SUBSYS_1c10XXXX 6 +#define SUBSYS_1c11XXXX 7 +#define SUBSYS_1c12XXXX 8 +#define SUBSYS_14f0XXXX 9 +#define SUBSYS_14f1XXXX 10 +#define SUBSYS_14f2XXXX 11 +#define SUBSYS_1800XXXX 12 +#define SUBSYS_1801XXXX 13 +#define SUBSYS_1802XXXX 14 +#define SUBSYS_1803XXXX 15 +#define SUBSYS_1032XXXX 16 +#define SUBSYS_1033XXXX 17 +#define SUBSYS_1600XXXX 18 +#define SUBSYS_1601XXXX 19 +#define SUBSYS_14e0XXXX 20 +#define SUBSYS_1c20XXXX 21 +#define SUBSYS_1c30XXXX 22 +#define SUBSYS_1c40XXXX 23 +#define SUBSYS_1c50XXXX 24 +#define SUBSYS_1c60XXXX 25 + +/* GCE General Purpose Register (GPR) support */ +#define GCE_GPR_R00 0x0 +#define GCE_GPR_R01 0x1 +#define GCE_GPR_R02 0x2 +#define GCE_GPR_R03 0x3 +#define GCE_GPR_R04 0x4 +#define GCE_GPR_R05 0x5 +#define GCE_GPR_R06 0x6 +#define GCE_GPR_R07 0x7 +#define GCE_GPR_R08 0x8 +#define GCE_GPR_R09 0x9 +#define GCE_GPR_R10 0xa +#define GCE_GPR_R11 0xb +#define GCE_GPR_R12 0xc +#define GCE_GPR_R13 0xd +#define GCE_GPR_R14 0xe +#define GCE_GPR_R15 0xf + +/* GCE hw event id */ +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_0 1 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_1 2 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_2 3 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_3 4 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_4 5 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_5 6 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_6 7 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_7 8 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_8 9 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_9 10 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_10 11 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_11 12 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_12 13 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_13 14 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_14 15 +#define CMDQ_EVENT_TRAW0_DMA_ERROR_INT 16 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_0 17 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_1 18 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_2 19 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_3 20 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_4 21 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_5 22 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_6 23 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_7 24 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_8 25 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_9 26 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_10 27 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_11 28 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_12 29 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_13 30 +#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_14 31 +#define CMDQ_EVENT_TRAW1_DMA_ERROR_INT 32 + 
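Mailbox clients consume these event IDs by embedding wait-for-event instructions in a GCE command packet. A hedged kernel-side sketch, assuming the pre-existing cmdq_pkt_wfe() helper from linux/soc/mediatek/mtk-cmdq.h (the helper is not part of this patch):

/* Sketch: block a GCE thread until the TRAW0 thread-0 done event
 * fires, then clear the event token so it can be raised again.
 * cmdq_pkt_wfe(pkt, event, clear) is assumed from mtk-cmdq.h.
 */
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <dt-bindings/gce/mt8195-gce.h>

static void example_wait_traw0_done(struct cmdq_pkt *pkt)
{
	cmdq_pkt_wfe(pkt, CMDQ_EVENT_CQ_THR_DONE_TRAW0_0, true);
}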
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_0 65 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_1 66 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_2 67 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_3 68 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_4 69 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_5 70 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_6 71 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_7 72 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_8 73 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_9 74 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_10 75 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_11 76 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_12 77 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_13 78 +#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_14 79 +#define CMDQ_EVENT_DIP0_DMA_ERR 80 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_0 81 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_1 82 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_2 83 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_3 84 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_4 85 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_5 86 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_6 87 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_7 88 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_8 89 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_9 90 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_10 91 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_11 92 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_12 93 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_13 94 +#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_14 95 +#define CMDQ_EVENT_PQA0_DMA_ERR 96 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_0 97 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_1 98 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_2 99 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_3 100 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_4 101 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_5 102 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_6 103 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_7 104 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_8 105 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_9 106 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_10 107 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_11 108 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_12 109 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_13 110 +#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_14 111 +#define CMDQ_EVENT_PQB0_DMA_ERR 112 +#define CMDQ_EVENT_DIP0_DUMMY_0 113 +#define CMDQ_EVENT_DIP0_DUMMY_1 114 +#define CMDQ_EVENT_DIP0_DUMMY_2 115 +#define CMDQ_EVENT_DIP0_DUMMY_3 116 +#define CMDQ_EVENT_WPE0_EIS_GCE_FRAME_DONE 117 +#define CMDQ_EVENT_WPE0_EIS_DONE_SYNC_OUT 118 +#define CMDQ_EVENT_WPE0_TNR_GCE_FRAME_DONE 119 +#define CMDQ_EVENT_WPE0_TNR_DONE_SYNC_OUT 120 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_0 121 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_1 122 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_2 123 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_3 124 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_4 125 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_5 126 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_6 127 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_7 128 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_8 129 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_9 130 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_10 131 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_11 132 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_12 133 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_13 134 +#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_14 135 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_0 136 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_1 137 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_2 138 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_3 139 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_4 140 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_5 141 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_6 142 +#define 
CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_7 143 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_8 144 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_9 145 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_10 146 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_11 147 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_12 148 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_13 149 +#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_14 150 +#define CMDQ_EVENT_WPE0_DUMMY_0 151 +#define CMDQ_EVENT_IMGSYS_IPE_DUMMY 152 +#define CMDQ_EVENT_IMGSYS_IPE_FDVT_DONE 153 +#define CMDQ_EVENT_IMGSYS_IPE_ME_DONE 154 +#define CMDQ_EVENT_IMGSYS_IPE_DVS_DONE 155 +#define CMDQ_EVENT_IMGSYS_IPE_DVP_DONE 156 + +#define CMDQ_EVENT_TPR_0 194 +#define CMDQ_EVENT_TPR_1 195 +#define CMDQ_EVENT_TPR_2 196 +#define CMDQ_EVENT_TPR_3 197 +#define CMDQ_EVENT_TPR_4 198 +#define CMDQ_EVENT_TPR_5 199 +#define CMDQ_EVENT_TPR_6 200 +#define CMDQ_EVENT_TPR_7 201 +#define CMDQ_EVENT_TPR_8 202 +#define CMDQ_EVENT_TPR_9 203 +#define CMDQ_EVENT_TPR_10 204 +#define CMDQ_EVENT_TPR_11 205 +#define CMDQ_EVENT_TPR_12 206 +#define CMDQ_EVENT_TPR_13 207 +#define CMDQ_EVENT_TPR_14 208 +#define CMDQ_EVENT_TPR_15 209 +#define CMDQ_EVENT_TPR_16 210 +#define CMDQ_EVENT_TPR_17 211 +#define CMDQ_EVENT_TPR_18 212 +#define CMDQ_EVENT_TPR_19 213 +#define CMDQ_EVENT_TPR_20 214 +#define CMDQ_EVENT_TPR_21 215 +#define CMDQ_EVENT_TPR_22 216 +#define CMDQ_EVENT_TPR_23 217 +#define CMDQ_EVENT_TPR_24 218 +#define CMDQ_EVENT_TPR_25 219 +#define CMDQ_EVENT_TPR_26 220 +#define CMDQ_EVENT_TPR_27 221 +#define CMDQ_EVENT_TPR_28 222 +#define CMDQ_EVENT_TPR_29 223 +#define CMDQ_EVENT_TPR_30 224 +#define CMDQ_EVENT_TPR_31 225 +#define CMDQ_EVENT_TPR_TIMEOUT_0 226 +#define CMDQ_EVENT_TPR_TIMEOUT_1 227 +#define CMDQ_EVENT_TPR_TIMEOUT_2 228 +#define CMDQ_EVENT_TPR_TIMEOUT_3 229 +#define CMDQ_EVENT_TPR_TIMEOUT_4 230 +#define CMDQ_EVENT_TPR_TIMEOUT_5 231 +#define CMDQ_EVENT_TPR_TIMEOUT_6 232 +#define CMDQ_EVENT_TPR_TIMEOUT_7 233 +#define CMDQ_EVENT_TPR_TIMEOUT_8 234 +#define CMDQ_EVENT_TPR_TIMEOUT_9 235 +#define CMDQ_EVENT_TPR_TIMEOUT_10 236 +#define CMDQ_EVENT_TPR_TIMEOUT_11 237 +#define CMDQ_EVENT_TPR_TIMEOUT_12 238 +#define CMDQ_EVENT_TPR_TIMEOUT_13 239 +#define CMDQ_EVENT_TPR_TIMEOUT_14 240 +#define CMDQ_EVENT_TPR_TIMEOUT_15 241 + +#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256 +#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257 +#define CMDQ_EVENT_VPP0_STITCH_SOF 258 +#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259 +#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260 +#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261 +#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262 +#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264 +#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265 +#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266 +#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267 + +#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269 +#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270 +#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271 +#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272 + +#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288 +#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289 +#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290 +#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291 +#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292 +#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293 +#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294 +#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296 +#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297 +#define 
CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298 +#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299 + +#define CMDQ_EVENT_VPP0_STREAM_DONE_0 320 +#define CMDQ_EVENT_VPP0_STREAM_DONE_1 321 +#define CMDQ_EVENT_VPP0_STREAM_DONE_2 322 +#define CMDQ_EVENT_VPP0_STREAM_DONE_3 323 +#define CMDQ_EVENT_VPP0_STREAM_DONE_4 324 +#define CMDQ_EVENT_VPP0_STREAM_DONE_5 325 +#define CMDQ_EVENT_VPP0_STREAM_DONE_6 326 +#define CMDQ_EVENT_VPP0_STREAM_DONE_7 327 +#define CMDQ_EVENT_VPP0_STREAM_DONE_8 328 +#define CMDQ_EVENT_VPP0_STREAM_DONE_9 329 +#define CMDQ_EVENT_VPP0_STREAM_DONE_10 330 +#define CMDQ_EVENT_VPP0_STREAM_DONE_11 331 +#define CMDQ_EVENT_VPP0_STREAM_DONE_12 332 +#define CMDQ_EVENT_VPP0_STREAM_DONE_13 333 +#define CMDQ_EVENT_VPP0_STREAM_DONE_14 334 +#define CMDQ_EVENT_VPP0_STREAM_DONE_15 335 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_0 336 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_1 337 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_2 338 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_3 339 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_4 340 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_5 341 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_6 342 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_7 343 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_8 344 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_9 345 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_10 346 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_11 347 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_12 348 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_13 349 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_14 350 +#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_15 351 +#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352 +#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID 353 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354 +#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355 + +#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384 +#define CMDQ_EVENT_VPP1_DGI_SOF 385 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402 +#define CMDQ_EVENT_VPP1_SVPP1_TDSHP_SOF 403 +#define CMDQ_EVENT_VPP1_SVPP2_TDSHP_SOF 404 +#define CMDQ_EVENT_VPP1_SVPP3_TDSHP_SOF 405 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411 +#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417 +#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418 +#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419 +#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420 +#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421 +#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422 +#define 
CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433 +#define CMDQ_EVENT_VPP1_FRAME_DONE_10 434 +#define CMDQ_EVENT_VPP1_FRAME_DONE_11 435 +#define CMDQ_EVENT_VPP1_FRAME_DONE_12 436 +#define CMDQ_EVENT_VPP1_FRAME_DONE_13 437 +#define CMDQ_EVENT_VPP1_FRAME_DONE_14 438 +#define CMDQ_EVENT_VPP1_STREAM_DONE_0 439 +#define CMDQ_EVENT_VPP1_STREAM_DONE_1 440 +#define CMDQ_EVENT_VPP1_STREAM_DONE_2 441 +#define CMDQ_EVENT_VPP1_STREAM_DONE_3 442 +#define CMDQ_EVENT_VPP1_STREAM_DONE_4 443 +#define CMDQ_EVENT_VPP1_STREAM_DONE_5 444 +#define CMDQ_EVENT_VPP1_STREAM_DONE_6 445 +#define CMDQ_EVENT_VPP1_STREAM_DONE_7 446 +#define CMDQ_EVENT_VPP1_STREAM_DONE_8 447 +#define CMDQ_EVENT_VPP1_STREAM_DONE_9 448 +#define CMDQ_EVENT_VPP1_STREAM_DONE_10 449 +#define CMDQ_EVENT_VPP1_STREAM_DONE_11 450 +#define CMDQ_EVENT_VPP1_STREAM_DONE_12 451 +#define CMDQ_EVENT_VPP1_STREAM_DONE_13 452 +#define CMDQ_EVENT_VPP1_STREAM_DONE_14 453 +#define CMDQ_EVENT_VPP1_STREAM_DONE_15 454 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_0 455 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_1 456 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_2 457 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_3 458 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_4 459 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_5 460 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_6 461 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_7 462 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_8 463 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_9 464 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_10 465 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_11 466 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_12 467 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_13 468 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_14 469 +#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_15 470 +#define CMDQ_EVENT_VPP1_DGI_0 471 +#define CMDQ_EVENT_VPP1_DGI_1 472 +#define CMDQ_EVENT_VPP1_DGI_2 473 +#define CMDQ_EVENT_VPP1_DGI_3 474 +#define CMDQ_EVENT_VPP1_DGI_4 475 +#define CMDQ_EVENT_VPP1_DGI_5 476 +#define CMDQ_EVENT_VPP1_DGI_6 477 +#define CMDQ_EVENT_VPP1_DGI_7 478 +#define CMDQ_EVENT_VPP1_DGI_8 479 +#define CMDQ_EVENT_VPP1_DGI_9 480 +#define CMDQ_EVENT_VPP1_DGI_10 481 +#define CMDQ_EVENT_VPP1_DGI_11 482 +#define CMDQ_EVENT_VPP1_DGI_12 483 +#define CMDQ_EVENT_VPP1_DGI_13 484 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE 485 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE 486 +#define CMDQ_EVENT_VPP1_MDP_OVL_FRAME_RESET_DONE_PULSE 487 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI 488 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI 489 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE 490 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE 491 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE 492 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 493 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 494 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 495 + +#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513 +#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514 +#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515 +#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516 +#define 
CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517 +#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518 +#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519 +#define CMDQ_EVENT_VDO0_DSI0_SOF 520 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521 +#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522 +#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523 +#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524 +#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525 +#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526 +#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527 +#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528 +#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529 +#define CMDQ_EVENT_VDO0_DSI1_SOF 530 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531 +#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532 +#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533 +#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 534 +#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 535 +#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 536 +#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 537 +#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 538 +#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 539 +#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 540 + +#define CMDQ_EVENT_VDO0_DISP_OVL0_FRAME_DONE 544 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 545 +#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 546 +#define CMDQ_EVENT_VDO0_DISP_COLOR0_FRAME_DONE 547 +#define CMDQ_EVENT_VDO0_DISP_CCORR0_FRAME_DONE 548 +#define CMDQ_EVENT_VDO0_DISP_AAL0_FRAME_DONE 549 +#define CMDQ_EVENT_VDO0_DISP_GAMMA0_FRAME_DONE 550 +#define CMDQ_EVENT_VDO0_DISP_DITHER0_FRAME_DONE 551 +#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 552 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_FRAME_DONE 553 +#define CMDQ_EVENT_VDO0_DISP_OVL1_FRAME_DONE 554 +#define CMDQ_EVENT_VDO0_DISP_WDMA1_FRAME_DONE 555 +#define CMDQ_EVENT_VDO0_DISP_RDMA1_FRAME_DONE 556 +#define CMDQ_EVENT_VDO0_DISP_COLOR1_FRAME_DONE 557 +#define CMDQ_EVENT_VDO0_DISP_CCORR1_FRAME_DONE 558 +#define CMDQ_EVENT_VDO0_DISP_AAL1_FRAME_DONE 559 +#define CMDQ_EVENT_VDO0_DISP_GAMMA1_FRAME_DONE 560 +#define CMDQ_EVENT_VDO0_DISP_DITHER1_FRAME_DONE 561 +#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 562 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_FRAME_DONE 563 + +#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 565 + +#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG 576 +#define CMDQ_EVENT_VDO0_DSI0_IRQ_ENG_EVENT_MM 577 +#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 578 +#define CMDQ_EVENT_VDO0_DSI0_DONE_ENG_EVENT_MM 579 +#define CMDQ_EVENT_VDO0_DSI0_SOF_ENG_EVENT_MM 580 +#define CMDQ_EVENT_VDO0_DSI0_VACTL_ENG_EVENT_MM 581 +#define CMDQ_EVENT_VDO0_DSI1_IRQ_ENG_EVENT_MM 582 +#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 583 +#define CMDQ_EVENT_VDO0_DSI1_DONE_ENG_EVENT_MM 584 +#define CMDQ_EVENT_VDO0_DSI1_SOF_ENG_EVENT_MM 585 +#define CMDQ_EVENT_VDO0_DSI1_VACTL_ENG_EVENT_MM 586 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_SW_RST_DONE_ENG 587 +#define CMDQ_EVENT_VDO0_DISP_WDMA1_SW_RST_DONE_ENG 588 +#define CMDQ_EVENT_VDO0_DISP_OVL0_RST_DONE_ENG 589 +#define CMDQ_EVENT_VDO0_DISP_OVL1_RST_DONE_ENG 590 +#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_START_ENG_EVENT_MM 591 +#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_END_ENG_EVENT_MM 592 +#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_START_ENG_EVENT_MM 593 +#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_END_ENG_EVENT_MM 594 +#define CMDQ_EVENT_VDO0_DP_INTF0_TARGET_LINE_ENG_EVENT_MM 595 +#define CMDQ_EVENT_VDO0_VPP_MERGE0_ENG 596 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 597 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 598 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 599 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 600 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 601 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 602 +#define 
CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 603 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 604 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 605 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 606 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 607 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 608 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 609 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 610 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 611 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 612 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_0 613 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_1 614 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_2 615 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_3 616 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_4 617 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_5 618 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_6 619 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_7 620 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_8 621 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_9 622 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_10 623 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_11 624 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_12 625 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_13 626 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_14 627 +#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_15 628 + +#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647 +#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 648 +#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 649 +#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 650 +#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 651 +#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 652 +#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 653 +#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 654 +#define CMDQ_EVENT_VDO1_VDO0_DSC_DL_ASYNC_SOF 655 +#define CMDQ_EVENT_VDO1_VDO0_MERGE_DL_ASYNC_SOF 656 +#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 657 +#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 658 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 659 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 660 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 661 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 662 +#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 663 +#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 664 + +#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 672 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 673 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 674 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 675 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 676 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 677 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 678 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 679 +#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 680 +#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 681 +#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 682 +#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 683 +#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 684 +#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 685 +#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 686 +#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 687 +#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 688 + +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 704 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 705 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 706 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 707 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 708 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 709 +#define 
CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 710 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 711 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 712 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 713 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 714 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 715 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 716 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 717 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 718 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 719 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_0 720 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_1 721 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_2 722 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_3 723 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_4 724 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_5 725 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_6 726 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_7 727 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_8 728 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_9 729 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_10 730 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_11 731 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_12 732 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_13 733 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_14 734 +#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_15 735 +#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 736 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 737 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 738 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 739 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 740 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 741 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 742 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 743 + +#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 745 +#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 746 +#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 747 +#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 748 +#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 749 +#define CMDQ_EVENT_VDO1_VPP_MERGE0 750 +#define CMDQ_EVENT_VDO1_VPP_MERGE1 751 +#define CMDQ_EVENT_VDO1_VPP_MERGE2 752 +#define CMDQ_EVENT_VDO1_VPP_MERGE3 753 +#define CMDQ_EVENT_VDO1_VPP_MERGE4 754 +#define CMDQ_EVENT_VDO1_HDMITX 755 +#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 756 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 757 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 758 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 759 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 760 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 761 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 762 + +#define CMDQ_EVENT_CAM_A_PASS1_DONE 769 +#define CMDQ_EVENT_CAM_B_PASS1_DONE 770 +#define CMDQ_EVENT_GCAMSV_A_PASS1_DONE 771 +#define CMDQ_EVENT_GCAMSV_B_PASS1_DONE 772 +#define CMDQ_EVENT_MRAW_0_PASS1_DONE 773 +#define CMDQ_EVENT_MRAW_1_PASS1_DONE 774 +#define CMDQ_EVENT_MRAW_2_PASS1_DONE 775 +#define CMDQ_EVENT_MRAW_3_PASS1_DONE 776 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL_X 777 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL_X 778 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 779 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 780 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 781 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 782 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 783 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 784 +#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 785 +#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 786 +#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL_X 787 +#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL_X 788 +#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL_X 789 
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL_X 790 +#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT_X0 791 +#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT_X0 792 +#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 793 +#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 794 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 795 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 796 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 797 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 798 +#define CMDQ_EVENT_U_CAMSYS_PDA_IRQO_EVENT_DONE_D1 799 +#define CMDQ_EVENT_SUBB_TG_INT4 800 +#define CMDQ_EVENT_SUBB_TG_INT3 801 +#define CMDQ_EVENT_SUBB_TG_INT2 802 +#define CMDQ_EVENT_SUBB_TG_INT1 803 +#define CMDQ_EVENT_SUBA_TG_INT4 804 +#define CMDQ_EVENT_SUBA_TG_INT3 805 +#define CMDQ_EVENT_SUBA_TG_INT2 806 +#define CMDQ_EVENT_SUBA_TG_INT1 807 +#define CMDQ_EVENT_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 808 +#define CMDQ_EVENT_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 809 +#define CMDQ_EVENT_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 810 +#define CMDQ_EVENT_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 811 +#define CMDQ_EVENT_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 812 +#define CMDQ_EVENT_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 813 +#define CMDQ_EVENT_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 814 +#define CMDQ_EVENT_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 815 +#define CMDQ_EVENT_GCE1_SOF_0 816 +#define CMDQ_EVENT_GCE1_SOF_1 817 +#define CMDQ_EVENT_GCE1_SOF_2 818 +#define CMDQ_EVENT_GCE1_SOF_3 819 +#define CMDQ_EVENT_GCE1_SOF_4 820 +#define CMDQ_EVENT_GCE1_SOF_5 821 +#define CMDQ_EVENT_GCE1_SOF_6 822 +#define CMDQ_EVENT_GCE1_SOF_7 823 +#define CMDQ_EVENT_GCE1_SOF_8 824 +#define CMDQ_EVENT_GCE1_SOF_9 825 +#define CMDQ_EVENT_GCE1_SOF_10 826 +#define CMDQ_EVENT_GCE1_SOF_11 827 +#define CMDQ_EVENT_GCE1_SOF_12 828 +#define CMDQ_EVENT_GCE1_SOF_13 829 +#define CMDQ_EVENT_GCE1_SOF_14 830 +#define CMDQ_EVENT_GCE1_SOF_15 831 + +#define CMDQ_EVENT_VDEC_LAT_LINE_COUNT_THRESHOLD_INTERRUPT 832 +#define CMDQ_EVENT_VDEC_LAT_VDEC_INT 833 +#define CMDQ_EVENT_VDEC_LAT_VDEC_PAUSE 834 +#define CMDQ_EVENT_VDEC_LAT_VDEC_DEC_ERROR 835 +#define CMDQ_EVENT_VDEC_LAT_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 836 +#define CMDQ_EVENT_VDEC_LAT_VDEC_FRAME_DONE 837 +#define CMDQ_EVENT_VDEC_LAT_INI_FETCH_RDY 838 +#define CMDQ_EVENT_VDEC_LAT_PROCESS_FLAG 839 +#define CMDQ_EVENT_VDEC_LAT_SEARCH_START_CODE_DONE 840 +#define CMDQ_EVENT_VDEC_LAT_REF_REORDER_DONE 841 +#define CMDQ_EVENT_VDEC_LAT_WP_TBLE_DONE 842 +#define CMDQ_EVENT_VDEC_LAT_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 843 +#define CMDQ_EVENT_VDEC_LAT_GCE_CNT_OP_THRESHOLD 847 + +#define CMDQ_EVENT_VDEC_LAT1_LINE_COUNT_THRESHOLD_INTERRUPT 848 +#define CMDQ_EVENT_VDEC_LAT1_VDEC_INT 849 +#define CMDQ_EVENT_VDEC_LAT1_VDEC_PAUSE 850 +#define CMDQ_EVENT_VDEC_LAT1_VDEC_DEC_ERROR 851 +#define CMDQ_EVENT_VDEC_LAT1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 852 +#define CMDQ_EVENT_VDEC_LAT1_VDEC_FRAME_DONE 853 +#define CMDQ_EVENT_VDEC_LAT1_INI_FETCH_RDY 854 +#define CMDQ_EVENT_VDEC_LAT1_PROCESS_FLAG 855 +#define CMDQ_EVENT_VDEC_LAT1_SEARCH_START_CODE_DONE 856 +#define CMDQ_EVENT_VDEC_LAT1_REF_REORDER_DONE 857 +#define CMDQ_EVENT_VDEC_LAT1_WP_TBLE_DONE 858 +#define CMDQ_EVENT_VDEC_LAT1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 859 +#define CMDQ_EVENT_VDEC_LAT1_GCE_CNT_OP_THRESHOLD 863 + +#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_0 864 +#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_1 865 + +#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_8 872 +#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_9 873 + +#define CMDQ_EVENT_VDEC_CORE_LINE_COUNT_THRESHOLD_INTERRUPT 896 +#define CMDQ_EVENT_VDEC_CORE_VDEC_INT 897 +#define 
CMDQ_EVENT_VDEC_CORE_VDEC_PAUSE 898 +#define CMDQ_EVENT_VDEC_CORE_VDEC_DEC_ERROR 899 +#define CMDQ_EVENT_VDEC_CORE_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 900 +#define CMDQ_EVENT_VDEC_CORE_VDEC_FRAME_DONE 901 +#define CMDQ_EVENT_VDEC_CORE_INI_FETCH_RDY 902 +#define CMDQ_EVENT_VDEC_CORE_PROCESS_FLAG 903 +#define CMDQ_EVENT_VDEC_CORE_SEARCH_START_CODE_DONE 904 +#define CMDQ_EVENT_VDEC_CORE_REF_REORDER_DONE 905 +#define CMDQ_EVENT_VDEC_CORE_WP_TBLE_DONE 906 +#define CMDQ_EVENT_VDEC_CORE_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 907 +#define CMDQ_EVENT_VDEC_CORE_GCE_CNT_OP_THRESHOLD 911 + +#define CMDQ_EVENT_VDEC_CORE1_LINE_COUNT_THRESHOLD_INTERRUPT 912 +#define CMDQ_EVENT_VDEC_CORE1_VDEC_INT 913 +#define CMDQ_EVENT_VDEC_CORE1_VDEC_PAUSE 914 +#define CMDQ_EVENT_VDEC_CORE1_VDEC_DEC_ERROR 915 +#define CMDQ_EVENT_VDEC_CORE1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 916 +#define CMDQ_EVENT_VDEC_CORE1_VDEC_FRAME_DONE 917 +#define CMDQ_EVENT_VDEC_CORE1_INI_FETCH_RDY 918 +#define CMDQ_EVENT_VDEC_CORE1_PROCESS_FLAG 919 +#define CMDQ_EVENT_VDEC_CORE1_SEARCH_START_CODE_DONE 920 +#define CMDQ_EVENT_VDEC_CORE1_REF_REORDER_DONE 921 +#define CMDQ_EVENT_VDEC_CORE1_WP_TBLE_DONE 922 +#define CMDQ_EVENT_VDEC_CORE1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 923 +#define CMDQ_EVENT_VDEC_CORE1_CNT_OP_THRESHOLD 927 + +#define CMDQ_EVENT_VENC_TOP_FRAME_DONE 929 +#define CMDQ_EVENT_VENC_TOP_PAUSE_DONE 930 +#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 931 +#define CMDQ_EVENT_VENC_TOP_MB_DONE 932 +#define CMDQ_EVENT_VENC_TOP_128BYTE_DONE 933 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 934 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_DONE 935 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 936 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_INSUFF_DONE 937 +#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 938 +#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 939 +#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 940 +#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 941 +#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 942 + +#define CMDQ_EVENT_VENC_CORE1_TOP_FRAME_DONE 945 +#define CMDQ_EVENT_VENC_CORE1_TOP_PAUSE_DONE 946 +#define CMDQ_EVENT_VENC_CORE1_TOP_JPGENC_DONE 947 +#define CMDQ_EVENT_VENC_CORE1_TOP_MB_DONE 948 +#define CMDQ_EVENT_VENC_CORE1_TOP_128BYTE_DONE 949 +#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_DONE 950 +#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_DONE 951 +#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_INSUFF_DONE 952 +#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_INSUFF_DONE 953 +#define CMDQ_EVENT_VENC_CORE1_TOP_WP_2ND_STAGE_DONE 954 +#define CMDQ_EVENT_VENC_CORE1_TOP_WP_3RD_STAGE_DONE 955 +#define CMDQ_EVENT_VENC_CORE1_TOP_PPS_HEADER_DONE 956 +#define CMDQ_EVENT_VENC_CORE1_TOP_SPS_HEADER_DONE 957 +#define CMDQ_EVENT_VENC_CORE1_TOP_VPS_HEADER_DONE 958 + +#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 962 +#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 963 + +#define CMDQ_EVENT_WPE_VPP1_WPE_GCE_FRAME_DONE 969 +#define CMDQ_EVENT_WPE_VPP1_WPE_DONE_SYNC_OUT 970 + +#define CMDQ_EVENT_DP_TX_VBLANK_FALLING 994 +#define CMDQ_EVENT_DP_TX_VSC_FINISH 995 + +#define CMDQ_EVENT_OUTPIN_0 1018 +#define CMDQ_EVENT_OUTPIN_1 1019 + +/* end of hw event */ +#define CMDQ_MAX_HW_EVENT 1019 + +#endif diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h index 4627a00e369e..a6ccc031635b 100644 --- a/include/dt-bindings/iio/adc/ingenic,adc.h +++ b/include/dt-bindings/iio/adc/ingenic,adc.h @@ -13,5 +13,6 @@ #define INGENIC_ADC_TOUCH_YN 6 #define INGENIC_ADC_TOUCH_XD 7 #define INGENIC_ADC_TOUCH_YD 8 +#define INGENIC_ADC_AUX0 9 #endif diff --git 
a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h new file mode 100644 index 000000000000..21b000443999 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc7280.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Qualcomm SC7280 interconnect IDs + * + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_0 1 +#define MASTER_QUP_1 2 +#define MASTER_A1NOC_CFG 3 +#define MASTER_PCIE_0 4 +#define MASTER_PCIE_1 5 +#define MASTER_SDCC_1 6 +#define MASTER_SDCC_2 7 +#define MASTER_SDCC_4 8 +#define MASTER_UFS_MEM 9 +#define MASTER_USB2 10 +#define MASTER_USB3_0 11 +#define SLAVE_A1NOC_SNOC 12 +#define SLAVE_ANOC_PCIE_GEM_NOC 13 +#define SLAVE_SERVICE_A1NOC 14 + +#define MASTER_QDSS_BAM 0 +#define MASTER_A2NOC_CFG 1 +#define MASTER_CNOC_A2NOC 2 +#define MASTER_CRYPTO 3 +#define MASTER_IPA 4 +#define MASTER_QDSS_ETR 5 +#define SLAVE_A2NOC_SNOC 6 +#define SLAVE_SERVICE_A2NOC 7 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define SLAVE_QUP_CORE_0 2 +#define SLAVE_QUP_CORE_1 3 + +#define MASTER_CNOC3_CNOC2 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_AHB2PHY_SOUTH 2 +#define SLAVE_AHB2PHY_NORTH 3 +#define SLAVE_CAMERA_CFG 4 +#define SLAVE_CLK_CTL 5 +#define SLAVE_CDSP_CFG 6 +#define SLAVE_RBCPR_CX_CFG 7 +#define SLAVE_RBCPR_MX_CFG 8 +#define SLAVE_CRYPTO_0_CFG 9 +#define SLAVE_CX_RDPM 10 +#define SLAVE_DCC_CFG 11 +#define SLAVE_DISPLAY_CFG 12 +#define SLAVE_GFX3D_CFG 13 +#define SLAVE_HWKM 14 +#define SLAVE_IMEM_CFG 15 +#define SLAVE_IPA_CFG 16 +#define SLAVE_IPC_ROUTER_CFG 17 +#define SLAVE_LPASS 18 +#define SLAVE_CNOC_MSS 19 +#define SLAVE_MX_RDPM 20 +#define SLAVE_PCIE_0_CFG 21 +#define SLAVE_PCIE_1_CFG 22 +#define SLAVE_PDM 23 +#define SLAVE_PIMEM_CFG 24 +#define SLAVE_PKA_WRAPPER_CFG 25 +#define SLAVE_PMU_WRAPPER_CFG 26 +#define SLAVE_QDSS_CFG 27 +#define SLAVE_QSPI_0 28 +#define SLAVE_QUP_0 29 +#define SLAVE_QUP_1 30 +#define SLAVE_SDCC_1 31 +#define SLAVE_SDCC_2 32 +#define SLAVE_SDCC_4 33 +#define SLAVE_SECURITY 34 +#define SLAVE_TCSR 35 +#define SLAVE_TLMM 36 +#define SLAVE_UFS_MEM_CFG 37 +#define SLAVE_USB2 38 +#define SLAVE_USB3_0 39 +#define SLAVE_VENUS_CFG 40 +#define SLAVE_VSENSE_CTRL_CFG 41 +#define SLAVE_A1NOC_CFG 42 +#define SLAVE_A2NOC_CFG 43 +#define SLAVE_CNOC2_CNOC3 44 +#define SLAVE_CNOC_MNOC_CFG 45 +#define SLAVE_SNOC_CFG 46 + +#define MASTER_CNOC2_CNOC3 0 +#define MASTER_GEM_NOC_CNOC 1 +#define MASTER_GEM_NOC_PCIE_SNOC 2 +#define SLAVE_AOSS 3 +#define SLAVE_APPSS 4 +#define SLAVE_CNOC3_CNOC2 5 +#define SLAVE_CNOC_A2NOC 6 +#define SLAVE_DDRSS_CFG 7 +#define SLAVE_BOOT_IMEM 8 +#define SLAVE_IMEM 9 +#define SLAVE_PIMEM 10 +#define SLAVE_PCIE_0 11 +#define SLAVE_PCIE_1 12 +#define SLAVE_QDSS_STM 13 +#define SLAVE_TCU 14 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_COMPUTE_NOC 3 +#define MASTER_GEM_NOC_CFG 4 +#define MASTER_GFX3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_ANOC_PCIE_GEM_NOC 8 +#define MASTER_SNOC_GC_MEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define SLAVE_MSS_PROC_MS_MPU_CFG 11 +#define SLAVE_MCDMA_MS_MPU_CFG 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_MEM_NOC_PCIE_SNOC 15 +#define 
SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 + +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define SLAVE_LPASS_CORE_CFG 1 +#define SLAVE_LPASS_LPI_CFG 2 +#define SLAVE_LPASS_MPU_CFG 3 +#define SLAVE_LPASS_TOP_CFG 4 +#define SLAVE_SERVICES_LPASS_AML_NOC 5 +#define SLAVE_SERVICE_LPASS_AG_NOC 6 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_VIDEO_P0 1 +#define MASTER_VIDEO_PROC 2 +#define MASTER_CAMNOC_HF 3 +#define MASTER_CAMNOC_ICP 4 +#define MASTER_CAMNOC_SF 5 +#define MASTER_MDP0 6 +#define SLAVE_MNOC_HF_MEM_NOC 7 +#define SLAVE_MNOC_SF_MEM_NOC 8 +#define SLAVE_SERVICE_MNOC 9 + +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_SERVICE_NSP_NOC 3 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_SNOC_CFG 2 +#define MASTER_PIMEM 3 +#define MASTER_GIC 4 +#define SLAVE_SNOC_GEM_NOC_GC 5 +#define SLAVE_SNOC_GEM_NOC_SF 6 +#define SLAVE_SERVICE_SNOC 7 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h new file mode 100644 index 000000000000..235b525d2803 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc8180x.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Qualcomm SC8180x interconnect IDs + * + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_UFS_CARD 1 +#define MASTER_UFS_GEN4 2 +#define MASTER_UFS_MEM 3 +#define MASTER_USB3 4 +#define MASTER_USB3_1 5 +#define MASTER_USB3_2 6 +#define A1NOC_SNOC_SLV 7 +#define SLAVE_SERVICE_A1NOC 8 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QSPI_0 2 +#define MASTER_QSPI_1 3 +#define MASTER_QUP_0 4 +#define MASTER_QUP_1 5 +#define MASTER_QUP_2 6 +#define MASTER_SENSORS_AHB 7 +#define MASTER_CRYPTO_CORE_0 8 +#define MASTER_IPA 9 +#define MASTER_EMAC 10 +#define MASTER_PCIE 11 +#define MASTER_PCIE_1 12 +#define MASTER_PCIE_2 13 +#define MASTER_PCIE_3 14 +#define MASTER_QDSS_ETR 15 +#define MASTER_SDCC_2 16 +#define MASTER_SDCC_4 17 +#define A2NOC_SNOC_SLV 18 +#define SLAVE_ANOC_PCIE_GEM_NOC 19 +#define SLAVE_SERVICE_A2NOC 20 + +#define MASTER_CAMNOC_HF0_UNCOMP 0 +#define MASTER_CAMNOC_HF1_UNCOMP 1 +#define MASTER_CAMNOC_SF_UNCOMP 2 +#define SLAVE_CAMNOC_UNCOMP 3 + +#define MASTER_NPU 0 +#define SLAVE_CDSP_MEM_NOC 1 + +#define SNOC_CNOC_MAS 0 +#define SLAVE_A1NOC_CFG 1 +#define SLAVE_A2NOC_CFG 2 +#define SLAVE_AHB2PHY_CENTER 3 +#define SLAVE_AHB2PHY_EAST 4 +#define SLAVE_AHB2PHY_WEST 5 +#define SLAVE_AHB2PHY_SOUTH 6 +#define SLAVE_AOP 7 +#define SLAVE_AOSS 8 +#define SLAVE_CAMERA_CFG 9 +#define SLAVE_CLK_CTL 10 +#define SLAVE_CDSP_CFG 11 +#define SLAVE_RBCPR_CX_CFG 12 +#define SLAVE_RBCPR_MMCX_CFG 13 +#define SLAVE_RBCPR_MX_CFG 14 +#define SLAVE_CRYPTO_0_CFG 15 +#define SLAVE_CNOC_DDRSS 16 +#define SLAVE_DISPLAY_CFG 17 +#define SLAVE_EMAC_CFG 18 +#define SLAVE_GLM 19 +#define SLAVE_GRAPHICS_3D_CFG 20 +#define SLAVE_IMEM_CFG 21 +#define SLAVE_IPA_CFG 22 +#define SLAVE_CNOC_MNOC_CFG 23 +#define SLAVE_NPU_CFG 24 +#define SLAVE_PCIE_0_CFG 25 +#define SLAVE_PCIE_1_CFG 26 +#define SLAVE_PCIE_2_CFG 27 +#define SLAVE_PCIE_3_CFG 28 +#define SLAVE_PDM 29 +#define SLAVE_PIMEM_CFG 30 +#define SLAVE_PRNG 31 +#define SLAVE_QDSS_CFG 32 +#define SLAVE_QSPI_0 33 +#define SLAVE_QSPI_1 34 
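On these interconnect IDs: each block restarts at 0 because the numbers index endpoints within a single NoC provider, so MASTER_QSPI_0 on one NoC and MASTER_QDSS_BAM on another can both be 0 without clashing. Device tree uses them in (provider, id) pairs, and a kernel consumer then votes bandwidth over the resolved path; a hedged sketch using the generic API from linux/interconnect.h (the "ufs-ddr" path name is made up):

/* Sketch: vote bandwidth on a DT-described interconnect path built
 * from these per-NoC IDs (e.g. MASTER_UFS_MEM -> SLAVE_EBI1). The
 * path name "ufs-ddr" is hypothetical; of_icc_get()/icc_set_bw()
 * are the generic consumer API.
 */
#include <linux/err.h>
#include <linux/interconnect.h>

static int example_vote_bandwidth(struct device *dev)
{
	struct icc_path *path = of_icc_get(dev, "ufs-ddr");

	if (IS_ERR(path))
		return PTR_ERR(path);

	return icc_set_bw(path, 300000, 600000);	/* avg/peak, kBps */
}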
+#define SLAVE_QUP_1 35 +#define SLAVE_QUP_2 36 +#define SLAVE_QUP_0 37 +#define SLAVE_SDCC_2 38 +#define SLAVE_SDCC_4 39 +#define SLAVE_SECURITY 40 +#define SLAVE_SNOC_CFG 41 +#define SLAVE_SPSS_CFG 42 +#define SLAVE_TCSR 43 +#define SLAVE_TLMM_EAST 44 +#define SLAVE_TLMM_SOUTH 45 +#define SLAVE_TLMM_WEST 46 +#define SLAVE_TSIF 47 +#define SLAVE_UFS_CARD_CFG 48 +#define SLAVE_UFS_MEM_0_CFG 49 +#define SLAVE_UFS_MEM_1_CFG 50 +#define SLAVE_USB3 51 +#define SLAVE_USB3_1 52 +#define SLAVE_USB3_2 53 +#define SLAVE_VENUS_CFG 54 +#define SLAVE_VSENSE_CTRL_CFG 55 +#define SLAVE_SERVICE_CNOC 56 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_GEM_NOC_CFG 1 +#define SLAVE_LLCC_CFG 2 + +#define MASTER_AMPSS_M0 0 +#define MASTER_GPU_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_GEM_NOC_CFG 3 +#define MASTER_COMPUTE_NOC 4 +#define MASTER_GRAPHICS_3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_GEM_NOC_PCIE_SNOC 8 +#define MASTER_SNOC_GC_MEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define MASTER_ECC 11 +#define SLAVE_MSS_PROC_MS_MPU_CFG 12 +#define SLAVE_ECC 13 +#define SLAVE_GEM_NOC_SNOC 14 +#define SLAVE_LLCC 15 +#define SLAVE_SERVICE_GEM_NOC 16 +#define SLAVE_SERVICE_GEM_NOC_1 17 + +#define MASTER_IPA_CORE 0 +#define SLAVE_IPA_CORE 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI_CH0 1 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_CAMNOC_HF0 1 +#define MASTER_CAMNOC_HF1 2 +#define MASTER_CAMNOC_SF 3 +#define MASTER_MDP_PORT0 4 +#define MASTER_MDP_PORT1 5 +#define MASTER_ROTATOR 6 +#define MASTER_VIDEO_P0 7 +#define MASTER_VIDEO_P1 8 +#define MASTER_VIDEO_PROC 9 +#define SLAVE_MNOC_SF_MEM_NOC 10 +#define SLAVE_MNOC_HF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 + +#define MASTER_SNOC_CFG 0 +#define A1NOC_SNOC_MAS 1 +#define A2NOC_SNOC_MAS 2 +#define MASTER_GEM_NOC_SNOC 3 +#define MASTER_PIMEM 4 +#define MASTER_GIC 5 +#define SLAVE_APPSS 6 +#define SNOC_CNOC_SLV 7 +#define SLAVE_SNOC_GEM_NOC_GC 8 +#define SLAVE_SNOC_GEM_NOC_SF 9 +#define SLAVE_OCIMEM 10 +#define SLAVE_PIMEM 11 +#define SLAVE_SERVICE_SNOC 12 +#define SLAVE_PCIE_0 13 +#define SLAVE_PCIE_1 14 +#define SLAVE_PCIE_2 15 +#define SLAVE_PCIE_3 16 +#define SLAVE_QDSS_STM 17 +#define SLAVE_TCU 18 + +#define MASTER_MNOC_HF_MEM_NOC_DISPLAY 0 +#define MASTER_MNOC_SF_MEM_NOC_DISPLAY 1 +#define SLAVE_LLCC_DISPLAY 2 + +#define MASTER_LLCC_DISPLAY 0 +#define SLAVE_EBI_CH0_DISPLAY 1 + +#define MASTER_MDP_PORT0_DISPLAY 0 +#define MASTER_MDP_PORT1_DISPLAY 1 +#define MASTER_ROTATOR_DISPLAY 2 +#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3 +#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4 + +#endif diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h new file mode 100644 index 000000000000..125c6351bba0 --- /dev/null +++ b/include/dt-bindings/leds/rt4831-backlight.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for rt4831 backlight bindings. + * + * Copyright (C) 2020, Richtek Technology Corp. 
+ * Author: ChiYuan Huang <cy_huang@richtek.com> + */ + +#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H +#define _DT_BINDINGS_RT4831_BACKLIGHT_H + +#define RT4831_BLOVPLVL_17V 0 +#define RT4831_BLOVPLVL_21V 1 +#define RT4831_BLOVPLVL_25V 2 +#define RT4831_BLOVPLVL_29V 3 + +#define RT4831_BLED_CH1EN (1 << 0) +#define RT4831_BLED_CH2EN (1 << 1) +#define RT4831_BLED_CH3EN (1 << 2) +#define RT4831_BLED_CH4EN (1 << 3) +#define RT4831_BLED_ALLCHEN ((1 << 4) - 1) + +#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */ diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h index 4c23eefed5f3..eb91a6c05b71 100644 --- a/include/dt-bindings/mailbox/qcom-ipcc.h +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -29,5 +29,6 @@ #define IPCC_CLIENT_PCIE1 14 #define IPCC_CLIENT_PCIE2 15 #define IPCC_CLIENT_SPSS 16 +#define IPCC_CLIENT_WPSS 24 #endif diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h new file mode 100644 index 000000000000..eca9448df228 --- /dev/null +++ b/include/dt-bindings/mfd/qcom-pm8008.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H +#define __DT_BINDINGS_MFD_QCOM_PM8008_H + +/* PM8008 IRQ numbers */ +#define PM8008_IRQ_MISC_UVLO 0 +#define PM8008_IRQ_MISC_OVLO 1 +#define PM8008_IRQ_MISC_OTST2 2 +#define PM8008_IRQ_MISC_OTST3 3 +#define PM8008_IRQ_MISC_LDO_OCP 4 +#define PM8008_IRQ_TEMP_ALARM 5 +#define PM8008_IRQ_GPIO1 6 +#define PM8008_IRQ_GPIO2 7 + +#endif diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index 887a31b250a8..f48c9acf251e 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -20,5 +20,7 @@ #define PHY_TYPE_XPCS 7 #define PHY_TYPE_SGMII 8 #define PHY_TYPE_QSGMII 9 +#define PHY_TYPE_DPHY 10 +#define PHY_TYPE_CPHY 11 #endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/apple.h b/include/dt-bindings/pinctrl/apple.h new file mode 100644 index 000000000000..ea0a6f466592 --- /dev/null +++ b/include/dt-bindings/pinctrl/apple.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR MIT */ +/* + * This header provides constants for Apple pinctrl bindings. + */ + +#ifndef _DT_BINDINGS_PINCTRL_APPLE_H +#define _DT_BINDINGS_PINCTRL_APPLE_H + +#define APPLE_PINMUX(pin, func) ((pin) | ((func) << 16)) +#define APPLE_PIN(pinmux) ((pinmux) & 0xffff) +#define APPLE_FUNC(pinmux) ((pinmux) >> 16) + +#endif /* _DT_BINDINGS_PINCTRL_APPLE_H */ diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h index 0359bfdc9119..93064c750c8c 100644 --- a/include/dt-bindings/pinctrl/hisi.h +++ b/include/dt-bindings/pinctrl/hisi.h @@ -1,7 +1,7 @@ /* * This header provides constants for hisilicon pinctrl bindings. * - * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2015 HiSilicon Limited. * Copyright (c) 2015 Linaro Limited. * * This program is free software; you can redistribute it and/or modify diff --git a/include/dt-bindings/pinctrl/mt8135-pinfunc.h b/include/dt-bindings/pinctrl/mt8135-pinfunc.h new file mode 100644 index 000000000000..ce0cb5a440eb --- /dev/null +++ b/include/dt-bindings/pinctrl/mt8135-pinfunc.h @@ -0,0 +1,1294 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com> + */ + +#ifndef __DTS_MT8135_PINFUNC_H +#define __DTS_MT8135_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(0) | 1) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_EINT49 (MTK_PIN_NO(0) | 2) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_I2SOUT_DAT (MTK_PIN_NO(0) | 3) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_DAC_DAT_OUT (MTK_PIN_NO(0) | 4) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_PCM1_DO (MTK_PIN_NO(0) | 5) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_SPI1_MO (MTK_PIN_NO(0) | 6) +#define MT8135_PIN_0_MSDC0_DAT7__FUNC_NALE (MTK_PIN_NO(0) | 7) + +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(1) | 1) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_EINT48 (MTK_PIN_NO(1) | 2) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_I2SIN_WS (MTK_PIN_NO(1) | 3) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_DAC_WS (MTK_PIN_NO(1) | 4) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_PCM1_WS (MTK_PIN_NO(1) | 5) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_SPI1_CSN (MTK_PIN_NO(1) | 6) +#define MT8135_PIN_1_MSDC0_DAT6__FUNC_NCLE (MTK_PIN_NO(1) | 7) + +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(2) | 1) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_EINT47 (MTK_PIN_NO(2) | 2) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_I2SIN_CK (MTK_PIN_NO(2) | 3) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_DAC_CK (MTK_PIN_NO(2) | 4) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_PCM1_CK (MTK_PIN_NO(2) | 5) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_SPI1_CLK (MTK_PIN_NO(2) | 6) +#define MT8135_PIN_2_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(2) | 7) + +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(3) | 1) +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_EINT46 (MTK_PIN_NO(3) | 2) +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_A_FUNC_CK (MTK_PIN_NO(3) | 3) +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_LSCE1B_2X (MTK_PIN_NO(3) | 6) +#define MT8135_PIN_3_MSDC0_DAT4__FUNC_NLD5 (MTK_PIN_NO(3) | 7) + +#define MT8135_PIN_4_MSDC0_CMD__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(4) | 1) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_EINT41 (MTK_PIN_NO(4) | 2) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_A_FUNC_DOUT_0 (MTK_PIN_NO(4) | 3) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_USB_TEST_IO_0 (MTK_PIN_NO(4) | 5) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_LRSTB_2X (MTK_PIN_NO(4) | 6) +#define MT8135_PIN_4_MSDC0_CMD__FUNC_NRNB (MTK_PIN_NO(4) | 7) + +#define MT8135_PIN_5_MSDC0_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(5) | 1) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_EINT40 (MTK_PIN_NO(5) | 2) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_A_FUNC_DOUT_1 (MTK_PIN_NO(5) | 3) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_USB_TEST_IO_1 (MTK_PIN_NO(5) | 5) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_LPTE (MTK_PIN_NO(5) | 6) +#define MT8135_PIN_5_MSDC0_CLK__FUNC_NREB (MTK_PIN_NO(5) | 7) + +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(6) | 1) +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_EINT45 (MTK_PIN_NO(6) | 2) +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_A_FUNC_DOUT_2 (MTK_PIN_NO(6) | 3) +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_USB_TEST_IO_2 (MTK_PIN_NO(6) | 5) +#define MT8135_PIN_6_MSDC0_DAT3__FUNC_LSCE0B_2X (MTK_PIN_NO(6) | 6) 
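The MT8135_PIN_* values follow the mt65xx convention of packing the pin number and the mux function into one integer: MTK_PIN_NO(x) is ((x) << 8) in dt-bindings/pinctrl/mt65xx.h, and the low bits carry the function (mode 0 is always GPIO). The APPLE_PINMUX()/APPLE_PIN()/APPLE_FUNC() macros added earlier in this diff apply the same pack/unpack idea with a 16-bit split. A small self-contained sketch, with the shift restated locally so it compiles on its own:

/* Sketch: MT8135 pinfunc values pack (pin << 8) | function.
 * MTK_PIN_NO() is restated here from dt-bindings/pinctrl/mt65xx.h
 * so the example is self-contained.
 */
#include <stdio.h>

#define MTK_PIN_NO(x) ((x) << 8)
#define MT8135_PIN_14_NALE__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)

int main(void)
{
	unsigned int v = MT8135_PIN_14_NALE__FUNC_IRDA_RXD;

	printf("pin %u, function %u\n", v >> 8, v & 0xff);	/* pin 14, function 5 */
	return 0;
}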
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_NLD7 (MTK_PIN_NO(6) | 7) + +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(7) | 1) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_EINT44 (MTK_PIN_NO(7) | 2) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_A_FUNC_DOUT_3 (MTK_PIN_NO(7) | 3) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_USB_TEST_IO_3 (MTK_PIN_NO(7) | 5) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_LSA0_2X (MTK_PIN_NO(7) | 6) +#define MT8135_PIN_7_MSDC0_DAT2__FUNC_NLD14 (MTK_PIN_NO(7) | 7) + +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(8) | 1) +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_EINT43 (MTK_PIN_NO(8) | 2) +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_USB_TEST_IO_4 (MTK_PIN_NO(8) | 5) +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_LSCK_2X (MTK_PIN_NO(8) | 6) +#define MT8135_PIN_8_MSDC0_DAT1__FUNC_NLD11 (MTK_PIN_NO(8) | 7) + +#define MT8135_PIN_9_MSDC0_DAT0__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT8135_PIN_9_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(9) | 1) +#define MT8135_PIN_9_MSDC0_DAT0__FUNC_EINT42 (MTK_PIN_NO(9) | 2) +#define MT8135_PIN_9_MSDC0_DAT0__FUNC_USB_TEST_IO_5 (MTK_PIN_NO(9) | 5) +#define MT8135_PIN_9_MSDC0_DAT0__FUNC_LSDA_2X (MTK_PIN_NO(9) | 6) + +#define MT8135_PIN_10_NCEB0__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT8135_PIN_10_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(10) | 1) +#define MT8135_PIN_10_NCEB0__FUNC_EINT139 (MTK_PIN_NO(10) | 2) +#define MT8135_PIN_10_NCEB0__FUNC_TESTA_OUT4 (MTK_PIN_NO(10) | 7) + +#define MT8135_PIN_11_NCEB1__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT8135_PIN_11_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(11) | 1) +#define MT8135_PIN_11_NCEB1__FUNC_EINT140 (MTK_PIN_NO(11) | 2) +#define MT8135_PIN_11_NCEB1__FUNC_USB_DRVVBUS (MTK_PIN_NO(11) | 6) +#define MT8135_PIN_11_NCEB1__FUNC_TESTA_OUT5 (MTK_PIN_NO(11) | 7) + +#define MT8135_PIN_12_NRNB__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT8135_PIN_12_NRNB__FUNC_NRNB (MTK_PIN_NO(12) | 1) +#define MT8135_PIN_12_NRNB__FUNC_EINT141 (MTK_PIN_NO(12) | 2) +#define MT8135_PIN_12_NRNB__FUNC_A_FUNC_DOUT_4 (MTK_PIN_NO(12) | 3) +#define MT8135_PIN_12_NRNB__FUNC_TESTA_OUT6 (MTK_PIN_NO(12) | 7) + +#define MT8135_PIN_13_NCLE__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT8135_PIN_13_NCLE__FUNC_NCLE (MTK_PIN_NO(13) | 1) +#define MT8135_PIN_13_NCLE__FUNC_EINT142 (MTK_PIN_NO(13) | 2) +#define MT8135_PIN_13_NCLE__FUNC_A_FUNC_DOUT_5 (MTK_PIN_NO(13) | 3) +#define MT8135_PIN_13_NCLE__FUNC_CM2PDN_1X (MTK_PIN_NO(13) | 4) +#define MT8135_PIN_13_NCLE__FUNC_NALE (MTK_PIN_NO(13) | 6) +#define MT8135_PIN_13_NCLE__FUNC_TESTA_OUT7 (MTK_PIN_NO(13) | 7) + +#define MT8135_PIN_14_NALE__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT8135_PIN_14_NALE__FUNC_NALE (MTK_PIN_NO(14) | 1) +#define MT8135_PIN_14_NALE__FUNC_EINT143 (MTK_PIN_NO(14) | 2) +#define MT8135_PIN_14_NALE__FUNC_A_FUNC_DOUT_6 (MTK_PIN_NO(14) | 3) +#define MT8135_PIN_14_NALE__FUNC_CM2MCLK_1X (MTK_PIN_NO(14) | 4) +#define MT8135_PIN_14_NALE__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5) +#define MT8135_PIN_14_NALE__FUNC_NCLE (MTK_PIN_NO(14) | 6) +#define MT8135_PIN_14_NALE__FUNC_TESTA_OUT8 (MTK_PIN_NO(14) | 7) + +#define MT8135_PIN_15_NREB__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT8135_PIN_15_NREB__FUNC_NREB (MTK_PIN_NO(15) | 1) +#define MT8135_PIN_15_NREB__FUNC_EINT144 (MTK_PIN_NO(15) | 2) +#define MT8135_PIN_15_NREB__FUNC_A_FUNC_DOUT_7 (MTK_PIN_NO(15) | 3) +#define MT8135_PIN_15_NREB__FUNC_CM2RST_1X (MTK_PIN_NO(15) | 4) +#define MT8135_PIN_15_NREB__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5) +#define 
MT8135_PIN_15_NREB__FUNC_TESTA_OUT9 (MTK_PIN_NO(15) | 7) + +#define MT8135_PIN_16_NWEB__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT8135_PIN_16_NWEB__FUNC_NWEB (MTK_PIN_NO(16) | 1) +#define MT8135_PIN_16_NWEB__FUNC_EINT145 (MTK_PIN_NO(16) | 2) +#define MT8135_PIN_16_NWEB__FUNC_A_FUNC_DIN_0 (MTK_PIN_NO(16) | 3) +#define MT8135_PIN_16_NWEB__FUNC_CM2PCLK_1X (MTK_PIN_NO(16) | 4) +#define MT8135_PIN_16_NWEB__FUNC_IRDA_PDN (MTK_PIN_NO(16) | 5) +#define MT8135_PIN_16_NWEB__FUNC_TESTA_OUT10 (MTK_PIN_NO(16) | 7) + +#define MT8135_PIN_17_NLD0__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT8135_PIN_17_NLD0__FUNC_NLD0 (MTK_PIN_NO(17) | 1) +#define MT8135_PIN_17_NLD0__FUNC_EINT146 (MTK_PIN_NO(17) | 2) +#define MT8135_PIN_17_NLD0__FUNC_A_FUNC_DIN_1 (MTK_PIN_NO(17) | 3) +#define MT8135_PIN_17_NLD0__FUNC_CM2DAT_1X_0 (MTK_PIN_NO(17) | 4) +#define MT8135_PIN_17_NLD0__FUNC_I2SIN_CK (MTK_PIN_NO(17) | 5) +#define MT8135_PIN_17_NLD0__FUNC_DAC_CK (MTK_PIN_NO(17) | 6) +#define MT8135_PIN_17_NLD0__FUNC_TESTA_OUT11 (MTK_PIN_NO(17) | 7) + +#define MT8135_PIN_18_NLD1__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT8135_PIN_18_NLD1__FUNC_NLD1 (MTK_PIN_NO(18) | 1) +#define MT8135_PIN_18_NLD1__FUNC_EINT147 (MTK_PIN_NO(18) | 2) +#define MT8135_PIN_18_NLD1__FUNC_A_FUNC_DIN_2 (MTK_PIN_NO(18) | 3) +#define MT8135_PIN_18_NLD1__FUNC_CM2DAT_1X_1 (MTK_PIN_NO(18) | 4) +#define MT8135_PIN_18_NLD1__FUNC_I2SIN_WS (MTK_PIN_NO(18) | 5) +#define MT8135_PIN_18_NLD1__FUNC_DAC_WS (MTK_PIN_NO(18) | 6) +#define MT8135_PIN_18_NLD1__FUNC_TESTA_OUT12 (MTK_PIN_NO(18) | 7) + +#define MT8135_PIN_19_NLD2__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT8135_PIN_19_NLD2__FUNC_NLD2 (MTK_PIN_NO(19) | 1) +#define MT8135_PIN_19_NLD2__FUNC_EINT148 (MTK_PIN_NO(19) | 2) +#define MT8135_PIN_19_NLD2__FUNC_A_FUNC_DIN_3 (MTK_PIN_NO(19) | 3) +#define MT8135_PIN_19_NLD2__FUNC_CM2DAT_1X_2 (MTK_PIN_NO(19) | 4) +#define MT8135_PIN_19_NLD2__FUNC_I2SOUT_DAT (MTK_PIN_NO(19) | 5) +#define MT8135_PIN_19_NLD2__FUNC_DAC_DAT_OUT (MTK_PIN_NO(19) | 6) +#define MT8135_PIN_19_NLD2__FUNC_TESTA_OUT13 (MTK_PIN_NO(19) | 7) + +#define MT8135_PIN_20_NLD3__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT8135_PIN_20_NLD3__FUNC_NLD3 (MTK_PIN_NO(20) | 1) +#define MT8135_PIN_20_NLD3__FUNC_EINT149 (MTK_PIN_NO(20) | 2) +#define MT8135_PIN_20_NLD3__FUNC_A_FUNC_DIN_4 (MTK_PIN_NO(20) | 3) +#define MT8135_PIN_20_NLD3__FUNC_CM2DAT_1X_3 (MTK_PIN_NO(20) | 4) +#define MT8135_PIN_20_NLD3__FUNC_TESTA_OUT14 (MTK_PIN_NO(20) | 7) + +#define MT8135_PIN_21_NLD4__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT8135_PIN_21_NLD4__FUNC_NLD4 (MTK_PIN_NO(21) | 1) +#define MT8135_PIN_21_NLD4__FUNC_EINT150 (MTK_PIN_NO(21) | 2) +#define MT8135_PIN_21_NLD4__FUNC_A_FUNC_DIN_5 (MTK_PIN_NO(21) | 3) +#define MT8135_PIN_21_NLD4__FUNC_CM2DAT_1X_4 (MTK_PIN_NO(21) | 4) +#define MT8135_PIN_21_NLD4__FUNC_TESTA_OUT15 (MTK_PIN_NO(21) | 7) + +#define MT8135_PIN_22_NLD5__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT8135_PIN_22_NLD5__FUNC_NLD5 (MTK_PIN_NO(22) | 1) +#define MT8135_PIN_22_NLD5__FUNC_EINT151 (MTK_PIN_NO(22) | 2) +#define MT8135_PIN_22_NLD5__FUNC_A_FUNC_DIN_6 (MTK_PIN_NO(22) | 3) +#define MT8135_PIN_22_NLD5__FUNC_CM2DAT_1X_5 (MTK_PIN_NO(22) | 4) +#define MT8135_PIN_22_NLD5__FUNC_TESTA_OUT16 (MTK_PIN_NO(22) | 7) + +#define MT8135_PIN_23_NLD6__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT8135_PIN_23_NLD6__FUNC_NLD6 (MTK_PIN_NO(23) | 1) +#define MT8135_PIN_23_NLD6__FUNC_EINT152 (MTK_PIN_NO(23) | 2) +#define MT8135_PIN_23_NLD6__FUNC_A_FUNC_DIN_7 (MTK_PIN_NO(23) | 3) +#define MT8135_PIN_23_NLD6__FUNC_CM2DAT_1X_6 (MTK_PIN_NO(23) | 4) 
+#define MT8135_PIN_23_NLD6__FUNC_TESTA_OUT17 (MTK_PIN_NO(23) | 7) + +#define MT8135_PIN_24_NLD7__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT8135_PIN_24_NLD7__FUNC_NLD7 (MTK_PIN_NO(24) | 1) +#define MT8135_PIN_24_NLD7__FUNC_EINT153 (MTK_PIN_NO(24) | 2) +#define MT8135_PIN_24_NLD7__FUNC_A_FUNC_DIN_8 (MTK_PIN_NO(24) | 3) +#define MT8135_PIN_24_NLD7__FUNC_CM2DAT_1X_7 (MTK_PIN_NO(24) | 4) +#define MT8135_PIN_24_NLD7__FUNC_TESTA_OUT18 (MTK_PIN_NO(24) | 7) + +#define MT8135_PIN_25_NLD8__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT8135_PIN_25_NLD8__FUNC_NLD8 (MTK_PIN_NO(25) | 1) +#define MT8135_PIN_25_NLD8__FUNC_EINT154 (MTK_PIN_NO(25) | 2) +#define MT8135_PIN_25_NLD8__FUNC_CM2DAT_1X_8 (MTK_PIN_NO(25) | 4) + +#define MT8135_PIN_26_NLD9__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT8135_PIN_26_NLD9__FUNC_NLD9 (MTK_PIN_NO(26) | 1) +#define MT8135_PIN_26_NLD9__FUNC_EINT155 (MTK_PIN_NO(26) | 2) +#define MT8135_PIN_26_NLD9__FUNC_CM2DAT_1X_9 (MTK_PIN_NO(26) | 4) +#define MT8135_PIN_26_NLD9__FUNC_PWM1 (MTK_PIN_NO(26) | 5) + +#define MT8135_PIN_27_NLD10__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT8135_PIN_27_NLD10__FUNC_NLD10 (MTK_PIN_NO(27) | 1) +#define MT8135_PIN_27_NLD10__FUNC_EINT156 (MTK_PIN_NO(27) | 2) +#define MT8135_PIN_27_NLD10__FUNC_CM2VSYNC_1X (MTK_PIN_NO(27) | 4) +#define MT8135_PIN_27_NLD10__FUNC_PWM2 (MTK_PIN_NO(27) | 5) + +#define MT8135_PIN_28_NLD11__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT8135_PIN_28_NLD11__FUNC_NLD11 (MTK_PIN_NO(28) | 1) +#define MT8135_PIN_28_NLD11__FUNC_EINT157 (MTK_PIN_NO(28) | 2) +#define MT8135_PIN_28_NLD11__FUNC_CM2HSYNC_1X (MTK_PIN_NO(28) | 4) +#define MT8135_PIN_28_NLD11__FUNC_PWM3 (MTK_PIN_NO(28) | 5) + +#define MT8135_PIN_29_NLD12__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT8135_PIN_29_NLD12__FUNC_NLD12 (MTK_PIN_NO(29) | 1) +#define MT8135_PIN_29_NLD12__FUNC_EINT158 (MTK_PIN_NO(29) | 2) +#define MT8135_PIN_29_NLD12__FUNC_I2SIN_CK (MTK_PIN_NO(29) | 3) +#define MT8135_PIN_29_NLD12__FUNC_DAC_CK (MTK_PIN_NO(29) | 4) +#define MT8135_PIN_29_NLD12__FUNC_PCM1_CK (MTK_PIN_NO(29) | 5) + +#define MT8135_PIN_30_NLD13__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT8135_PIN_30_NLD13__FUNC_NLD13 (MTK_PIN_NO(30) | 1) +#define MT8135_PIN_30_NLD13__FUNC_EINT159 (MTK_PIN_NO(30) | 2) +#define MT8135_PIN_30_NLD13__FUNC_I2SIN_WS (MTK_PIN_NO(30) | 3) +#define MT8135_PIN_30_NLD13__FUNC_DAC_WS (MTK_PIN_NO(30) | 4) +#define MT8135_PIN_30_NLD13__FUNC_PCM1_WS (MTK_PIN_NO(30) | 5) + +#define MT8135_PIN_31_NLD14__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT8135_PIN_31_NLD14__FUNC_NLD14 (MTK_PIN_NO(31) | 1) +#define MT8135_PIN_31_NLD14__FUNC_EINT160 (MTK_PIN_NO(31) | 2) +#define MT8135_PIN_31_NLD14__FUNC_I2SOUT_DAT (MTK_PIN_NO(31) | 3) +#define MT8135_PIN_31_NLD14__FUNC_DAC_DAT_OUT (MTK_PIN_NO(31) | 4) +#define MT8135_PIN_31_NLD14__FUNC_PCM1_DO (MTK_PIN_NO(31) | 5) + +#define MT8135_PIN_32_NLD15__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT8135_PIN_32_NLD15__FUNC_NLD15 (MTK_PIN_NO(32) | 1) +#define MT8135_PIN_32_NLD15__FUNC_EINT161 (MTK_PIN_NO(32) | 2) +#define MT8135_PIN_32_NLD15__FUNC_DISP_PWM (MTK_PIN_NO(32) | 3) +#define MT8135_PIN_32_NLD15__FUNC_PWM4 (MTK_PIN_NO(32) | 4) +#define MT8135_PIN_32_NLD15__FUNC_PCM1_DI (MTK_PIN_NO(32) | 5) + +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(33) | 1) +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_EINT50 (MTK_PIN_NO(33) | 2) +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_I2SIN_DAT (MTK_PIN_NO(33) | 3) +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_PCM1_DI (MTK_PIN_NO(33) | 5) 
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_SPI1_MI (MTK_PIN_NO(33) | 6) +#define MT8135_PIN_33_MSDC0_RSTB__FUNC_NLD10 (MTK_PIN_NO(33) | 7) + +#define MT8135_PIN_34_IDDIG__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT8135_PIN_34_IDDIG__FUNC_IDDIG (MTK_PIN_NO(34) | 1) +#define MT8135_PIN_34_IDDIG__FUNC_EINT34 (MTK_PIN_NO(34) | 2) + +#define MT8135_PIN_35_SCL3__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT8135_PIN_35_SCL3__FUNC_SCL3 (MTK_PIN_NO(35) | 1) +#define MT8135_PIN_35_SCL3__FUNC_EINT96 (MTK_PIN_NO(35) | 2) +#define MT8135_PIN_35_SCL3__FUNC_CLKM6 (MTK_PIN_NO(35) | 3) +#define MT8135_PIN_35_SCL3__FUNC_PWM6 (MTK_PIN_NO(35) | 4) + +#define MT8135_PIN_36_SDA3__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT8135_PIN_36_SDA3__FUNC_SDA3 (MTK_PIN_NO(36) | 1) +#define MT8135_PIN_36_SDA3__FUNC_EINT97 (MTK_PIN_NO(36) | 2) + +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(37) | 1) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_ADC_CK (MTK_PIN_NO(37) | 2) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_HDMI_SDATA0 (MTK_PIN_NO(37) | 3) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_EINT19 (MTK_PIN_NO(37) | 4) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_USB_TEST_IO_6 (MTK_PIN_NO(37) | 5) +#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_TESTA_OUT19 (MTK_PIN_NO(37) | 7) + +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(38) | 1) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_ADC_WS (MTK_PIN_NO(38) | 2) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MISO (MTK_PIN_NO(38) | 3) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_EINT21 (MTK_PIN_NO(38) | 4) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_USB_TEST_IO_7 (MTK_PIN_NO(38) | 5) +#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_TESTA_OUT20 (MTK_PIN_NO(38) | 7) + +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MISO (MTK_PIN_NO(39) | 1) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_ADC_DAT_IN (MTK_PIN_NO(39) | 2) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(39) | 3) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_EINT20 (MTK_PIN_NO(39) | 4) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_USB_TEST_IO_8 (MTK_PIN_NO(39) | 5) +#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_TESTA_OUT21 (MTK_PIN_NO(39) | 7) + +#define MT8135_PIN_40_DAC_CLK__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT8135_PIN_40_DAC_CLK__FUNC_DAC_CK (MTK_PIN_NO(40) | 1) +#define MT8135_PIN_40_DAC_CLK__FUNC_EINT22 (MTK_PIN_NO(40) | 2) +#define MT8135_PIN_40_DAC_CLK__FUNC_HDMI_SDATA1 (MTK_PIN_NO(40) | 3) +#define MT8135_PIN_40_DAC_CLK__FUNC_USB_TEST_IO_9 (MTK_PIN_NO(40) | 5) +#define MT8135_PIN_40_DAC_CLK__FUNC_TESTA_OUT22 (MTK_PIN_NO(40) | 7) + +#define MT8135_PIN_41_DAC_WS__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT8135_PIN_41_DAC_WS__FUNC_DAC_WS (MTK_PIN_NO(41) | 1) +#define MT8135_PIN_41_DAC_WS__FUNC_EINT24 (MTK_PIN_NO(41) | 2) +#define MT8135_PIN_41_DAC_WS__FUNC_HDMI_SDATA2 (MTK_PIN_NO(41) | 3) +#define MT8135_PIN_41_DAC_WS__FUNC_USB_TEST_IO_10 (MTK_PIN_NO(41) | 5) +#define MT8135_PIN_41_DAC_WS__FUNC_TESTA_OUT23 (MTK_PIN_NO(41) | 7) + +#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(42) | 1) +#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_EINT23 (MTK_PIN_NO(42) | 2) +#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_HDMI_SDATA3 (MTK_PIN_NO(42) | 3) +#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_USB_TEST_IO_11 (MTK_PIN_NO(42) | 5) +#define 
MT8135_PIN_42_DAC_DAT_OUT__FUNC_TESTA_OUT24 (MTK_PIN_NO(42) | 7) + +#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_PWRAP_SPIDI (MTK_PIN_NO(43) | 1) +#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_EINT29 (MTK_PIN_NO(43) | 2) + +#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_PWRAP_SPIDO (MTK_PIN_NO(44) | 1) +#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_EINT28 (MTK_PIN_NO(44) | 2) + +#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(45) | 1) +#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_EINT27 (MTK_PIN_NO(45) | 2) + +#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_PWRAP_SPICK_I (MTK_PIN_NO(46) | 1) +#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_EINT26 (MTK_PIN_NO(46) | 2) + +#define MT8135_PIN_47_PWRAP_EVENT__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT8135_PIN_47_PWRAP_EVENT__FUNC_PWRAP_EVENT_IN (MTK_PIN_NO(47) | 1) +#define MT8135_PIN_47_PWRAP_EVENT__FUNC_EINT25 (MTK_PIN_NO(47) | 2) +#define MT8135_PIN_47_PWRAP_EVENT__FUNC_TESTA_OUT2 (MTK_PIN_NO(47) | 7) + +#define MT8135_PIN_48_RTC32K_CK__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT8135_PIN_48_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(48) | 1) + +#define MT8135_PIN_49_WATCHDOG__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT8135_PIN_49_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(49) | 1) +#define MT8135_PIN_49_WATCHDOG__FUNC_EINT36 (MTK_PIN_NO(49) | 2) + +#define MT8135_PIN_50_SRCLKENA__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define MT8135_PIN_50_SRCLKENA__FUNC_SRCLKENA (MTK_PIN_NO(50) | 1) +#define MT8135_PIN_50_SRCLKENA__FUNC_EINT38 (MTK_PIN_NO(50) | 2) + +#define MT8135_PIN_51_SRCVOLTEN__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define MT8135_PIN_51_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(51) | 1) +#define MT8135_PIN_51_SRCVOLTEN__FUNC_EINT37 (MTK_PIN_NO(51) | 2) + +#define MT8135_PIN_52_EINT0__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define MT8135_PIN_52_EINT0__FUNC_EINT0 (MTK_PIN_NO(52) | 1) +#define MT8135_PIN_52_EINT0__FUNC_PWM1 (MTK_PIN_NO(52) | 2) +#define MT8135_PIN_52_EINT0__FUNC_CLKM0 (MTK_PIN_NO(52) | 3) +#define MT8135_PIN_52_EINT0__FUNC_SPDIF_OUT (MTK_PIN_NO(52) | 4) +#define MT8135_PIN_52_EINT0__FUNC_USB_TEST_IO_12 (MTK_PIN_NO(52) | 5) +#define MT8135_PIN_52_EINT0__FUNC_USB_SCL (MTK_PIN_NO(52) | 7) + +#define MT8135_PIN_53_URXD2__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT8135_PIN_53_URXD2__FUNC_URXD2 (MTK_PIN_NO(53) | 1) +#define MT8135_PIN_53_URXD2__FUNC_EINT83 (MTK_PIN_NO(53) | 2) +#define MT8135_PIN_53_URXD2__FUNC_HDMI_LRCK (MTK_PIN_NO(53) | 4) +#define MT8135_PIN_53_URXD2__FUNC_CLKM3 (MTK_PIN_NO(53) | 5) +#define MT8135_PIN_53_URXD2__FUNC_UTXD2 (MTK_PIN_NO(53) | 7) + +#define MT8135_PIN_54_UTXD2__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT8135_PIN_54_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(54) | 1) +#define MT8135_PIN_54_UTXD2__FUNC_EINT82 (MTK_PIN_NO(54) | 2) +#define MT8135_PIN_54_UTXD2__FUNC_HDMI_BCK_OUT (MTK_PIN_NO(54) | 4) +#define MT8135_PIN_54_UTXD2__FUNC_CLKM2 (MTK_PIN_NO(54) | 5) +#define MT8135_PIN_54_UTXD2__FUNC_URXD2 (MTK_PIN_NO(54) | 7) + +#define MT8135_PIN_55_UCTS2__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT8135_PIN_55_UCTS2__FUNC_UCTS2 (MTK_PIN_NO(55) | 1) +#define MT8135_PIN_55_UCTS2__FUNC_EINT84 (MTK_PIN_NO(55) | 2) +#define MT8135_PIN_55_UCTS2__FUNC_PWM1 (MTK_PIN_NO(55) | 5) +#define MT8135_PIN_55_UCTS2__FUNC_URTS2 (MTK_PIN_NO(55) | 7) + +#define 
MT8135_PIN_56_URTS2__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT8135_PIN_56_URTS2__FUNC_URTS2 (MTK_PIN_NO(56) | 1) +#define MT8135_PIN_56_URTS2__FUNC_EINT85 (MTK_PIN_NO(56) | 2) +#define MT8135_PIN_56_URTS2__FUNC_PWM2 (MTK_PIN_NO(56) | 5) +#define MT8135_PIN_56_URTS2__FUNC_UCTS2 (MTK_PIN_NO(56) | 7) + +#define MT8135_PIN_57_JTCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT8135_PIN_57_JTCK__FUNC_JTCK (MTK_PIN_NO(57) | 1) +#define MT8135_PIN_57_JTCK__FUNC_EINT188 (MTK_PIN_NO(57) | 2) +#define MT8135_PIN_57_JTCK__FUNC_DSP1_ICK (MTK_PIN_NO(57) | 3) + +#define MT8135_PIN_58_JTDO__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT8135_PIN_58_JTDO__FUNC_JTDO (MTK_PIN_NO(58) | 1) +#define MT8135_PIN_58_JTDO__FUNC_EINT190 (MTK_PIN_NO(58) | 2) +#define MT8135_PIN_58_JTDO__FUNC_DSP2_IMS (MTK_PIN_NO(58) | 3) + +#define MT8135_PIN_59_JTRST_B__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define MT8135_PIN_59_JTRST_B__FUNC_JTRST_B (MTK_PIN_NO(59) | 1) +#define MT8135_PIN_59_JTRST_B__FUNC_EINT0 (MTK_PIN_NO(59) | 2) +#define MT8135_PIN_59_JTRST_B__FUNC_DSP2_ICK (MTK_PIN_NO(59) | 3) + +#define MT8135_PIN_60_JTDI__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT8135_PIN_60_JTDI__FUNC_JTDI (MTK_PIN_NO(60) | 1) +#define MT8135_PIN_60_JTDI__FUNC_EINT189 (MTK_PIN_NO(60) | 2) +#define MT8135_PIN_60_JTDI__FUNC_DSP1_IMS (MTK_PIN_NO(60) | 3) + +#define MT8135_PIN_61_JRTCK__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT8135_PIN_61_JRTCK__FUNC_JRTCK (MTK_PIN_NO(61) | 1) +#define MT8135_PIN_61_JRTCK__FUNC_EINT187 (MTK_PIN_NO(61) | 2) +#define MT8135_PIN_61_JRTCK__FUNC_DSP1_ID (MTK_PIN_NO(61) | 3) + +#define MT8135_PIN_62_JTMS__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT8135_PIN_62_JTMS__FUNC_JTMS (MTK_PIN_NO(62) | 1) +#define MT8135_PIN_62_JTMS__FUNC_EINT191 (MTK_PIN_NO(62) | 2) +#define MT8135_PIN_62_JTMS__FUNC_DSP2_ID (MTK_PIN_NO(62) | 3) + +#define MT8135_PIN_63_MSDC1_INSI__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT8135_PIN_63_MSDC1_INSI__FUNC_MSDC1_INSI (MTK_PIN_NO(63) | 1) +#define MT8135_PIN_63_MSDC1_INSI__FUNC_SCL5 (MTK_PIN_NO(63) | 3) +#define MT8135_PIN_63_MSDC1_INSI__FUNC_PWM6 (MTK_PIN_NO(63) | 4) +#define MT8135_PIN_63_MSDC1_INSI__FUNC_CLKM5 (MTK_PIN_NO(63) | 5) +#define MT8135_PIN_63_MSDC1_INSI__FUNC_TESTB_OUT6 (MTK_PIN_NO(63) | 7) + +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_MSDC1_SDWPI (MTK_PIN_NO(64) | 1) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_EINT58 (MTK_PIN_NO(64) | 2) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_SDA5 (MTK_PIN_NO(64) | 3) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_PWM7 (MTK_PIN_NO(64) | 4) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_CLKM6 (MTK_PIN_NO(64) | 5) +#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_TESTB_OUT7 (MTK_PIN_NO(64) | 7) + +#define MT8135_PIN_65_MSDC2_INSI__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT8135_PIN_65_MSDC2_INSI__FUNC_MSDC2_INSI (MTK_PIN_NO(65) | 1) +#define MT8135_PIN_65_MSDC2_INSI__FUNC_USB_TEST_IO_27 (MTK_PIN_NO(65) | 5) +#define MT8135_PIN_65_MSDC2_INSI__FUNC_TESTA_OUT3 (MTK_PIN_NO(65) | 7) + +#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_MSDC2_SDWPI (MTK_PIN_NO(66) | 1) +#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_EINT66 (MTK_PIN_NO(66) | 2) +#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_USB_TEST_IO_28 (MTK_PIN_NO(66) | 5) + +#define MT8135_PIN_67_URXD4__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT8135_PIN_67_URXD4__FUNC_URXD4 (MTK_PIN_NO(67) | 1) +#define MT8135_PIN_67_URXD4__FUNC_EINT89 (MTK_PIN_NO(67) | 2) +#define MT8135_PIN_67_URXD4__FUNC_URXD1 (MTK_PIN_NO(67) 
| 3) +#define MT8135_PIN_67_URXD4__FUNC_UTXD4 (MTK_PIN_NO(67) | 6) +#define MT8135_PIN_67_URXD4__FUNC_TESTB_OUT10 (MTK_PIN_NO(67) | 7) + +#define MT8135_PIN_68_UTXD4__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT8135_PIN_68_UTXD4__FUNC_UTXD4 (MTK_PIN_NO(68) | 1) +#define MT8135_PIN_68_UTXD4__FUNC_EINT88 (MTK_PIN_NO(68) | 2) +#define MT8135_PIN_68_UTXD4__FUNC_UTXD1 (MTK_PIN_NO(68) | 3) +#define MT8135_PIN_68_UTXD4__FUNC_URXD4 (MTK_PIN_NO(68) | 6) +#define MT8135_PIN_68_UTXD4__FUNC_TESTB_OUT11 (MTK_PIN_NO(68) | 7) + +#define MT8135_PIN_69_URXD1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT8135_PIN_69_URXD1__FUNC_URXD1 (MTK_PIN_NO(69) | 1) +#define MT8135_PIN_69_URXD1__FUNC_EINT79 (MTK_PIN_NO(69) | 2) +#define MT8135_PIN_69_URXD1__FUNC_URXD4 (MTK_PIN_NO(69) | 3) +#define MT8135_PIN_69_URXD1__FUNC_UTXD1 (MTK_PIN_NO(69) | 6) +#define MT8135_PIN_69_URXD1__FUNC_TESTB_OUT24 (MTK_PIN_NO(69) | 7) + +#define MT8135_PIN_70_UTXD1__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT8135_PIN_70_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(70) | 1) +#define MT8135_PIN_70_UTXD1__FUNC_EINT78 (MTK_PIN_NO(70) | 2) +#define MT8135_PIN_70_UTXD1__FUNC_UTXD4 (MTK_PIN_NO(70) | 3) +#define MT8135_PIN_70_UTXD1__FUNC_URXD1 (MTK_PIN_NO(70) | 6) +#define MT8135_PIN_70_UTXD1__FUNC_TESTB_OUT25 (MTK_PIN_NO(70) | 7) + +#define MT8135_PIN_71_UCTS1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT8135_PIN_71_UCTS1__FUNC_UCTS1 (MTK_PIN_NO(71) | 1) +#define MT8135_PIN_71_UCTS1__FUNC_EINT80 (MTK_PIN_NO(71) | 2) +#define MT8135_PIN_71_UCTS1__FUNC_CLKM0 (MTK_PIN_NO(71) | 5) +#define MT8135_PIN_71_UCTS1__FUNC_URTS1 (MTK_PIN_NO(71) | 6) +#define MT8135_PIN_71_UCTS1__FUNC_TESTB_OUT31 (MTK_PIN_NO(71) | 7) + +#define MT8135_PIN_72_URTS1__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT8135_PIN_72_URTS1__FUNC_URTS1 (MTK_PIN_NO(72) | 1) +#define MT8135_PIN_72_URTS1__FUNC_EINT81 (MTK_PIN_NO(72) | 2) +#define MT8135_PIN_72_URTS1__FUNC_CLKM1 (MTK_PIN_NO(72) | 5) +#define MT8135_PIN_72_URTS1__FUNC_UCTS1 (MTK_PIN_NO(72) | 6) +#define MT8135_PIN_72_URTS1__FUNC_TESTB_OUT21 (MTK_PIN_NO(72) | 7) + +#define MT8135_PIN_73_PWM1__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT8135_PIN_73_PWM1__FUNC_PWM1 (MTK_PIN_NO(73) | 1) +#define MT8135_PIN_73_PWM1__FUNC_EINT73 (MTK_PIN_NO(73) | 2) +#define MT8135_PIN_73_PWM1__FUNC_USB_DRVVBUS (MTK_PIN_NO(73) | 5) +#define MT8135_PIN_73_PWM1__FUNC_DISP_PWM (MTK_PIN_NO(73) | 6) +#define MT8135_PIN_73_PWM1__FUNC_TESTB_OUT8 (MTK_PIN_NO(73) | 7) + +#define MT8135_PIN_74_PWM2__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT8135_PIN_74_PWM2__FUNC_PWM2 (MTK_PIN_NO(74) | 1) +#define MT8135_PIN_74_PWM2__FUNC_EINT74 (MTK_PIN_NO(74) | 2) +#define MT8135_PIN_74_PWM2__FUNC_DPI33_CK (MTK_PIN_NO(74) | 3) +#define MT8135_PIN_74_PWM2__FUNC_PWM5 (MTK_PIN_NO(74) | 4) +#define MT8135_PIN_74_PWM2__FUNC_URXD2 (MTK_PIN_NO(74) | 5) +#define MT8135_PIN_74_PWM2__FUNC_DISP_PWM (MTK_PIN_NO(74) | 6) +#define MT8135_PIN_74_PWM2__FUNC_TESTB_OUT9 (MTK_PIN_NO(74) | 7) + +#define MT8135_PIN_75_PWM3__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT8135_PIN_75_PWM3__FUNC_PWM3 (MTK_PIN_NO(75) | 1) +#define MT8135_PIN_75_PWM3__FUNC_EINT75 (MTK_PIN_NO(75) | 2) +#define MT8135_PIN_75_PWM3__FUNC_DPI33_D0 (MTK_PIN_NO(75) | 3) +#define MT8135_PIN_75_PWM3__FUNC_PWM6 (MTK_PIN_NO(75) | 4) +#define MT8135_PIN_75_PWM3__FUNC_UTXD2 (MTK_PIN_NO(75) | 5) +#define MT8135_PIN_75_PWM3__FUNC_DISP_PWM (MTK_PIN_NO(75) | 6) +#define MT8135_PIN_75_PWM3__FUNC_TESTB_OUT12 (MTK_PIN_NO(75) | 7) + +#define MT8135_PIN_76_PWM4__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT8135_PIN_76_PWM4__FUNC_PWM4 (MTK_PIN_NO(76) | 1) 
+#define MT8135_PIN_76_PWM4__FUNC_EINT76 (MTK_PIN_NO(76) | 2) +#define MT8135_PIN_76_PWM4__FUNC_DPI33_D1 (MTK_PIN_NO(76) | 3) +#define MT8135_PIN_76_PWM4__FUNC_PWM7 (MTK_PIN_NO(76) | 4) +#define MT8135_PIN_76_PWM4__FUNC_DISP_PWM (MTK_PIN_NO(76) | 6) +#define MT8135_PIN_76_PWM4__FUNC_TESTB_OUT13 (MTK_PIN_NO(76) | 7) + +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(77) | 1) +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_EINT63 (MTK_PIN_NO(77) | 2) +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DSP2_IMS (MTK_PIN_NO(77) | 4) +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DPI33_D6 (MTK_PIN_NO(77) | 6) +#define MT8135_PIN_77_MSDC2_DAT2__FUNC_TESTA_OUT25 (MTK_PIN_NO(77) | 7) + +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(78) | 1) +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_EINT64 (MTK_PIN_NO(78) | 2) +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DSP2_ID (MTK_PIN_NO(78) | 4) +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DPI33_D7 (MTK_PIN_NO(78) | 6) +#define MT8135_PIN_78_MSDC2_DAT3__FUNC_TESTA_OUT26 (MTK_PIN_NO(78) | 7) + +#define MT8135_PIN_79_MSDC2_CMD__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(79) | 1) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_EINT60 (MTK_PIN_NO(79) | 2) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_DSP1_IMS (MTK_PIN_NO(79) | 4) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_PCM1_WS (MTK_PIN_NO(79) | 5) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_DPI33_D3 (MTK_PIN_NO(79) | 6) +#define MT8135_PIN_79_MSDC2_CMD__FUNC_TESTA_OUT0 (MTK_PIN_NO(79) | 7) + +#define MT8135_PIN_80_MSDC2_CLK__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(80) | 1) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_EINT59 (MTK_PIN_NO(80) | 2) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_DSP1_ICK (MTK_PIN_NO(80) | 4) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_PCM1_CK (MTK_PIN_NO(80) | 5) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_DPI33_D2 (MTK_PIN_NO(80) | 6) +#define MT8135_PIN_80_MSDC2_CLK__FUNC_TESTA_OUT1 (MTK_PIN_NO(80) | 7) + +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(81) | 1) +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_EINT62 (MTK_PIN_NO(81) | 2) +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DSP2_ICK (MTK_PIN_NO(81) | 4) +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_PCM1_DO (MTK_PIN_NO(81) | 5) +#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DPI33_D5 (MTK_PIN_NO(81) | 6) + +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1) +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_EINT61 (MTK_PIN_NO(82) | 2) +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DSP1_ID (MTK_PIN_NO(82) | 4) +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_PCM1_DI (MTK_PIN_NO(82) | 5) +#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DPI33_D4 (MTK_PIN_NO(82) | 6) + +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(83) | 1) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_EINT53 (MTK_PIN_NO(83) | 2) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_SCL1 (MTK_PIN_NO(83) | 3) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_PWM2 (MTK_PIN_NO(83) | 4) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_CLKM1 (MTK_PIN_NO(83) | 5) +#define MT8135_PIN_83_MSDC1_DAT0__FUNC_TESTB_OUT2 (MTK_PIN_NO(83) | 7) + +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define 
MT8135_PIN_84_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(84) | 1) +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_EINT54 (MTK_PIN_NO(84) | 2) +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_SDA1 (MTK_PIN_NO(84) | 3) +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_PWM3 (MTK_PIN_NO(84) | 4) +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_CLKM2 (MTK_PIN_NO(84) | 5) +#define MT8135_PIN_84_MSDC1_DAT1__FUNC_TESTB_OUT3 (MTK_PIN_NO(84) | 7) + +#define MT8135_PIN_85_MSDC1_CMD__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_EINT52 (MTK_PIN_NO(85) | 2) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_SDA0 (MTK_PIN_NO(85) | 3) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_PWM1 (MTK_PIN_NO(85) | 4) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_CLKM0 (MTK_PIN_NO(85) | 5) +#define MT8135_PIN_85_MSDC1_CMD__FUNC_TESTB_OUT1 (MTK_PIN_NO(85) | 7) + +#define MT8135_PIN_86_MSDC1_CLK__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define MT8135_PIN_86_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(86) | 1) +#define MT8135_PIN_86_MSDC1_CLK__FUNC_EINT51 (MTK_PIN_NO(86) | 2) +#define MT8135_PIN_86_MSDC1_CLK__FUNC_SCL0 (MTK_PIN_NO(86) | 3) +#define MT8135_PIN_86_MSDC1_CLK__FUNC_DISP_PWM (MTK_PIN_NO(86) | 4) +#define MT8135_PIN_86_MSDC1_CLK__FUNC_TESTB_OUT0 (MTK_PIN_NO(86) | 7) + +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(87) | 1) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_EINT55 (MTK_PIN_NO(87) | 2) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_SCL4 (MTK_PIN_NO(87) | 3) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_PWM4 (MTK_PIN_NO(87) | 4) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_CLKM3 (MTK_PIN_NO(87) | 5) +#define MT8135_PIN_87_MSDC1_DAT2__FUNC_TESTB_OUT4 (MTK_PIN_NO(87) | 7) + +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(88) | 1) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_EINT56 (MTK_PIN_NO(88) | 2) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_SDA4 (MTK_PIN_NO(88) | 3) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_PWM5 (MTK_PIN_NO(88) | 4) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_CLKM4 (MTK_PIN_NO(88) | 5) +#define MT8135_PIN_88_MSDC1_DAT3__FUNC_TESTB_OUT5 (MTK_PIN_NO(88) | 7) + +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_MSDC4_DAT0 (MTK_PIN_NO(89) | 1) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EINT133 (MTK_PIN_NO(89) | 2) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(89) | 4) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_USB_DRVVBUS (MTK_PIN_NO(89) | 5) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_A_FUNC_DIN_9 (MTK_PIN_NO(89) | 6) +#define MT8135_PIN_89_MSDC4_DAT0__FUNC_LPTE (MTK_PIN_NO(89) | 7) + +#define MT8135_PIN_90_MSDC4_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define MT8135_PIN_90_MSDC4_DAT1__FUNC_MSDC4_DAT1 (MTK_PIN_NO(90) | 1) +#define MT8135_PIN_90_MSDC4_DAT1__FUNC_EINT134 (MTK_PIN_NO(90) | 2) +#define MT8135_PIN_90_MSDC4_DAT1__FUNC_A_FUNC_DIN_10 (MTK_PIN_NO(90) | 6) +#define MT8135_PIN_90_MSDC4_DAT1__FUNC_LRSTB_1X (MTK_PIN_NO(90) | 7) + +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_MSDC4_DAT5 (MTK_PIN_NO(91) | 1) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_EINT136 (MTK_PIN_NO(91) | 2) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_I2SIN_WS (MTK_PIN_NO(91) | 3) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_DAC_WS (MTK_PIN_NO(91) | 4) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_PCM1_WS (MTK_PIN_NO(91) | 5) +#define 
MT8135_PIN_91_MSDC4_DAT5__FUNC_A_FUNC_DIN_11 (MTK_PIN_NO(91) | 6) +#define MT8135_PIN_91_MSDC4_DAT5__FUNC_SPI1_CSN (MTK_PIN_NO(91) | 7) + +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_MSDC4_DAT6 (MTK_PIN_NO(92) | 1) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_EINT137 (MTK_PIN_NO(92) | 2) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_I2SOUT_DAT (MTK_PIN_NO(92) | 3) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_DAC_DAT_OUT (MTK_PIN_NO(92) | 4) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_PCM1_DO (MTK_PIN_NO(92) | 5) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_A_FUNC_DIN_12 (MTK_PIN_NO(92) | 6) +#define MT8135_PIN_92_MSDC4_DAT6__FUNC_SPI1_MO (MTK_PIN_NO(92) | 7) + +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_MSDC4_DAT7 (MTK_PIN_NO(93) | 1) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_EINT138 (MTK_PIN_NO(93) | 2) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_I2SIN_DAT (MTK_PIN_NO(93) | 3) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_PCM1_DI (MTK_PIN_NO(93) | 5) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_A_FUNC_DIN_13 (MTK_PIN_NO(93) | 6) +#define MT8135_PIN_93_MSDC4_DAT7__FUNC_SPI1_MI (MTK_PIN_NO(93) | 7) + +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_MSDC4_DAT4 (MTK_PIN_NO(94) | 1) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_EINT135 (MTK_PIN_NO(94) | 2) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_I2SIN_CK (MTK_PIN_NO(94) | 3) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_DAC_CK (MTK_PIN_NO(94) | 4) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_PCM1_CK (MTK_PIN_NO(94) | 5) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_A_FUNC_DIN_14 (MTK_PIN_NO(94) | 6) +#define MT8135_PIN_94_MSDC4_DAT4__FUNC_SPI1_CLK (MTK_PIN_NO(94) | 7) + +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_MSDC4_DAT2 (MTK_PIN_NO(95) | 1) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_EINT131 (MTK_PIN_NO(95) | 2) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_I2SIN_WS (MTK_PIN_NO(95) | 3) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_CM2PDN_2X (MTK_PIN_NO(95) | 4) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_DAC_WS (MTK_PIN_NO(95) | 5) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_PCM1_WS (MTK_PIN_NO(95) | 6) +#define MT8135_PIN_95_MSDC4_DAT2__FUNC_LSCE0B_1X (MTK_PIN_NO(95) | 7) + +#define MT8135_PIN_96_MSDC4_CLK__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_MSDC4_CLK (MTK_PIN_NO(96) | 1) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_EINT129 (MTK_PIN_NO(96) | 2) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_DPI1_CK_2X (MTK_PIN_NO(96) | 3) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_CM2PCLK_2X (MTK_PIN_NO(96) | 4) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_PWM4 (MTK_PIN_NO(96) | 5) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_PCM1_DI (MTK_PIN_NO(96) | 6) +#define MT8135_PIN_96_MSDC4_CLK__FUNC_LSCK_1X (MTK_PIN_NO(96) | 7) + +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_MSDC4_DAT3 (MTK_PIN_NO(97) | 1) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_EINT132 (MTK_PIN_NO(97) | 2) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_I2SOUT_DAT (MTK_PIN_NO(97) | 3) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_CM2RST_2X (MTK_PIN_NO(97) | 4) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_DAC_DAT_OUT (MTK_PIN_NO(97) | 5) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_PCM1_DO (MTK_PIN_NO(97) | 6) +#define MT8135_PIN_97_MSDC4_DAT3__FUNC_LSCE1B_1X (MTK_PIN_NO(97) | 7) + +#define MT8135_PIN_98_MSDC4_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define 
MT8135_PIN_98_MSDC4_CMD__FUNC_MSDC4_CMD (MTK_PIN_NO(98) | 1) +#define MT8135_PIN_98_MSDC4_CMD__FUNC_EINT128 (MTK_PIN_NO(98) | 2) +#define MT8135_PIN_98_MSDC4_CMD__FUNC_DPI1_DE_2X (MTK_PIN_NO(98) | 3) +#define MT8135_PIN_98_MSDC4_CMD__FUNC_PWM3 (MTK_PIN_NO(98) | 5) +#define MT8135_PIN_98_MSDC4_CMD__FUNC_LSDA_1X (MTK_PIN_NO(98) | 7) + +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_MSDC4_RSTB (MTK_PIN_NO(99) | 1) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_EINT130 (MTK_PIN_NO(99) | 2) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_I2SIN_CK (MTK_PIN_NO(99) | 3) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_CM2MCLK_2X (MTK_PIN_NO(99) | 4) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_DAC_CK (MTK_PIN_NO(99) | 5) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_PCM1_CK (MTK_PIN_NO(99) | 6) +#define MT8135_PIN_99_MSDC4_RSTB__FUNC_LSA0_1X (MTK_PIN_NO(99) | 7) + +#define MT8135_PIN_100_SDA0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT8135_PIN_100_SDA0__FUNC_SDA0 (MTK_PIN_NO(100) | 1) +#define MT8135_PIN_100_SDA0__FUNC_EINT91 (MTK_PIN_NO(100) | 2) +#define MT8135_PIN_100_SDA0__FUNC_CLKM1 (MTK_PIN_NO(100) | 3) +#define MT8135_PIN_100_SDA0__FUNC_PWM1 (MTK_PIN_NO(100) | 4) +#define MT8135_PIN_100_SDA0__FUNC_A_FUNC_DIN_15 (MTK_PIN_NO(100) | 7) + +#define MT8135_PIN_101_SCL0__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT8135_PIN_101_SCL0__FUNC_SCL0 (MTK_PIN_NO(101) | 1) +#define MT8135_PIN_101_SCL0__FUNC_EINT90 (MTK_PIN_NO(101) | 2) +#define MT8135_PIN_101_SCL0__FUNC_CLKM0 (MTK_PIN_NO(101) | 3) +#define MT8135_PIN_101_SCL0__FUNC_DISP_PWM (MTK_PIN_NO(101) | 4) +#define MT8135_PIN_101_SCL0__FUNC_A_FUNC_DIN_16 (MTK_PIN_NO(101) | 7) + +#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_EINT10 (MTK_PIN_NO(102) | 1) +#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_USB_TEST_IO_16 (MTK_PIN_NO(102) | 5) +#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_TESTB_OUT16 (MTK_PIN_NO(102) | 6) +#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_A_FUNC_DIN_17 (MTK_PIN_NO(102) | 7) + +#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_EINT11 (MTK_PIN_NO(103) | 1) +#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_USB_TEST_IO_17 (MTK_PIN_NO(103) | 5) +#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_TESTB_OUT17 (MTK_PIN_NO(103) | 6) +#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_A_FUNC_DIN_18 (MTK_PIN_NO(103) | 7) + +#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_EINT16 (MTK_PIN_NO(104) | 1) +#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_USB_TEST_IO_18 (MTK_PIN_NO(104) | 5) +#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_TESTB_OUT18 (MTK_PIN_NO(104) | 6) +#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_A_FUNC_DIN_19 (MTK_PIN_NO(104) | 7) + +#define MT8135_PIN_105_I2S_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT8135_PIN_105_I2S_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(105) | 1) +#define MT8135_PIN_105_I2S_CLK__FUNC_EINT10 (MTK_PIN_NO(105) | 2) +#define MT8135_PIN_105_I2S_CLK__FUNC_DAC_CK (MTK_PIN_NO(105) | 3) +#define MT8135_PIN_105_I2S_CLK__FUNC_PCM1_CK (MTK_PIN_NO(105) | 4) +#define MT8135_PIN_105_I2S_CLK__FUNC_USB_TEST_IO_19 (MTK_PIN_NO(105) | 5) +#define MT8135_PIN_105_I2S_CLK__FUNC_TESTB_OUT19 (MTK_PIN_NO(105) | 6) +#define MT8135_PIN_105_I2S_CLK__FUNC_A_FUNC_DIN_20 (MTK_PIN_NO(105) | 7) + +#define MT8135_PIN_106_I2S_WS__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT8135_PIN_106_I2S_WS__FUNC_I2SIN_WS (MTK_PIN_NO(106) | 1) +#define 
MT8135_PIN_106_I2S_WS__FUNC_EINT13 (MTK_PIN_NO(106) | 2) +#define MT8135_PIN_106_I2S_WS__FUNC_DAC_WS (MTK_PIN_NO(106) | 3) +#define MT8135_PIN_106_I2S_WS__FUNC_PCM1_WS (MTK_PIN_NO(106) | 4) +#define MT8135_PIN_106_I2S_WS__FUNC_USB_TEST_IO_20 (MTK_PIN_NO(106) | 5) +#define MT8135_PIN_106_I2S_WS__FUNC_TESTB_OUT20 (MTK_PIN_NO(106) | 6) +#define MT8135_PIN_106_I2S_WS__FUNC_A_FUNC_DIN_21 (MTK_PIN_NO(106) | 7) + +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_I2SIN_DAT (MTK_PIN_NO(107) | 1) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_EINT11 (MTK_PIN_NO(107) | 2) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_PCM1_DI (MTK_PIN_NO(107) | 4) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_USB_TEST_IO_21 (MTK_PIN_NO(107) | 5) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_TESTB_OUT22 (MTK_PIN_NO(107) | 6) +#define MT8135_PIN_107_I2S_DATA_IN__FUNC_A_FUNC_DIN_22 (MTK_PIN_NO(107) | 7) + +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_I2SOUT_DAT (MTK_PIN_NO(108) | 1) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_EINT12 (MTK_PIN_NO(108) | 2) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(108) | 3) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_PCM1_DO (MTK_PIN_NO(108) | 4) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_USB_TEST_IO_22 (MTK_PIN_NO(108) | 5) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_TESTB_OUT23 (MTK_PIN_NO(108) | 6) +#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_A_FUNC_DIN_23 (MTK_PIN_NO(108) | 7) + +#define MT8135_PIN_109_EINT5__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT8135_PIN_109_EINT5__FUNC_EINT5 (MTK_PIN_NO(109) | 1) +#define MT8135_PIN_109_EINT5__FUNC_PWM5 (MTK_PIN_NO(109) | 2) +#define MT8135_PIN_109_EINT5__FUNC_CLKM3 (MTK_PIN_NO(109) | 3) +#define MT8135_PIN_109_EINT5__FUNC_GPU_JTRSTB (MTK_PIN_NO(109) | 4) +#define MT8135_PIN_109_EINT5__FUNC_USB_TEST_IO_23 (MTK_PIN_NO(109) | 5) +#define MT8135_PIN_109_EINT5__FUNC_TESTB_OUT26 (MTK_PIN_NO(109) | 6) +#define MT8135_PIN_109_EINT5__FUNC_A_FUNC_DIN_24 (MTK_PIN_NO(109) | 7) + +#define MT8135_PIN_110_EINT6__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT8135_PIN_110_EINT6__FUNC_EINT6 (MTK_PIN_NO(110) | 1) +#define MT8135_PIN_110_EINT6__FUNC_PWM6 (MTK_PIN_NO(110) | 2) +#define MT8135_PIN_110_EINT6__FUNC_CLKM4 (MTK_PIN_NO(110) | 3) +#define MT8135_PIN_110_EINT6__FUNC_GPU_JTMS (MTK_PIN_NO(110) | 4) +#define MT8135_PIN_110_EINT6__FUNC_USB_TEST_IO_24 (MTK_PIN_NO(110) | 5) +#define MT8135_PIN_110_EINT6__FUNC_TESTB_OUT27 (MTK_PIN_NO(110) | 6) +#define MT8135_PIN_110_EINT6__FUNC_A_FUNC_DIN_25 (MTK_PIN_NO(110) | 7) + +#define MT8135_PIN_111_EINT7__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT8135_PIN_111_EINT7__FUNC_EINT7 (MTK_PIN_NO(111) | 1) +#define MT8135_PIN_111_EINT7__FUNC_PWM7 (MTK_PIN_NO(111) | 2) +#define MT8135_PIN_111_EINT7__FUNC_CLKM5 (MTK_PIN_NO(111) | 3) +#define MT8135_PIN_111_EINT7__FUNC_GPU_JTDO (MTK_PIN_NO(111) | 4) +#define MT8135_PIN_111_EINT7__FUNC_USB_TEST_IO_25 (MTK_PIN_NO(111) | 5) +#define MT8135_PIN_111_EINT7__FUNC_TESTB_OUT28 (MTK_PIN_NO(111) | 6) +#define MT8135_PIN_111_EINT7__FUNC_A_FUNC_DIN_26 (MTK_PIN_NO(111) | 7) + +#define MT8135_PIN_112_EINT8__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT8135_PIN_112_EINT8__FUNC_EINT8 (MTK_PIN_NO(112) | 1) +#define MT8135_PIN_112_EINT8__FUNC_DISP_PWM (MTK_PIN_NO(112) | 2) +#define MT8135_PIN_112_EINT8__FUNC_CLKM6 (MTK_PIN_NO(112) | 3) +#define MT8135_PIN_112_EINT8__FUNC_GPU_JTDI (MTK_PIN_NO(112) | 4) +#define MT8135_PIN_112_EINT8__FUNC_USB_TEST_IO_26 
(MTK_PIN_NO(112) | 5) +#define MT8135_PIN_112_EINT8__FUNC_TESTB_OUT29 (MTK_PIN_NO(112) | 6) +#define MT8135_PIN_112_EINT8__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(112) | 7) + +#define MT8135_PIN_113_EINT9__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT8135_PIN_113_EINT9__FUNC_EINT9 (MTK_PIN_NO(113) | 1) +#define MT8135_PIN_113_EINT9__FUNC_GPU_JTCK (MTK_PIN_NO(113) | 4) +#define MT8135_PIN_113_EINT9__FUNC_USB_DRVVBUS (MTK_PIN_NO(113) | 5) +#define MT8135_PIN_113_EINT9__FUNC_TESTB_OUT30 (MTK_PIN_NO(113) | 6) +#define MT8135_PIN_113_EINT9__FUNC_A_FUNC_DIN_27 (MTK_PIN_NO(113) | 7) + +#define MT8135_PIN_114_LPCE1B__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT8135_PIN_114_LPCE1B__FUNC_LPCE1B (MTK_PIN_NO(114) | 1) +#define MT8135_PIN_114_LPCE1B__FUNC_EINT127 (MTK_PIN_NO(114) | 2) +#define MT8135_PIN_114_LPCE1B__FUNC_PWM2 (MTK_PIN_NO(114) | 5) +#define MT8135_PIN_114_LPCE1B__FUNC_TESTB_OUT14 (MTK_PIN_NO(114) | 6) +#define MT8135_PIN_114_LPCE1B__FUNC_A_FUNC_DIN_28 (MTK_PIN_NO(114) | 7) + +#define MT8135_PIN_115_LPCE0B__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT8135_PIN_115_LPCE0B__FUNC_LPCE0B (MTK_PIN_NO(115) | 1) +#define MT8135_PIN_115_LPCE0B__FUNC_EINT126 (MTK_PIN_NO(115) | 2) +#define MT8135_PIN_115_LPCE0B__FUNC_PWM1 (MTK_PIN_NO(115) | 5) +#define MT8135_PIN_115_LPCE0B__FUNC_TESTB_OUT15 (MTK_PIN_NO(115) | 6) +#define MT8135_PIN_115_LPCE0B__FUNC_A_FUNC_DIN_29 (MTK_PIN_NO(115) | 7) + +#define MT8135_PIN_116_DISP_PWM__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT8135_PIN_116_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(116) | 1) +#define MT8135_PIN_116_DISP_PWM__FUNC_EINT77 (MTK_PIN_NO(116) | 2) +#define MT8135_PIN_116_DISP_PWM__FUNC_LSDI (MTK_PIN_NO(116) | 3) +#define MT8135_PIN_116_DISP_PWM__FUNC_PWM1 (MTK_PIN_NO(116) | 4) +#define MT8135_PIN_116_DISP_PWM__FUNC_PWM2 (MTK_PIN_NO(116) | 5) +#define MT8135_PIN_116_DISP_PWM__FUNC_PWM3 (MTK_PIN_NO(116) | 7) + +#define MT8135_PIN_117_EINT1__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT8135_PIN_117_EINT1__FUNC_EINT1 (MTK_PIN_NO(117) | 1) +#define MT8135_PIN_117_EINT1__FUNC_PWM2 (MTK_PIN_NO(117) | 2) +#define MT8135_PIN_117_EINT1__FUNC_CLKM1 (MTK_PIN_NO(117) | 3) +#define MT8135_PIN_117_EINT1__FUNC_USB_TEST_IO_13 (MTK_PIN_NO(117) | 5) +#define MT8135_PIN_117_EINT1__FUNC_USB_SDA (MTK_PIN_NO(117) | 7) + +#define MT8135_PIN_118_EINT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT8135_PIN_118_EINT2__FUNC_EINT2 (MTK_PIN_NO(118) | 1) +#define MT8135_PIN_118_EINT2__FUNC_PWM3 (MTK_PIN_NO(118) | 2) +#define MT8135_PIN_118_EINT2__FUNC_CLKM2 (MTK_PIN_NO(118) | 3) +#define MT8135_PIN_118_EINT2__FUNC_USB_TEST_IO_14 (MTK_PIN_NO(118) | 5) +#define MT8135_PIN_118_EINT2__FUNC_SRCLKENAI2 (MTK_PIN_NO(118) | 6) +#define MT8135_PIN_118_EINT2__FUNC_A_FUNC_DIN_30 (MTK_PIN_NO(118) | 7) + +#define MT8135_PIN_119_EINT3__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT8135_PIN_119_EINT3__FUNC_EINT3 (MTK_PIN_NO(119) | 1) +#define MT8135_PIN_119_EINT3__FUNC_USB_TEST_IO_15 (MTK_PIN_NO(119) | 5) +#define MT8135_PIN_119_EINT3__FUNC_SRCLKENAI1 (MTK_PIN_NO(119) | 6) +#define MT8135_PIN_119_EINT3__FUNC_EXT_26M_CK (MTK_PIN_NO(119) | 7) + +#define MT8135_PIN_120_EINT4__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT8135_PIN_120_EINT4__FUNC_EINT4 (MTK_PIN_NO(120) | 1) +#define MT8135_PIN_120_EINT4__FUNC_PWM4 (MTK_PIN_NO(120) | 2) +#define MT8135_PIN_120_EINT4__FUNC_USB_DRVVBUS (MTK_PIN_NO(120) | 5) +#define MT8135_PIN_120_EINT4__FUNC_A_FUNC_DIN_31 (MTK_PIN_NO(120) | 7) + +#define MT8135_PIN_121_DPIDE__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT8135_PIN_121_DPIDE__FUNC_DPI0_DE (MTK_PIN_NO(121) | 1) 
+#define MT8135_PIN_121_DPIDE__FUNC_EINT100 (MTK_PIN_NO(121) | 2) +#define MT8135_PIN_121_DPIDE__FUNC_I2SOUT_DAT (MTK_PIN_NO(121) | 3) +#define MT8135_PIN_121_DPIDE__FUNC_DAC_DAT_OUT (MTK_PIN_NO(121) | 4) +#define MT8135_PIN_121_DPIDE__FUNC_PCM1_DO (MTK_PIN_NO(121) | 5) +#define MT8135_PIN_121_DPIDE__FUNC_IRDA_TXD (MTK_PIN_NO(121) | 6) + +#define MT8135_PIN_122_DPICK__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT8135_PIN_122_DPICK__FUNC_DPI0_CK (MTK_PIN_NO(122) | 1) +#define MT8135_PIN_122_DPICK__FUNC_EINT101 (MTK_PIN_NO(122) | 2) +#define MT8135_PIN_122_DPICK__FUNC_I2SIN_DAT (MTK_PIN_NO(122) | 3) +#define MT8135_PIN_122_DPICK__FUNC_PCM1_DI (MTK_PIN_NO(122) | 5) +#define MT8135_PIN_122_DPICK__FUNC_IRDA_PDN (MTK_PIN_NO(122) | 6) + +#define MT8135_PIN_123_DPIG4__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT8135_PIN_123_DPIG4__FUNC_DPI0_G4 (MTK_PIN_NO(123) | 1) +#define MT8135_PIN_123_DPIG4__FUNC_EINT114 (MTK_PIN_NO(123) | 2) +#define MT8135_PIN_123_DPIG4__FUNC_CM2DAT_2X_0 (MTK_PIN_NO(123) | 4) +#define MT8135_PIN_123_DPIG4__FUNC_DSP2_ID (MTK_PIN_NO(123) | 5) + +#define MT8135_PIN_124_DPIG5__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT8135_PIN_124_DPIG5__FUNC_DPI0_G5 (MTK_PIN_NO(124) | 1) +#define MT8135_PIN_124_DPIG5__FUNC_EINT115 (MTK_PIN_NO(124) | 2) +#define MT8135_PIN_124_DPIG5__FUNC_CM2DAT_2X_1 (MTK_PIN_NO(124) | 4) +#define MT8135_PIN_124_DPIG5__FUNC_DSP2_ICK (MTK_PIN_NO(124) | 5) + +#define MT8135_PIN_125_DPIR3__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT8135_PIN_125_DPIR3__FUNC_DPI0_R3 (MTK_PIN_NO(125) | 1) +#define MT8135_PIN_125_DPIR3__FUNC_EINT121 (MTK_PIN_NO(125) | 2) +#define MT8135_PIN_125_DPIR3__FUNC_CM2DAT_2X_7 (MTK_PIN_NO(125) | 4) + +#define MT8135_PIN_126_DPIG1__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT8135_PIN_126_DPIG1__FUNC_DPI0_G1 (MTK_PIN_NO(126) | 1) +#define MT8135_PIN_126_DPIG1__FUNC_EINT111 (MTK_PIN_NO(126) | 2) +#define MT8135_PIN_126_DPIG1__FUNC_DSP1_ICK (MTK_PIN_NO(126) | 5) + +#define MT8135_PIN_127_DPIVSYNC__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define MT8135_PIN_127_DPIVSYNC__FUNC_DPI0_VSYNC (MTK_PIN_NO(127) | 1) +#define MT8135_PIN_127_DPIVSYNC__FUNC_EINT98 (MTK_PIN_NO(127) | 2) +#define MT8135_PIN_127_DPIVSYNC__FUNC_I2SIN_CK (MTK_PIN_NO(127) | 3) +#define MT8135_PIN_127_DPIVSYNC__FUNC_DAC_CK (MTK_PIN_NO(127) | 4) +#define MT8135_PIN_127_DPIVSYNC__FUNC_PCM1_CK (MTK_PIN_NO(127) | 5) + +#define MT8135_PIN_128_DPIHSYNC__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define MT8135_PIN_128_DPIHSYNC__FUNC_DPI0_HSYNC (MTK_PIN_NO(128) | 1) +#define MT8135_PIN_128_DPIHSYNC__FUNC_EINT99 (MTK_PIN_NO(128) | 2) +#define MT8135_PIN_128_DPIHSYNC__FUNC_I2SIN_WS (MTK_PIN_NO(128) | 3) +#define MT8135_PIN_128_DPIHSYNC__FUNC_DAC_WS (MTK_PIN_NO(128) | 4) +#define MT8135_PIN_128_DPIHSYNC__FUNC_PCM1_WS (MTK_PIN_NO(128) | 5) +#define MT8135_PIN_128_DPIHSYNC__FUNC_IRDA_RXD (MTK_PIN_NO(128) | 6) + +#define MT8135_PIN_129_DPIB0__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define MT8135_PIN_129_DPIB0__FUNC_DPI0_B0 (MTK_PIN_NO(129) | 1) +#define MT8135_PIN_129_DPIB0__FUNC_EINT102 (MTK_PIN_NO(129) | 2) +#define MT8135_PIN_129_DPIB0__FUNC_SCL0 (MTK_PIN_NO(129) | 4) +#define MT8135_PIN_129_DPIB0__FUNC_DISP_PWM (MTK_PIN_NO(129) | 5) + +#define MT8135_PIN_130_DPIB1__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define MT8135_PIN_130_DPIB1__FUNC_DPI0_B1 (MTK_PIN_NO(130) | 1) +#define MT8135_PIN_130_DPIB1__FUNC_EINT103 (MTK_PIN_NO(130) | 2) +#define MT8135_PIN_130_DPIB1__FUNC_CLKM0 (MTK_PIN_NO(130) | 3) +#define MT8135_PIN_130_DPIB1__FUNC_SDA0 (MTK_PIN_NO(130) | 4) +#define MT8135_PIN_130_DPIB1__FUNC_PWM1 
(MTK_PIN_NO(130) | 5) + +#define MT8135_PIN_131_DPIB2__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define MT8135_PIN_131_DPIB2__FUNC_DPI0_B2 (MTK_PIN_NO(131) | 1) +#define MT8135_PIN_131_DPIB2__FUNC_EINT104 (MTK_PIN_NO(131) | 2) +#define MT8135_PIN_131_DPIB2__FUNC_CLKM1 (MTK_PIN_NO(131) | 3) +#define MT8135_PIN_131_DPIB2__FUNC_SCL1 (MTK_PIN_NO(131) | 4) +#define MT8135_PIN_131_DPIB2__FUNC_PWM2 (MTK_PIN_NO(131) | 5) + +#define MT8135_PIN_132_DPIB3__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define MT8135_PIN_132_DPIB3__FUNC_DPI0_B3 (MTK_PIN_NO(132) | 1) +#define MT8135_PIN_132_DPIB3__FUNC_EINT105 (MTK_PIN_NO(132) | 2) +#define MT8135_PIN_132_DPIB3__FUNC_CLKM2 (MTK_PIN_NO(132) | 3) +#define MT8135_PIN_132_DPIB3__FUNC_SDA1 (MTK_PIN_NO(132) | 4) +#define MT8135_PIN_132_DPIB3__FUNC_PWM3 (MTK_PIN_NO(132) | 5) + +#define MT8135_PIN_133_DPIB4__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define MT8135_PIN_133_DPIB4__FUNC_DPI0_B4 (MTK_PIN_NO(133) | 1) +#define MT8135_PIN_133_DPIB4__FUNC_EINT106 (MTK_PIN_NO(133) | 2) +#define MT8135_PIN_133_DPIB4__FUNC_CLKM3 (MTK_PIN_NO(133) | 3) +#define MT8135_PIN_133_DPIB4__FUNC_SCL2 (MTK_PIN_NO(133) | 4) +#define MT8135_PIN_133_DPIB4__FUNC_PWM4 (MTK_PIN_NO(133) | 5) + +#define MT8135_PIN_134_DPIB5__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define MT8135_PIN_134_DPIB5__FUNC_DPI0_B5 (MTK_PIN_NO(134) | 1) +#define MT8135_PIN_134_DPIB5__FUNC_EINT107 (MTK_PIN_NO(134) | 2) +#define MT8135_PIN_134_DPIB5__FUNC_CLKM4 (MTK_PIN_NO(134) | 3) +#define MT8135_PIN_134_DPIB5__FUNC_SDA2 (MTK_PIN_NO(134) | 4) +#define MT8135_PIN_134_DPIB5__FUNC_PWM5 (MTK_PIN_NO(134) | 5) + +#define MT8135_PIN_135_DPIB6__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define MT8135_PIN_135_DPIB6__FUNC_DPI0_B6 (MTK_PIN_NO(135) | 1) +#define MT8135_PIN_135_DPIB6__FUNC_EINT108 (MTK_PIN_NO(135) | 2) +#define MT8135_PIN_135_DPIB6__FUNC_CLKM5 (MTK_PIN_NO(135) | 3) +#define MT8135_PIN_135_DPIB6__FUNC_SCL3 (MTK_PIN_NO(135) | 4) +#define MT8135_PIN_135_DPIB6__FUNC_PWM6 (MTK_PIN_NO(135) | 5) + +#define MT8135_PIN_136_DPIB7__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define MT8135_PIN_136_DPIB7__FUNC_DPI0_B7 (MTK_PIN_NO(136) | 1) +#define MT8135_PIN_136_DPIB7__FUNC_EINT109 (MTK_PIN_NO(136) | 2) +#define MT8135_PIN_136_DPIB7__FUNC_CLKM6 (MTK_PIN_NO(136) | 3) +#define MT8135_PIN_136_DPIB7__FUNC_SDA3 (MTK_PIN_NO(136) | 4) +#define MT8135_PIN_136_DPIB7__FUNC_PWM7 (MTK_PIN_NO(136) | 5) + +#define MT8135_PIN_137_DPIG0__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define MT8135_PIN_137_DPIG0__FUNC_DPI0_G0 (MTK_PIN_NO(137) | 1) +#define MT8135_PIN_137_DPIG0__FUNC_EINT110 (MTK_PIN_NO(137) | 2) +#define MT8135_PIN_137_DPIG0__FUNC_DSP1_ID (MTK_PIN_NO(137) | 5) + +#define MT8135_PIN_138_DPIG2__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define MT8135_PIN_138_DPIG2__FUNC_DPI0_G2 (MTK_PIN_NO(138) | 1) +#define MT8135_PIN_138_DPIG2__FUNC_EINT112 (MTK_PIN_NO(138) | 2) +#define MT8135_PIN_138_DPIG2__FUNC_DSP1_IMS (MTK_PIN_NO(138) | 5) + +#define MT8135_PIN_139_DPIG3__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define MT8135_PIN_139_DPIG3__FUNC_DPI0_G3 (MTK_PIN_NO(139) | 1) +#define MT8135_PIN_139_DPIG3__FUNC_EINT113 (MTK_PIN_NO(139) | 2) +#define MT8135_PIN_139_DPIG3__FUNC_DSP2_IMS (MTK_PIN_NO(139) | 5) + +#define MT8135_PIN_140_DPIG6__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define MT8135_PIN_140_DPIG6__FUNC_DPI0_G6 (MTK_PIN_NO(140) | 1) +#define MT8135_PIN_140_DPIG6__FUNC_EINT116 (MTK_PIN_NO(140) | 2) +#define MT8135_PIN_140_DPIG6__FUNC_CM2DAT_2X_2 (MTK_PIN_NO(140) | 4) + +#define MT8135_PIN_141_DPIG7__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define MT8135_PIN_141_DPIG7__FUNC_DPI0_G7 
(MTK_PIN_NO(141) | 1) +#define MT8135_PIN_141_DPIG7__FUNC_EINT117 (MTK_PIN_NO(141) | 2) +#define MT8135_PIN_141_DPIG7__FUNC_CM2DAT_2X_3 (MTK_PIN_NO(141) | 4) + +#define MT8135_PIN_142_DPIR0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define MT8135_PIN_142_DPIR0__FUNC_DPI0_R0 (MTK_PIN_NO(142) | 1) +#define MT8135_PIN_142_DPIR0__FUNC_EINT118 (MTK_PIN_NO(142) | 2) +#define MT8135_PIN_142_DPIR0__FUNC_CM2DAT_2X_4 (MTK_PIN_NO(142) | 4) + +#define MT8135_PIN_143_DPIR1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define MT8135_PIN_143_DPIR1__FUNC_DPI0_R1 (MTK_PIN_NO(143) | 1) +#define MT8135_PIN_143_DPIR1__FUNC_EINT119 (MTK_PIN_NO(143) | 2) +#define MT8135_PIN_143_DPIR1__FUNC_CM2DAT_2X_5 (MTK_PIN_NO(143) | 4) + +#define MT8135_PIN_144_DPIR2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define MT8135_PIN_144_DPIR2__FUNC_DPI0_R2 (MTK_PIN_NO(144) | 1) +#define MT8135_PIN_144_DPIR2__FUNC_EINT120 (MTK_PIN_NO(144) | 2) +#define MT8135_PIN_144_DPIR2__FUNC_CM2DAT_2X_6 (MTK_PIN_NO(144) | 4) + +#define MT8135_PIN_145_DPIR4__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define MT8135_PIN_145_DPIR4__FUNC_DPI0_R4 (MTK_PIN_NO(145) | 1) +#define MT8135_PIN_145_DPIR4__FUNC_EINT122 (MTK_PIN_NO(145) | 2) +#define MT8135_PIN_145_DPIR4__FUNC_CM2DAT_2X_8 (MTK_PIN_NO(145) | 4) + +#define MT8135_PIN_146_DPIR5__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define MT8135_PIN_146_DPIR5__FUNC_DPI0_R5 (MTK_PIN_NO(146) | 1) +#define MT8135_PIN_146_DPIR5__FUNC_EINT123 (MTK_PIN_NO(146) | 2) +#define MT8135_PIN_146_DPIR5__FUNC_CM2DAT_2X_9 (MTK_PIN_NO(146) | 4) + +#define MT8135_PIN_147_DPIR6__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define MT8135_PIN_147_DPIR6__FUNC_DPI0_R6 (MTK_PIN_NO(147) | 1) +#define MT8135_PIN_147_DPIR6__FUNC_EINT124 (MTK_PIN_NO(147) | 2) +#define MT8135_PIN_147_DPIR6__FUNC_CM2VSYNC_2X (MTK_PIN_NO(147) | 4) + +#define MT8135_PIN_148_DPIR7__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define MT8135_PIN_148_DPIR7__FUNC_DPI0_R7 (MTK_PIN_NO(148) | 1) +#define MT8135_PIN_148_DPIR7__FUNC_EINT125 (MTK_PIN_NO(148) | 2) +#define MT8135_PIN_148_DPIR7__FUNC_CM2HSYNC_2X (MTK_PIN_NO(148) | 4) + +#define MT8135_PIN_149_TDN3__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define MT8135_PIN_149_TDN3__FUNC_EINT36 (MTK_PIN_NO(149) | 2) + +#define MT8135_PIN_150_TDP3__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define MT8135_PIN_150_TDP3__FUNC_EINT35 (MTK_PIN_NO(150) | 2) + +#define MT8135_PIN_151_TDN2__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define MT8135_PIN_151_TDN2__FUNC_EINT169 (MTK_PIN_NO(151) | 2) + +#define MT8135_PIN_152_TDP2__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define MT8135_PIN_152_TDP2__FUNC_EINT168 (MTK_PIN_NO(152) | 2) + +#define MT8135_PIN_153_TCN__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define MT8135_PIN_153_TCN__FUNC_EINT163 (MTK_PIN_NO(153) | 2) + +#define MT8135_PIN_154_TCP__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define MT8135_PIN_154_TCP__FUNC_EINT162 (MTK_PIN_NO(154) | 2) + +#define MT8135_PIN_155_TDN1__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define MT8135_PIN_155_TDN1__FUNC_EINT167 (MTK_PIN_NO(155) | 2) + +#define MT8135_PIN_156_TDP1__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define MT8135_PIN_156_TDP1__FUNC_EINT166 (MTK_PIN_NO(156) | 2) + +#define MT8135_PIN_157_TDN0__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define MT8135_PIN_157_TDN0__FUNC_EINT165 (MTK_PIN_NO(157) | 2) + +#define MT8135_PIN_158_TDP0__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define MT8135_PIN_158_TDP0__FUNC_EINT164 (MTK_PIN_NO(158) | 2) + +#define MT8135_PIN_159_RDN3__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define MT8135_PIN_159_RDN3__FUNC_EINT18 (MTK_PIN_NO(159) | 2) + +#define MT8135_PIN_160_RDP3__FUNC_GPIO160 (MTK_PIN_NO(160) 
| 0) +#define MT8135_PIN_160_RDP3__FUNC_EINT30 (MTK_PIN_NO(160) | 2) + +#define MT8135_PIN_161_RDN2__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define MT8135_PIN_161_RDN2__FUNC_EINT31 (MTK_PIN_NO(161) | 2) + +#define MT8135_PIN_162_RDP2__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define MT8135_PIN_162_RDP2__FUNC_EINT32 (MTK_PIN_NO(162) | 2) + +#define MT8135_PIN_163_RCN__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define MT8135_PIN_163_RCN__FUNC_EINT33 (MTK_PIN_NO(163) | 2) + +#define MT8135_PIN_164_RCP__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define MT8135_PIN_164_RCP__FUNC_EINT39 (MTK_PIN_NO(164) | 2) + +#define MT8135_PIN_165_RDN1__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) + +#define MT8135_PIN_166_RDP1__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) + +#define MT8135_PIN_167_RDN0__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) + +#define MT8135_PIN_168_RDP0__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) + +#define MT8135_PIN_169_RDN1_A__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define MT8135_PIN_169_RDN1_A__FUNC_CMDAT6 (MTK_PIN_NO(169) | 1) +#define MT8135_PIN_169_RDN1_A__FUNC_EINT175 (MTK_PIN_NO(169) | 2) + +#define MT8135_PIN_170_RDP1_A__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define MT8135_PIN_170_RDP1_A__FUNC_CMDAT7 (MTK_PIN_NO(170) | 1) +#define MT8135_PIN_170_RDP1_A__FUNC_EINT174 (MTK_PIN_NO(170) | 2) + +#define MT8135_PIN_171_RCN_A__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define MT8135_PIN_171_RCN_A__FUNC_CMDAT8 (MTK_PIN_NO(171) | 1) +#define MT8135_PIN_171_RCN_A__FUNC_EINT171 (MTK_PIN_NO(171) | 2) + +#define MT8135_PIN_172_RCP_A__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define MT8135_PIN_172_RCP_A__FUNC_CMDAT9 (MTK_PIN_NO(172) | 1) +#define MT8135_PIN_172_RCP_A__FUNC_EINT170 (MTK_PIN_NO(172) | 2) + +#define MT8135_PIN_173_RDN0_A__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define MT8135_PIN_173_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(173) | 1) +#define MT8135_PIN_173_RDN0_A__FUNC_EINT173 (MTK_PIN_NO(173) | 2) + +#define MT8135_PIN_174_RDP0_A__FUNC_GPIO174 (MTK_PIN_NO(174) | 0) +#define MT8135_PIN_174_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(174) | 1) +#define MT8135_PIN_174_RDP0_A__FUNC_EINT172 (MTK_PIN_NO(174) | 2) + +#define MT8135_PIN_175_RDN1_B__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define MT8135_PIN_175_RDN1_B__FUNC_CMDAT2 (MTK_PIN_NO(175) | 1) +#define MT8135_PIN_175_RDN1_B__FUNC_EINT181 (MTK_PIN_NO(175) | 2) +#define MT8135_PIN_175_RDN1_B__FUNC_CMCSD2 (MTK_PIN_NO(175) | 3) + +#define MT8135_PIN_176_RDP1_B__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) +#define MT8135_PIN_176_RDP1_B__FUNC_CMDAT3 (MTK_PIN_NO(176) | 1) +#define MT8135_PIN_176_RDP1_B__FUNC_EINT180 (MTK_PIN_NO(176) | 2) +#define MT8135_PIN_176_RDP1_B__FUNC_CMCSD3 (MTK_PIN_NO(176) | 3) + +#define MT8135_PIN_177_RCN_B__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) +#define MT8135_PIN_177_RCN_B__FUNC_CMDAT4 (MTK_PIN_NO(177) | 1) +#define MT8135_PIN_177_RCN_B__FUNC_EINT177 (MTK_PIN_NO(177) | 2) + +#define MT8135_PIN_178_RCP_B__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) +#define MT8135_PIN_178_RCP_B__FUNC_CMDAT5 (MTK_PIN_NO(178) | 1) +#define MT8135_PIN_178_RCP_B__FUNC_EINT176 (MTK_PIN_NO(178) | 2) + +#define MT8135_PIN_179_RDN0_B__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) +#define MT8135_PIN_179_RDN0_B__FUNC_CMDAT0 (MTK_PIN_NO(179) | 1) +#define MT8135_PIN_179_RDN0_B__FUNC_EINT179 (MTK_PIN_NO(179) | 2) +#define MT8135_PIN_179_RDN0_B__FUNC_CMCSD0 (MTK_PIN_NO(179) | 3) + +#define MT8135_PIN_180_RDP0_B__FUNC_GPIO180 (MTK_PIN_NO(180) | 0) +#define MT8135_PIN_180_RDP0_B__FUNC_CMDAT1 (MTK_PIN_NO(180) | 1) +#define MT8135_PIN_180_RDP0_B__FUNC_EINT178 (MTK_PIN_NO(180) | 2) +#define MT8135_PIN_180_RDP0_B__FUNC_CMCSD1 (MTK_PIN_NO(180) | 3) + +#define 
MT8135_PIN_181_CMPCLK__FUNC_GPIO181 (MTK_PIN_NO(181) | 0) +#define MT8135_PIN_181_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(181) | 1) +#define MT8135_PIN_181_CMPCLK__FUNC_EINT182 (MTK_PIN_NO(181) | 2) +#define MT8135_PIN_181_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(181) | 3) +#define MT8135_PIN_181_CMPCLK__FUNC_CM2MCLK_4X (MTK_PIN_NO(181) | 4) +#define MT8135_PIN_181_CMPCLK__FUNC_TS_AUXADC_SEL_3 (MTK_PIN_NO(181) | 5) +#define MT8135_PIN_181_CMPCLK__FUNC_VENC_TEST_CK (MTK_PIN_NO(181) | 6) +#define MT8135_PIN_181_CMPCLK__FUNC_TESTA_OUT27 (MTK_PIN_NO(181) | 7) + +#define MT8135_PIN_182_CMMCLK__FUNC_GPIO182 (MTK_PIN_NO(182) | 0) +#define MT8135_PIN_182_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(182) | 1) +#define MT8135_PIN_182_CMMCLK__FUNC_EINT183 (MTK_PIN_NO(182) | 2) +#define MT8135_PIN_182_CMMCLK__FUNC_TS_AUXADC_SEL_2 (MTK_PIN_NO(182) | 5) +#define MT8135_PIN_182_CMMCLK__FUNC_TESTA_OUT28 (MTK_PIN_NO(182) | 7) + +#define MT8135_PIN_183_CMRST__FUNC_GPIO183 (MTK_PIN_NO(183) | 0) +#define MT8135_PIN_183_CMRST__FUNC_CMRST (MTK_PIN_NO(183) | 1) +#define MT8135_PIN_183_CMRST__FUNC_EINT185 (MTK_PIN_NO(183) | 2) +#define MT8135_PIN_183_CMRST__FUNC_TS_AUXADC_SEL_1 (MTK_PIN_NO(183) | 5) +#define MT8135_PIN_183_CMRST__FUNC_TESTA_OUT30 (MTK_PIN_NO(183) | 7) + +#define MT8135_PIN_184_CMPDN__FUNC_GPIO184 (MTK_PIN_NO(184) | 0) +#define MT8135_PIN_184_CMPDN__FUNC_CMPDN (MTK_PIN_NO(184) | 1) +#define MT8135_PIN_184_CMPDN__FUNC_EINT184 (MTK_PIN_NO(184) | 2) +#define MT8135_PIN_184_CMPDN__FUNC_TS_AUXADC_SEL_0 (MTK_PIN_NO(184) | 5) +#define MT8135_PIN_184_CMPDN__FUNC_TESTA_OUT29 (MTK_PIN_NO(184) | 7) + +#define MT8135_PIN_185_CMFLASH__FUNC_GPIO185 (MTK_PIN_NO(185) | 0) +#define MT8135_PIN_185_CMFLASH__FUNC_CMFLASH (MTK_PIN_NO(185) | 1) +#define MT8135_PIN_185_CMFLASH__FUNC_EINT186 (MTK_PIN_NO(185) | 2) +#define MT8135_PIN_185_CMFLASH__FUNC_CM2MCLK_3X (MTK_PIN_NO(185) | 3) +#define MT8135_PIN_185_CMFLASH__FUNC_MFG_TEST_CK_1 (MTK_PIN_NO(185) | 6) +#define MT8135_PIN_185_CMFLASH__FUNC_TESTA_OUT31 (MTK_PIN_NO(185) | 7) + +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_GPIO186 (MTK_PIN_NO(186) | 0) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_MRG_I2S_P_CLK (MTK_PIN_NO(186) | 1) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_EINT14 (MTK_PIN_NO(186) | 2) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(186) | 3) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_PCM0_CK (MTK_PIN_NO(186) | 4) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_DSP2_ICK (MTK_PIN_NO(186) | 5) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_IMG_TEST_CK (MTK_PIN_NO(186) | 6) +#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_USB_SCL (MTK_PIN_NO(186) | 7) + +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_GPIO187 (MTK_PIN_NO(187) | 0) +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_MRG_I2S_SYNC (MTK_PIN_NO(187) | 1) +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_EINT16 (MTK_PIN_NO(187) | 2) +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_I2SIN_WS (MTK_PIN_NO(187) | 3) +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_PCM0_WS (MTK_PIN_NO(187) | 4) +#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_DISP_TEST_CK (MTK_PIN_NO(187) | 6) + +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_GPIO188 (MTK_PIN_NO(188) | 0) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MRG_I2S_PCM_RX (MTK_PIN_NO(188) | 1) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_EINT15 (MTK_PIN_NO(188) | 2) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_I2SIN_DAT (MTK_PIN_NO(188) | 3) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_PCM0_DI (MTK_PIN_NO(188) | 4) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_DSP2_ID (MTK_PIN_NO(188) | 5) 
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MFG_TEST_CK (MTK_PIN_NO(188) | 6) +#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_USB_SDA (MTK_PIN_NO(188) | 7) + +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_GPIO189 (MTK_PIN_NO(189) | 0) +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_MRG_I2S_PCM_TX (MTK_PIN_NO(189) | 1) +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_EINT17 (MTK_PIN_NO(189) | 2) +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_I2SOUT_DAT (MTK_PIN_NO(189) | 3) +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_PCM0_DO (MTK_PIN_NO(189) | 4) +#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_VDEC_TEST_CK (MTK_PIN_NO(189) | 6) + +#define MT8135_PIN_190_SRCLKENAI__FUNC_GPIO190 (MTK_PIN_NO(190) | 0) +#define MT8135_PIN_190_SRCLKENAI__FUNC_SRCLKENAI (MTK_PIN_NO(190) | 1) + +#define MT8135_PIN_191_URXD3__FUNC_GPIO191 (MTK_PIN_NO(191) | 0) +#define MT8135_PIN_191_URXD3__FUNC_URXD3 (MTK_PIN_NO(191) | 1) +#define MT8135_PIN_191_URXD3__FUNC_EINT87 (MTK_PIN_NO(191) | 2) +#define MT8135_PIN_191_URXD3__FUNC_UTXD3 (MTK_PIN_NO(191) | 3) +#define MT8135_PIN_191_URXD3__FUNC_TS_AUX_ST (MTK_PIN_NO(191) | 5) +#define MT8135_PIN_191_URXD3__FUNC_PWM4 (MTK_PIN_NO(191) | 6) + +#define MT8135_PIN_192_UTXD3__FUNC_GPIO192 (MTK_PIN_NO(192) | 0) +#define MT8135_PIN_192_UTXD3__FUNC_UTXD3 (MTK_PIN_NO(192) | 1) +#define MT8135_PIN_192_UTXD3__FUNC_EINT86 (MTK_PIN_NO(192) | 2) +#define MT8135_PIN_192_UTXD3__FUNC_URXD3 (MTK_PIN_NO(192) | 3) +#define MT8135_PIN_192_UTXD3__FUNC_TS_AUX_CS_B (MTK_PIN_NO(192) | 5) +#define MT8135_PIN_192_UTXD3__FUNC_PWM3 (MTK_PIN_NO(192) | 6) + +#define MT8135_PIN_193_SDA2__FUNC_GPIO193 (MTK_PIN_NO(193) | 0) +#define MT8135_PIN_193_SDA2__FUNC_SDA2 (MTK_PIN_NO(193) | 1) +#define MT8135_PIN_193_SDA2__FUNC_EINT95 (MTK_PIN_NO(193) | 2) +#define MT8135_PIN_193_SDA2__FUNC_CLKM5 (MTK_PIN_NO(193) | 3) +#define MT8135_PIN_193_SDA2__FUNC_PWM5 (MTK_PIN_NO(193) | 4) +#define MT8135_PIN_193_SDA2__FUNC_TS_AUX_PWDB (MTK_PIN_NO(193) | 5) + +#define MT8135_PIN_194_SCL2__FUNC_GPIO194 (MTK_PIN_NO(194) | 0) +#define MT8135_PIN_194_SCL2__FUNC_SCL2 (MTK_PIN_NO(194) | 1) +#define MT8135_PIN_194_SCL2__FUNC_EINT94 (MTK_PIN_NO(194) | 2) +#define MT8135_PIN_194_SCL2__FUNC_CLKM4 (MTK_PIN_NO(194) | 3) +#define MT8135_PIN_194_SCL2__FUNC_PWM4 (MTK_PIN_NO(194) | 4) +#define MT8135_PIN_194_SCL2__FUNC_TS_AUXADC_TEST_CK (MTK_PIN_NO(194) | 5) + +#define MT8135_PIN_195_SDA1__FUNC_GPIO195 (MTK_PIN_NO(195) | 0) +#define MT8135_PIN_195_SDA1__FUNC_SDA1 (MTK_PIN_NO(195) | 1) +#define MT8135_PIN_195_SDA1__FUNC_EINT93 (MTK_PIN_NO(195) | 2) +#define MT8135_PIN_195_SDA1__FUNC_CLKM3 (MTK_PIN_NO(195) | 3) +#define MT8135_PIN_195_SDA1__FUNC_PWM3 (MTK_PIN_NO(195) | 4) +#define MT8135_PIN_195_SDA1__FUNC_TS_AUX_SCLK_PWDB (MTK_PIN_NO(195) | 5) + +#define MT8135_PIN_196_SCL1__FUNC_GPIO196 (MTK_PIN_NO(196) | 0) +#define MT8135_PIN_196_SCL1__FUNC_SCL1 (MTK_PIN_NO(196) | 1) +#define MT8135_PIN_196_SCL1__FUNC_EINT92 (MTK_PIN_NO(196) | 2) +#define MT8135_PIN_196_SCL1__FUNC_CLKM2 (MTK_PIN_NO(196) | 3) +#define MT8135_PIN_196_SCL1__FUNC_PWM2 (MTK_PIN_NO(196) | 4) +#define MT8135_PIN_196_SCL1__FUNC_TS_AUX_DIN (MTK_PIN_NO(196) | 5) + +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_GPIO197 (MTK_PIN_NO(197) | 0) +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_MSDC3_DAT2 (MTK_PIN_NO(197) | 1) +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_EINT71 (MTK_PIN_NO(197) | 2) +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_SCL6 (MTK_PIN_NO(197) | 3) +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_PWM5 (MTK_PIN_NO(197) | 4) +#define MT8135_PIN_197_MSDC3_DAT2__FUNC_CLKM4 (MTK_PIN_NO(197) | 5) +#define 
MT8135_PIN_197_MSDC3_DAT2__FUNC_MFG_TEST_CK_2 (MTK_PIN_NO(197) | 6) + +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_GPIO198 (MTK_PIN_NO(198) | 0) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MSDC3_DAT3 (MTK_PIN_NO(198) | 1) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_EINT72 (MTK_PIN_NO(198) | 2) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_SDA6 (MTK_PIN_NO(198) | 3) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_PWM6 (MTK_PIN_NO(198) | 4) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_CLKM5 (MTK_PIN_NO(198) | 5) +#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MFG_TEST_CK_3 (MTK_PIN_NO(198) | 6) + +#define MT8135_PIN_199_MSDC3_CMD__FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_MSDC3_CMD (MTK_PIN_NO(199) | 1) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_EINT68 (MTK_PIN_NO(199) | 2) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_SDA2 (MTK_PIN_NO(199) | 3) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_PWM2 (MTK_PIN_NO(199) | 4) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_CLKM1 (MTK_PIN_NO(199) | 5) +#define MT8135_PIN_199_MSDC3_CMD__FUNC_MFG_TEST_CK_4 (MTK_PIN_NO(199) | 6) + +#define MT8135_PIN_200_MSDC3_CLK__FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT8135_PIN_200_MSDC3_CLK__FUNC_MSDC3_CLK (MTK_PIN_NO(200) | 1) +#define MT8135_PIN_200_MSDC3_CLK__FUNC_EINT67 (MTK_PIN_NO(200) | 2) +#define MT8135_PIN_200_MSDC3_CLK__FUNC_SCL2 (MTK_PIN_NO(200) | 3) +#define MT8135_PIN_200_MSDC3_CLK__FUNC_PWM1 (MTK_PIN_NO(200) | 4) +#define MT8135_PIN_200_MSDC3_CLK__FUNC_CLKM0 (MTK_PIN_NO(200) | 5) + +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_MSDC3_DAT1 (MTK_PIN_NO(201) | 1) +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_EINT70 (MTK_PIN_NO(201) | 2) +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_SDA3 (MTK_PIN_NO(201) | 3) +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_PWM4 (MTK_PIN_NO(201) | 4) +#define MT8135_PIN_201_MSDC3_DAT1__FUNC_CLKM3 (MTK_PIN_NO(201) | 5) + +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_GPIO202 (MTK_PIN_NO(202) | 0) +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_MSDC3_DAT0 (MTK_PIN_NO(202) | 1) +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_EINT69 (MTK_PIN_NO(202) | 2) +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_SCL3 (MTK_PIN_NO(202) | 3) +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_PWM3 (MTK_PIN_NO(202) | 4) +#define MT8135_PIN_202_MSDC3_DAT0__FUNC_CLKM2 (MTK_PIN_NO(202) | 5) + +#endif /* __DTS_MT8135_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/mt8183-pinfunc.h b/include/dt-bindings/pinctrl/mt8183-pinfunc.h new file mode 100644 index 000000000000..6221cd712718 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt8183-pinfunc.h @@ -0,0 +1,1120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 MediaTek Inc. 
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com> + * + */ + +#ifndef __MT8183_PINFUNC_H +#define __MT8183_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define PINMUX_GPIO0__FUNC_MRG_SYNC (MTK_PIN_NO(0) | 1) +#define PINMUX_GPIO0__FUNC_PCM0_SYNC (MTK_PIN_NO(0) | 2) +#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 3) +#define PINMUX_GPIO0__FUNC_SRCLKENAI0 (MTK_PIN_NO(0) | 4) +#define PINMUX_GPIO0__FUNC_SCP_SPI2_CS (MTK_PIN_NO(0) | 5) +#define PINMUX_GPIO0__FUNC_I2S3_MCK (MTK_PIN_NO(0) | 6) +#define PINMUX_GPIO0__FUNC_SPI2_CSB (MTK_PIN_NO(0) | 7) + +#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define PINMUX_GPIO1__FUNC_MRG_CLK (MTK_PIN_NO(1) | 1) +#define PINMUX_GPIO1__FUNC_PCM0_CLK (MTK_PIN_NO(1) | 2) +#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 3) +#define PINMUX_GPIO1__FUNC_CLKM3 (MTK_PIN_NO(1) | 4) +#define PINMUX_GPIO1__FUNC_SCP_SPI2_MO (MTK_PIN_NO(1) | 5) +#define PINMUX_GPIO1__FUNC_I2S3_BCK (MTK_PIN_NO(1) | 6) +#define PINMUX_GPIO1__FUNC_SPI2_MO (MTK_PIN_NO(1) | 7) + +#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define PINMUX_GPIO2__FUNC_MRG_DO (MTK_PIN_NO(2) | 1) +#define PINMUX_GPIO2__FUNC_PCM0_DO (MTK_PIN_NO(2) | 2) +#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 3) +#define PINMUX_GPIO2__FUNC_SCL6 (MTK_PIN_NO(2) | 4) +#define PINMUX_GPIO2__FUNC_SCP_SPI2_CK (MTK_PIN_NO(2) | 5) +#define PINMUX_GPIO2__FUNC_I2S3_LRCK (MTK_PIN_NO(2) | 6) +#define PINMUX_GPIO2__FUNC_SPI2_CLK (MTK_PIN_NO(2) | 7) + +#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define PINMUX_GPIO3__FUNC_MRG_DI (MTK_PIN_NO(3) | 1) +#define PINMUX_GPIO3__FUNC_PCM0_DI (MTK_PIN_NO(3) | 2) +#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 3) +#define PINMUX_GPIO3__FUNC_SDA6 (MTK_PIN_NO(3) | 4) +#define PINMUX_GPIO3__FUNC_TDM_MCK (MTK_PIN_NO(3) | 5) +#define PINMUX_GPIO3__FUNC_I2S3_DO (MTK_PIN_NO(3) | 6) +#define PINMUX_GPIO3__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(3) | 7) + +#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define PINMUX_GPIO4__FUNC_PWM_B (MTK_PIN_NO(4) | 1) +#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2) +#define PINMUX_GPIO4__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(4) | 3) +#define PINMUX_GPIO4__FUNC_MD_URXD1 (MTK_PIN_NO(4) | 4) +#define PINMUX_GPIO4__FUNC_TDM_BCK (MTK_PIN_NO(4) | 5) +#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6) +#define PINMUX_GPIO4__FUNC_DAP_MD32_SWD (MTK_PIN_NO(4) | 7) + +#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define PINMUX_GPIO5__FUNC_PWM_C (MTK_PIN_NO(5) | 1) +#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2) +#define PINMUX_GPIO5__FUNC_SSPM_URXD_AO (MTK_PIN_NO(5) | 3) +#define PINMUX_GPIO5__FUNC_MD_UTXD1 (MTK_PIN_NO(5) | 4) +#define PINMUX_GPIO5__FUNC_TDM_LRCK (MTK_PIN_NO(5) | 5) +#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6) +#define PINMUX_GPIO5__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(5) | 7) + +#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define PINMUX_GPIO6__FUNC_PWM_A (MTK_PIN_NO(6) | 1) +#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2) +#define PINMUX_GPIO6__FUNC_IDDIG (MTK_PIN_NO(6) | 3) +#define PINMUX_GPIO6__FUNC_MD_URXD0 (MTK_PIN_NO(6) | 4) +#define PINMUX_GPIO6__FUNC_TDM_DATA0 (MTK_PIN_NO(6) | 5) +#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6) +#define PINMUX_GPIO6__FUNC_CMFLASH (MTK_PIN_NO(6) | 7) + +#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define PINMUX_GPIO7__FUNC_SPI1_B_MI (MTK_PIN_NO(7) | 1) +#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2) +#define 
PINMUX_GPIO7__FUNC_USB_DRVVBUS (MTK_PIN_NO(7) | 3) +#define PINMUX_GPIO7__FUNC_MD_UTXD0 (MTK_PIN_NO(7) | 4) +#define PINMUX_GPIO7__FUNC_TDM_DATA1 (MTK_PIN_NO(7) | 5) +#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6) +#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 7) + +#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define PINMUX_GPIO8__FUNC_SPI1_B_CSB (MTK_PIN_NO(8) | 1) +#define PINMUX_GPIO8__FUNC_ANT_SEL3 (MTK_PIN_NO(8) | 2) +#define PINMUX_GPIO8__FUNC_SCL7 (MTK_PIN_NO(8) | 3) +#define PINMUX_GPIO8__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(8) | 4) +#define PINMUX_GPIO8__FUNC_TDM_DATA2 (MTK_PIN_NO(8) | 5) +#define PINMUX_GPIO8__FUNC_MD_INT0 (MTK_PIN_NO(8) | 6) +#define PINMUX_GPIO8__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(8) | 7) + +#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define PINMUX_GPIO9__FUNC_SPI1_B_MO (MTK_PIN_NO(9) | 1) +#define PINMUX_GPIO9__FUNC_ANT_SEL4 (MTK_PIN_NO(9) | 2) +#define PINMUX_GPIO9__FUNC_CMMCLK2 (MTK_PIN_NO(9) | 3) +#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(9) | 4) +#define PINMUX_GPIO9__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(9) | 5) +#define PINMUX_GPIO9__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(9) | 6) +#define PINMUX_GPIO9__FUNC_DBG_MON_B10 (MTK_PIN_NO(9) | 7) + +#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define PINMUX_GPIO10__FUNC_SPI1_B_CLK (MTK_PIN_NO(10) | 1) +#define PINMUX_GPIO10__FUNC_ANT_SEL5 (MTK_PIN_NO(10) | 2) +#define PINMUX_GPIO10__FUNC_CMMCLK3 (MTK_PIN_NO(10) | 3) +#define PINMUX_GPIO10__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(10) | 4) +#define PINMUX_GPIO10__FUNC_TDM_DATA3 (MTK_PIN_NO(10) | 5) +#define PINMUX_GPIO10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 6) +#define PINMUX_GPIO10__FUNC_DBG_MON_B11 (MTK_PIN_NO(10) | 7) + +#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define PINMUX_GPIO11__FUNC_TP_URXD1_AO (MTK_PIN_NO(11) | 1) +#define PINMUX_GPIO11__FUNC_IDDIG (MTK_PIN_NO(11) | 2) +#define PINMUX_GPIO11__FUNC_SCL6 (MTK_PIN_NO(11) | 3) +#define PINMUX_GPIO11__FUNC_UCTS1 (MTK_PIN_NO(11) | 4) +#define PINMUX_GPIO11__FUNC_UCTS0 (MTK_PIN_NO(11) | 5) +#define PINMUX_GPIO11__FUNC_SRCLKENAI1 (MTK_PIN_NO(11) | 6) +#define PINMUX_GPIO11__FUNC_I2S5_MCK (MTK_PIN_NO(11) | 7) + +#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define PINMUX_GPIO12__FUNC_TP_UTXD1_AO (MTK_PIN_NO(12) | 1) +#define PINMUX_GPIO12__FUNC_USB_DRVVBUS (MTK_PIN_NO(12) | 2) +#define PINMUX_GPIO12__FUNC_SDA6 (MTK_PIN_NO(12) | 3) +#define PINMUX_GPIO12__FUNC_URTS1 (MTK_PIN_NO(12) | 4) +#define PINMUX_GPIO12__FUNC_URTS0 (MTK_PIN_NO(12) | 5) +#define PINMUX_GPIO12__FUNC_I2S2_DI2 (MTK_PIN_NO(12) | 6) +#define PINMUX_GPIO12__FUNC_I2S5_BCK (MTK_PIN_NO(12) | 7) + +#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define PINMUX_GPIO13__FUNC_DBPI_D0 (MTK_PIN_NO(13) | 1) +#define PINMUX_GPIO13__FUNC_SPI5_MI (MTK_PIN_NO(13) | 2) +#define PINMUX_GPIO13__FUNC_PCM0_SYNC (MTK_PIN_NO(13) | 3) +#define PINMUX_GPIO13__FUNC_MD_URXD0 (MTK_PIN_NO(13) | 4) +#define PINMUX_GPIO13__FUNC_ANT_SEL3 (MTK_PIN_NO(13) | 5) +#define PINMUX_GPIO13__FUNC_I2S0_MCK (MTK_PIN_NO(13) | 6) +#define PINMUX_GPIO13__FUNC_DBG_MON_B15 (MTK_PIN_NO(13) | 7) + +#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define PINMUX_GPIO14__FUNC_DBPI_D1 (MTK_PIN_NO(14) | 1) +#define PINMUX_GPIO14__FUNC_SPI5_CSB (MTK_PIN_NO(14) | 2) +#define PINMUX_GPIO14__FUNC_PCM0_CLK (MTK_PIN_NO(14) | 3) +#define PINMUX_GPIO14__FUNC_MD_UTXD0 (MTK_PIN_NO(14) | 4) +#define PINMUX_GPIO14__FUNC_ANT_SEL4 (MTK_PIN_NO(14) | 5) +#define PINMUX_GPIO14__FUNC_I2S0_BCK (MTK_PIN_NO(14) | 6) +#define 
PINMUX_GPIO14__FUNC_DBG_MON_B16 (MTK_PIN_NO(14) | 7) + +#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define PINMUX_GPIO15__FUNC_DBPI_D2 (MTK_PIN_NO(15) | 1) +#define PINMUX_GPIO15__FUNC_SPI5_MO (MTK_PIN_NO(15) | 2) +#define PINMUX_GPIO15__FUNC_PCM0_DO (MTK_PIN_NO(15) | 3) +#define PINMUX_GPIO15__FUNC_MD_URXD1 (MTK_PIN_NO(15) | 4) +#define PINMUX_GPIO15__FUNC_ANT_SEL5 (MTK_PIN_NO(15) | 5) +#define PINMUX_GPIO15__FUNC_I2S0_LRCK (MTK_PIN_NO(15) | 6) +#define PINMUX_GPIO15__FUNC_DBG_MON_B17 (MTK_PIN_NO(15) | 7) + +#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define PINMUX_GPIO16__FUNC_DBPI_D3 (MTK_PIN_NO(16) | 1) +#define PINMUX_GPIO16__FUNC_SPI5_CLK (MTK_PIN_NO(16) | 2) +#define PINMUX_GPIO16__FUNC_PCM0_DI (MTK_PIN_NO(16) | 3) +#define PINMUX_GPIO16__FUNC_MD_UTXD1 (MTK_PIN_NO(16) | 4) +#define PINMUX_GPIO16__FUNC_ANT_SEL6 (MTK_PIN_NO(16) | 5) +#define PINMUX_GPIO16__FUNC_I2S0_DI (MTK_PIN_NO(16) | 6) +#define PINMUX_GPIO16__FUNC_DBG_MON_B23 (MTK_PIN_NO(16) | 7) + +#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define PINMUX_GPIO17__FUNC_DBPI_D4 (MTK_PIN_NO(17) | 1) +#define PINMUX_GPIO17__FUNC_SPI4_MI (MTK_PIN_NO(17) | 2) +#define PINMUX_GPIO17__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(17) | 3) +#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 4) +#define PINMUX_GPIO17__FUNC_ANT_SEL7 (MTK_PIN_NO(17) | 5) +#define PINMUX_GPIO17__FUNC_I2S3_MCK (MTK_PIN_NO(17) | 6) +#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7) + +#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define PINMUX_GPIO18__FUNC_DBPI_D5 (MTK_PIN_NO(18) | 1) +#define PINMUX_GPIO18__FUNC_SPI4_CSB (MTK_PIN_NO(18) | 2) +#define PINMUX_GPIO18__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(18) | 3) +#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 4) +#define PINMUX_GPIO18__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(18) | 5) +#define PINMUX_GPIO18__FUNC_I2S3_BCK (MTK_PIN_NO(18) | 6) +#define PINMUX_GPIO18__FUNC_DBG_MON_A2 (MTK_PIN_NO(18) | 7) + +#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define PINMUX_GPIO19__FUNC_DBPI_D6 (MTK_PIN_NO(19) | 1) +#define PINMUX_GPIO19__FUNC_SPI4_MO (MTK_PIN_NO(19) | 2) +#define PINMUX_GPIO19__FUNC_CONN_MCU_TDO (MTK_PIN_NO(19) | 3) +#define PINMUX_GPIO19__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(19) | 4) +#define PINMUX_GPIO19__FUNC_URXD1 (MTK_PIN_NO(19) | 5) +#define PINMUX_GPIO19__FUNC_I2S3_LRCK (MTK_PIN_NO(19) | 6) +#define PINMUX_GPIO19__FUNC_DBG_MON_A3 (MTK_PIN_NO(19) | 7) + +#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define PINMUX_GPIO20__FUNC_DBPI_D7 (MTK_PIN_NO(20) | 1) +#define PINMUX_GPIO20__FUNC_SPI4_CLK (MTK_PIN_NO(20) | 2) +#define PINMUX_GPIO20__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(20) | 3) +#define PINMUX_GPIO20__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(20) | 4) +#define PINMUX_GPIO20__FUNC_UTXD1 (MTK_PIN_NO(20) | 5) +#define PINMUX_GPIO20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 6) +#define PINMUX_GPIO20__FUNC_DBG_MON_A19 (MTK_PIN_NO(20) | 7) + +#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define PINMUX_GPIO21__FUNC_DBPI_D8 (MTK_PIN_NO(21) | 1) +#define PINMUX_GPIO21__FUNC_SPI3_MI (MTK_PIN_NO(21) | 2) +#define PINMUX_GPIO21__FUNC_CONN_MCU_TMS (MTK_PIN_NO(21) | 3) +#define PINMUX_GPIO21__FUNC_DAP_MD32_SWD (MTK_PIN_NO(21) | 4) +#define PINMUX_GPIO21__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(21) | 5) +#define PINMUX_GPIO21__FUNC_I2S2_MCK (MTK_PIN_NO(21) | 6) +#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7) + +#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define PINMUX_GPIO22__FUNC_DBPI_D9 (MTK_PIN_NO(22) | 1) 
+#define PINMUX_GPIO22__FUNC_SPI3_CSB (MTK_PIN_NO(22) | 2) +#define PINMUX_GPIO22__FUNC_CONN_MCU_TCK (MTK_PIN_NO(22) | 3) +#define PINMUX_GPIO22__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(22) | 4) +#define PINMUX_GPIO22__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(22) | 5) +#define PINMUX_GPIO22__FUNC_I2S2_BCK (MTK_PIN_NO(22) | 6) +#define PINMUX_GPIO22__FUNC_DBG_MON_B6 (MTK_PIN_NO(22) | 7) + +#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define PINMUX_GPIO23__FUNC_DBPI_D10 (MTK_PIN_NO(23) | 1) +#define PINMUX_GPIO23__FUNC_SPI3_MO (MTK_PIN_NO(23) | 2) +#define PINMUX_GPIO23__FUNC_CONN_MCU_TDI (MTK_PIN_NO(23) | 3) +#define PINMUX_GPIO23__FUNC_UCTS1 (MTK_PIN_NO(23) | 4) +#define PINMUX_GPIO23__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5) +#define PINMUX_GPIO23__FUNC_I2S2_LRCK (MTK_PIN_NO(23) | 6) +#define PINMUX_GPIO23__FUNC_DBG_MON_B7 (MTK_PIN_NO(23) | 7) + +#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define PINMUX_GPIO24__FUNC_DBPI_D11 (MTK_PIN_NO(24) | 1) +#define PINMUX_GPIO24__FUNC_SPI3_CLK (MTK_PIN_NO(24) | 2) +#define PINMUX_GPIO24__FUNC_SRCLKENAI0 (MTK_PIN_NO(24) | 3) +#define PINMUX_GPIO24__FUNC_URTS1 (MTK_PIN_NO(24) | 4) +#define PINMUX_GPIO24__FUNC_IO_JTAG_TCK (MTK_PIN_NO(24) | 5) +#define PINMUX_GPIO24__FUNC_I2S2_DI (MTK_PIN_NO(24) | 6) +#define PINMUX_GPIO24__FUNC_DBG_MON_B31 (MTK_PIN_NO(24) | 7) + +#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define PINMUX_GPIO25__FUNC_DBPI_HSYNC (MTK_PIN_NO(25) | 1) +#define PINMUX_GPIO25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 2) +#define PINMUX_GPIO25__FUNC_SCL6 (MTK_PIN_NO(25) | 3) +#define PINMUX_GPIO25__FUNC_KPCOL2 (MTK_PIN_NO(25) | 4) +#define PINMUX_GPIO25__FUNC_IO_JTAG_TMS (MTK_PIN_NO(25) | 5) +#define PINMUX_GPIO25__FUNC_I2S1_MCK (MTK_PIN_NO(25) | 6) +#define PINMUX_GPIO25__FUNC_DBG_MON_B0 (MTK_PIN_NO(25) | 7) + +#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define PINMUX_GPIO26__FUNC_DBPI_VSYNC (MTK_PIN_NO(26) | 1) +#define PINMUX_GPIO26__FUNC_ANT_SEL1 (MTK_PIN_NO(26) | 2) +#define PINMUX_GPIO26__FUNC_SDA6 (MTK_PIN_NO(26) | 3) +#define PINMUX_GPIO26__FUNC_KPROW2 (MTK_PIN_NO(26) | 4) +#define PINMUX_GPIO26__FUNC_IO_JTAG_TDI (MTK_PIN_NO(26) | 5) +#define PINMUX_GPIO26__FUNC_I2S1_BCK (MTK_PIN_NO(26) | 6) +#define PINMUX_GPIO26__FUNC_DBG_MON_B1 (MTK_PIN_NO(26) | 7) + +#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define PINMUX_GPIO27__FUNC_DBPI_DE (MTK_PIN_NO(27) | 1) +#define PINMUX_GPIO27__FUNC_ANT_SEL2 (MTK_PIN_NO(27) | 2) +#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 3) +#define PINMUX_GPIO27__FUNC_DMIC_CLK (MTK_PIN_NO(27) | 4) +#define PINMUX_GPIO27__FUNC_IO_JTAG_TDO (MTK_PIN_NO(27) | 5) +#define PINMUX_GPIO27__FUNC_I2S1_LRCK (MTK_PIN_NO(27) | 6) +#define PINMUX_GPIO27__FUNC_DBG_MON_B9 (MTK_PIN_NO(27) | 7) + +#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define PINMUX_GPIO28__FUNC_DBPI_CK (MTK_PIN_NO(28) | 1) +#define PINMUX_GPIO28__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(28) | 2) +#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 3) +#define PINMUX_GPIO28__FUNC_DMIC_DAT (MTK_PIN_NO(28) | 4) +#define PINMUX_GPIO28__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(28) | 5) +#define PINMUX_GPIO28__FUNC_I2S1_DO (MTK_PIN_NO(28) | 6) +#define PINMUX_GPIO28__FUNC_DBG_MON_B32 (MTK_PIN_NO(28) | 7) + +#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define PINMUX_GPIO29__FUNC_MSDC1_CLK (MTK_PIN_NO(29) | 1) +#define PINMUX_GPIO29__FUNC_IO_JTAG_TCK (MTK_PIN_NO(29) | 2) +#define PINMUX_GPIO29__FUNC_UDI_TCK (MTK_PIN_NO(29) | 3) +#define PINMUX_GPIO29__FUNC_CONN_DSP_JCK (MTK_PIN_NO(29) | 4) +#define 
PINMUX_GPIO29__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(29) | 5) +#define PINMUX_GPIO29__FUNC_PCM1_CLK (MTK_PIN_NO(29) | 6) +#define PINMUX_GPIO29__FUNC_DBG_MON_A6 (MTK_PIN_NO(29) | 7) + +#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define PINMUX_GPIO30__FUNC_MSDC1_DAT3 (MTK_PIN_NO(30) | 1) +#define PINMUX_GPIO30__FUNC_DAP_MD32_SWD (MTK_PIN_NO(30) | 2) +#define PINMUX_GPIO30__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 3) +#define PINMUX_GPIO30__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(30) | 4) +#define PINMUX_GPIO30__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(30) | 5) +#define PINMUX_GPIO30__FUNC_PCM1_DI (MTK_PIN_NO(30) | 6) +#define PINMUX_GPIO30__FUNC_DBG_MON_A7 (MTK_PIN_NO(30) | 7) + +#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define PINMUX_GPIO31__FUNC_MSDC1_CMD (MTK_PIN_NO(31) | 1) +#define PINMUX_GPIO31__FUNC_IO_JTAG_TMS (MTK_PIN_NO(31) | 2) +#define PINMUX_GPIO31__FUNC_UDI_TMS (MTK_PIN_NO(31) | 3) +#define PINMUX_GPIO31__FUNC_CONN_DSP_JMS (MTK_PIN_NO(31) | 4) +#define PINMUX_GPIO31__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(31) | 5) +#define PINMUX_GPIO31__FUNC_PCM1_SYNC (MTK_PIN_NO(31) | 6) +#define PINMUX_GPIO31__FUNC_DBG_MON_A8 (MTK_PIN_NO(31) | 7) + +#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define PINMUX_GPIO32__FUNC_MSDC1_DAT0 (MTK_PIN_NO(32) | 1) +#define PINMUX_GPIO32__FUNC_IO_JTAG_TDI (MTK_PIN_NO(32) | 2) +#define PINMUX_GPIO32__FUNC_UDI_TDI (MTK_PIN_NO(32) | 3) +#define PINMUX_GPIO32__FUNC_CONN_DSP_JDI (MTK_PIN_NO(32) | 4) +#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(32) | 5) +#define PINMUX_GPIO32__FUNC_PCM1_DO0 (MTK_PIN_NO(32) | 6) +#define PINMUX_GPIO32__FUNC_DBG_MON_A9 (MTK_PIN_NO(32) | 7) + +#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define PINMUX_GPIO33__FUNC_MSDC1_DAT2 (MTK_PIN_NO(33) | 1) +#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 2) +#define PINMUX_GPIO33__FUNC_UDI_NTRST (MTK_PIN_NO(33) | 3) +#define PINMUX_GPIO33__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(33) | 4) +#define PINMUX_GPIO33__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(33) | 5) +#define PINMUX_GPIO33__FUNC_PCM1_DO2 (MTK_PIN_NO(33) | 6) +#define PINMUX_GPIO33__FUNC_DBG_MON_A10 (MTK_PIN_NO(33) | 7) + +#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define PINMUX_GPIO34__FUNC_MSDC1_DAT1 (MTK_PIN_NO(34) | 1) +#define PINMUX_GPIO34__FUNC_IO_JTAG_TDO (MTK_PIN_NO(34) | 2) +#define PINMUX_GPIO34__FUNC_UDI_TDO (MTK_PIN_NO(34) | 3) +#define PINMUX_GPIO34__FUNC_CONN_DSP_JDO (MTK_PIN_NO(34) | 4) +#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(34) | 5) +#define PINMUX_GPIO34__FUNC_PCM1_DO1 (MTK_PIN_NO(34) | 6) +#define PINMUX_GPIO34__FUNC_DBG_MON_A11 (MTK_PIN_NO(34) | 7) + +#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define PINMUX_GPIO35__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(35) | 1) +#define PINMUX_GPIO35__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(35) | 2) +#define PINMUX_GPIO35__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(35) | 3) +#define PINMUX_GPIO35__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(35) | 5) +#define PINMUX_GPIO35__FUNC_CONN_DSP_JMS (MTK_PIN_NO(35) | 6) +#define PINMUX_GPIO35__FUNC_DBG_MON_A28 (MTK_PIN_NO(35) | 7) + +#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define PINMUX_GPIO36__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(36) | 1) +#define PINMUX_GPIO36__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(36) | 2) +#define PINMUX_GPIO36__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(36) | 3) +#define PINMUX_GPIO36__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(36) | 4) +#define PINMUX_GPIO36__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(36) | 5) +#define PINMUX_GPIO36__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(36) | 6) +#define 
PINMUX_GPIO36__FUNC_DBG_MON_A29 (MTK_PIN_NO(36) | 7) + +#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define PINMUX_GPIO37__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(37) | 1) +#define PINMUX_GPIO37__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(37) | 2) +#define PINMUX_GPIO37__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(37) | 3) +#define PINMUX_GPIO37__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(37) | 5) +#define PINMUX_GPIO37__FUNC_CONN_DSP_JDO (MTK_PIN_NO(37) | 6) +#define PINMUX_GPIO37__FUNC_DBG_MON_A30 (MTK_PIN_NO(37) | 7) + +#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define PINMUX_GPIO38__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(38) | 1) +#define PINMUX_GPIO38__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(38) | 3) +#define PINMUX_GPIO38__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(38) | 4) +#define PINMUX_GPIO38__FUNC_DBG_MON_A20 (MTK_PIN_NO(38) | 7) + +#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define PINMUX_GPIO39__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(39) | 1) +#define PINMUX_GPIO39__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(39) | 2) +#define PINMUX_GPIO39__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(39) | 3) +#define PINMUX_GPIO39__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(39) | 5) +#define PINMUX_GPIO39__FUNC_CONN_DSP_JCK (MTK_PIN_NO(39) | 6) +#define PINMUX_GPIO39__FUNC_DBG_MON_A31 (MTK_PIN_NO(39) | 7) + +#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define PINMUX_GPIO40__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(40) | 1) +#define PINMUX_GPIO40__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(40) | 2) +#define PINMUX_GPIO40__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(40) | 3) +#define PINMUX_GPIO40__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(40) | 5) +#define PINMUX_GPIO40__FUNC_CONN_DSP_JDI (MTK_PIN_NO(40) | 6) +#define PINMUX_GPIO40__FUNC_DBG_MON_A32 (MTK_PIN_NO(40) | 7) + +#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define PINMUX_GPIO41__FUNC_IDDIG (MTK_PIN_NO(41) | 1) +#define PINMUX_GPIO41__FUNC_URXD1 (MTK_PIN_NO(41) | 2) +#define PINMUX_GPIO41__FUNC_UCTS0 (MTK_PIN_NO(41) | 3) +#define PINMUX_GPIO41__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(41) | 4) +#define PINMUX_GPIO41__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 5) +#define PINMUX_GPIO41__FUNC_DMIC_CLK (MTK_PIN_NO(41) | 6) + +#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define PINMUX_GPIO42__FUNC_USB_DRVVBUS (MTK_PIN_NO(42) | 1) +#define PINMUX_GPIO42__FUNC_UTXD1 (MTK_PIN_NO(42) | 2) +#define PINMUX_GPIO42__FUNC_URTS0 (MTK_PIN_NO(42) | 3) +#define PINMUX_GPIO42__FUNC_SSPM_URXD_AO (MTK_PIN_NO(42) | 4) +#define PINMUX_GPIO42__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(42) | 5) +#define PINMUX_GPIO42__FUNC_DMIC_DAT (MTK_PIN_NO(42) | 6) + +#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define PINMUX_GPIO43__FUNC_DISP_PWM (MTK_PIN_NO(43) | 1) + +#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define PINMUX_GPIO44__FUNC_DSI_TE (MTK_PIN_NO(44) | 1) + +#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define PINMUX_GPIO45__FUNC_LCM_RST (MTK_PIN_NO(45) | 1) + +#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define PINMUX_GPIO46__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(46) | 1) +#define PINMUX_GPIO46__FUNC_URXD1 (MTK_PIN_NO(46) | 2) +#define PINMUX_GPIO46__FUNC_UCTS1 (MTK_PIN_NO(46) | 3) +#define PINMUX_GPIO46__FUNC_CCU_UTXD_AO (MTK_PIN_NO(46) | 4) +#define PINMUX_GPIO46__FUNC_TP_UCTS1_AO (MTK_PIN_NO(46) | 5) +#define PINMUX_GPIO46__FUNC_IDDIG (MTK_PIN_NO(46) | 6) +#define PINMUX_GPIO46__FUNC_I2S5_LRCK (MTK_PIN_NO(46) | 7) + +#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define PINMUX_GPIO47__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(47) | 1) +#define PINMUX_GPIO47__FUNC_UTXD1 (MTK_PIN_NO(47) | 2) 
+#define PINMUX_GPIO47__FUNC_URTS1 (MTK_PIN_NO(47) | 3) +#define PINMUX_GPIO47__FUNC_CCU_URXD_AO (MTK_PIN_NO(47) | 4) +#define PINMUX_GPIO47__FUNC_TP_URTS1_AO (MTK_PIN_NO(47) | 5) +#define PINMUX_GPIO47__FUNC_USB_DRVVBUS (MTK_PIN_NO(47) | 6) +#define PINMUX_GPIO47__FUNC_I2S5_DO (MTK_PIN_NO(47) | 7) + +#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define PINMUX_GPIO48__FUNC_SCL5 (MTK_PIN_NO(48) | 1) + +#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define PINMUX_GPIO49__FUNC_SDA5 (MTK_PIN_NO(49) | 1) + +#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define PINMUX_GPIO50__FUNC_SCL3 (MTK_PIN_NO(50) | 1) + +#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define PINMUX_GPIO51__FUNC_SDA3 (MTK_PIN_NO(51) | 1) + +#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define PINMUX_GPIO52__FUNC_BPI_ANT2 (MTK_PIN_NO(52) | 1) + +#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define PINMUX_GPIO53__FUNC_BPI_ANT0 (MTK_PIN_NO(53) | 1) + +#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define PINMUX_GPIO54__FUNC_BPI_OLAT1 (MTK_PIN_NO(54) | 1) + +#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1) + +#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1) +#define PINMUX_GPIO56__FUNC_SCL_6306 (MTK_PIN_NO(56) | 2) + +#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1) +#define PINMUX_GPIO57__FUNC_SDA_6306 (MTK_PIN_NO(57) | 2) + +#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define PINMUX_GPIO58__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(58) | 1) +#define PINMUX_GPIO58__FUNC_SPM_BSI_D2 (MTK_PIN_NO(58) | 2) +#define PINMUX_GPIO58__FUNC_PWM_B (MTK_PIN_NO(58) | 3) + +#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define PINMUX_GPIO59__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(59) | 1) +#define PINMUX_GPIO59__FUNC_SPM_BSI_D1 (MTK_PIN_NO(59) | 2) + +#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define PINMUX_GPIO60__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(60) | 1) +#define PINMUX_GPIO60__FUNC_SPM_BSI_D0 (MTK_PIN_NO(60) | 2) + +#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define PINMUX_GPIO61__FUNC_MIPI1_SDATA (MTK_PIN_NO(61) | 1) + +#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define PINMUX_GPIO62__FUNC_MIPI1_SCLK (MTK_PIN_NO(62) | 1) + +#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define PINMUX_GPIO63__FUNC_MIPI0_SDATA (MTK_PIN_NO(63) | 1) + +#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define PINMUX_GPIO64__FUNC_MIPI0_SCLK (MTK_PIN_NO(64) | 1) + +#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define PINMUX_GPIO65__FUNC_MIPI3_SDATA (MTK_PIN_NO(65) | 1) +#define PINMUX_GPIO65__FUNC_BPI_OLAT2 (MTK_PIN_NO(65) | 2) + +#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define PINMUX_GPIO66__FUNC_MIPI3_SCLK (MTK_PIN_NO(66) | 1) +#define PINMUX_GPIO66__FUNC_BPI_OLAT3 (MTK_PIN_NO(66) | 2) + +#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define PINMUX_GPIO67__FUNC_MIPI2_SDATA (MTK_PIN_NO(67) | 1) + +#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define PINMUX_GPIO68__FUNC_MIPI2_SCLK (MTK_PIN_NO(68) | 1) + +#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define PINMUX_GPIO69__FUNC_BPI_BUS7 (MTK_PIN_NO(69) | 1) + +#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define PINMUX_GPIO70__FUNC_BPI_BUS6 (MTK_PIN_NO(70) | 1) + +#define PINMUX_GPIO71__FUNC_GPIO71 
(MTK_PIN_NO(71) | 0) +#define PINMUX_GPIO71__FUNC_BPI_BUS5 (MTK_PIN_NO(71) | 1) + +#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define PINMUX_GPIO72__FUNC_BPI_BUS4 (MTK_PIN_NO(72) | 1) + +#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define PINMUX_GPIO73__FUNC_BPI_BUS3 (MTK_PIN_NO(73) | 1) + +#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define PINMUX_GPIO74__FUNC_BPI_BUS2 (MTK_PIN_NO(74) | 1) + +#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define PINMUX_GPIO75__FUNC_BPI_BUS1 (MTK_PIN_NO(75) | 1) + +#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define PINMUX_GPIO76__FUNC_BPI_BUS0 (MTK_PIN_NO(76) | 1) + +#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define PINMUX_GPIO77__FUNC_BPI_ANT1 (MTK_PIN_NO(77) | 1) + +#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define PINMUX_GPIO78__FUNC_BPI_OLAT0 (MTK_PIN_NO(78) | 1) + +#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define PINMUX_GPIO79__FUNC_BPI_PA_VM1 (MTK_PIN_NO(79) | 1) +#define PINMUX_GPIO79__FUNC_MIPI4_SDATA (MTK_PIN_NO(79) | 2) + +#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define PINMUX_GPIO80__FUNC_BPI_PA_VM0 (MTK_PIN_NO(80) | 1) +#define PINMUX_GPIO80__FUNC_MIPI4_SCLK (MTK_PIN_NO(80) | 2) + +#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define PINMUX_GPIO81__FUNC_SDA1 (MTK_PIN_NO(81) | 1) + +#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define PINMUX_GPIO82__FUNC_SDA0 (MTK_PIN_NO(82) | 1) + +#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define PINMUX_GPIO83__FUNC_SCL0 (MTK_PIN_NO(83) | 1) + +#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define PINMUX_GPIO84__FUNC_SCL1 (MTK_PIN_NO(84) | 1) + +#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define PINMUX_GPIO85__FUNC_SPI0_MI (MTK_PIN_NO(85) | 1) +#define PINMUX_GPIO85__FUNC_SCP_SPI0_MI (MTK_PIN_NO(85) | 2) +#define PINMUX_GPIO85__FUNC_CLKM3 (MTK_PIN_NO(85) | 3) +#define PINMUX_GPIO85__FUNC_I2S1_BCK (MTK_PIN_NO(85) | 4) +#define PINMUX_GPIO85__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(85) | 5) +#define PINMUX_GPIO85__FUNC_DFD_TDO (MTK_PIN_NO(85) | 6) +#define PINMUX_GPIO85__FUNC_JTDO_SEL1 (MTK_PIN_NO(85) | 7) + +#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define PINMUX_GPIO86__FUNC_SPI0_CSB (MTK_PIN_NO(86) | 1) +#define PINMUX_GPIO86__FUNC_SCP_SPI0_CS (MTK_PIN_NO(86) | 2) +#define PINMUX_GPIO86__FUNC_CLKM0 (MTK_PIN_NO(86) | 3) +#define PINMUX_GPIO86__FUNC_I2S1_LRCK (MTK_PIN_NO(86) | 4) +#define PINMUX_GPIO86__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(86) | 5) +#define PINMUX_GPIO86__FUNC_DFD_TMS (MTK_PIN_NO(86) | 6) +#define PINMUX_GPIO86__FUNC_JTMS_SEL1 (MTK_PIN_NO(86) | 7) + +#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define PINMUX_GPIO87__FUNC_SPI0_MO (MTK_PIN_NO(87) | 1) +#define PINMUX_GPIO87__FUNC_SCP_SPI0_MO (MTK_PIN_NO(87) | 2) +#define PINMUX_GPIO87__FUNC_SDA1 (MTK_PIN_NO(87) | 3) +#define PINMUX_GPIO87__FUNC_I2S1_DO (MTK_PIN_NO(87) | 4) +#define PINMUX_GPIO87__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(87) | 5) +#define PINMUX_GPIO87__FUNC_DFD_TDI (MTK_PIN_NO(87) | 6) +#define PINMUX_GPIO87__FUNC_JTDI_SEL1 (MTK_PIN_NO(87) | 7) + +#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define PINMUX_GPIO88__FUNC_SPI0_CLK (MTK_PIN_NO(88) | 1) +#define PINMUX_GPIO88__FUNC_SCP_SPI0_CK (MTK_PIN_NO(88) | 2) +#define PINMUX_GPIO88__FUNC_SCL1 (MTK_PIN_NO(88) | 3) +#define PINMUX_GPIO88__FUNC_I2S1_MCK (MTK_PIN_NO(88) | 4) +#define PINMUX_GPIO88__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(88) | 5) +#define 
PINMUX_GPIO88__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 6) +#define PINMUX_GPIO88__FUNC_JTCK_SEL1 (MTK_PIN_NO(88) | 7) + +#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define PINMUX_GPIO89__FUNC_SRCLKENAI0 (MTK_PIN_NO(89) | 1) +#define PINMUX_GPIO89__FUNC_PWM_C (MTK_PIN_NO(89) | 2) +#define PINMUX_GPIO89__FUNC_I2S5_BCK (MTK_PIN_NO(89) | 3) +#define PINMUX_GPIO89__FUNC_ANT_SEL6 (MTK_PIN_NO(89) | 4) +#define PINMUX_GPIO89__FUNC_SDA8 (MTK_PIN_NO(89) | 5) +#define PINMUX_GPIO89__FUNC_CMVREF0 (MTK_PIN_NO(89) | 6) +#define PINMUX_GPIO89__FUNC_DBG_MON_A21 (MTK_PIN_NO(89) | 7) + +#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define PINMUX_GPIO90__FUNC_PWM_A (MTK_PIN_NO(90) | 1) +#define PINMUX_GPIO90__FUNC_CMMCLK2 (MTK_PIN_NO(90) | 2) +#define PINMUX_GPIO90__FUNC_I2S5_LRCK (MTK_PIN_NO(90) | 3) +#define PINMUX_GPIO90__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(90) | 4) +#define PINMUX_GPIO90__FUNC_SCL8 (MTK_PIN_NO(90) | 5) +#define PINMUX_GPIO90__FUNC_PTA_RXD (MTK_PIN_NO(90) | 6) +#define PINMUX_GPIO90__FUNC_DBG_MON_A22 (MTK_PIN_NO(90) | 7) + +#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define PINMUX_GPIO91__FUNC_KPROW1 (MTK_PIN_NO(91) | 1) +#define PINMUX_GPIO91__FUNC_PWM_B (MTK_PIN_NO(91) | 2) +#define PINMUX_GPIO91__FUNC_I2S5_DO (MTK_PIN_NO(91) | 3) +#define PINMUX_GPIO91__FUNC_ANT_SEL7 (MTK_PIN_NO(91) | 4) +#define PINMUX_GPIO91__FUNC_CMMCLK3 (MTK_PIN_NO(91) | 5) +#define PINMUX_GPIO91__FUNC_PTA_TXD (MTK_PIN_NO(91) | 6) + +#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define PINMUX_GPIO92__FUNC_KPROW0 (MTK_PIN_NO(92) | 1) + +#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define PINMUX_GPIO93__FUNC_KPCOL0 (MTK_PIN_NO(93) | 1) +#define PINMUX_GPIO93__FUNC_DBG_MON_B27 (MTK_PIN_NO(93) | 7) + +#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define PINMUX_GPIO94__FUNC_KPCOL1 (MTK_PIN_NO(94) | 1) +#define PINMUX_GPIO94__FUNC_I2S2_DI2 (MTK_PIN_NO(94) | 2) +#define PINMUX_GPIO94__FUNC_I2S5_MCK (MTK_PIN_NO(94) | 3) +#define PINMUX_GPIO94__FUNC_CMMCLK2 (MTK_PIN_NO(94) | 4) +#define PINMUX_GPIO94__FUNC_SCP_SPI2_MI (MTK_PIN_NO(94) | 5) +#define PINMUX_GPIO94__FUNC_SRCLKENAI1 (MTK_PIN_NO(94) | 6) +#define PINMUX_GPIO94__FUNC_SPI2_MI (MTK_PIN_NO(94) | 7) + +#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define PINMUX_GPIO95__FUNC_URXD0 (MTK_PIN_NO(95) | 1) +#define PINMUX_GPIO95__FUNC_UTXD0 (MTK_PIN_NO(95) | 2) +#define PINMUX_GPIO95__FUNC_MD_URXD0 (MTK_PIN_NO(95) | 3) +#define PINMUX_GPIO95__FUNC_MD_URXD1 (MTK_PIN_NO(95) | 4) +#define PINMUX_GPIO95__FUNC_SSPM_URXD_AO (MTK_PIN_NO(95) | 5) +#define PINMUX_GPIO95__FUNC_CCU_URXD_AO (MTK_PIN_NO(95) | 6) + +#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define PINMUX_GPIO96__FUNC_UTXD0 (MTK_PIN_NO(96) | 1) +#define PINMUX_GPIO96__FUNC_URXD0 (MTK_PIN_NO(96) | 2) +#define PINMUX_GPIO96__FUNC_MD_UTXD0 (MTK_PIN_NO(96) | 3) +#define PINMUX_GPIO96__FUNC_MD_UTXD1 (MTK_PIN_NO(96) | 4) +#define PINMUX_GPIO96__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(96) | 5) +#define PINMUX_GPIO96__FUNC_CCU_UTXD_AO (MTK_PIN_NO(96) | 6) +#define PINMUX_GPIO96__FUNC_DBG_MON_B2 (MTK_PIN_NO(96) | 7) + +#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define PINMUX_GPIO97__FUNC_UCTS0 (MTK_PIN_NO(97) | 1) +#define PINMUX_GPIO97__FUNC_I2S2_MCK (MTK_PIN_NO(97) | 2) +#define PINMUX_GPIO97__FUNC_IDDIG (MTK_PIN_NO(97) | 3) +#define PINMUX_GPIO97__FUNC_CONN_MCU_TDO (MTK_PIN_NO(97) | 4) +#define PINMUX_GPIO97__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(97) | 5) +#define PINMUX_GPIO97__FUNC_IO_JTAG_TDO (MTK_PIN_NO(97) | 6) +#define 
PINMUX_GPIO97__FUNC_DBG_MON_B3 (MTK_PIN_NO(97) | 7) + +#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define PINMUX_GPIO98__FUNC_URTS0 (MTK_PIN_NO(98) | 1) +#define PINMUX_GPIO98__FUNC_I2S2_BCK (MTK_PIN_NO(98) | 2) +#define PINMUX_GPIO98__FUNC_USB_DRVVBUS (MTK_PIN_NO(98) | 3) +#define PINMUX_GPIO98__FUNC_CONN_MCU_TMS (MTK_PIN_NO(98) | 4) +#define PINMUX_GPIO98__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(98) | 5) +#define PINMUX_GPIO98__FUNC_IO_JTAG_TMS (MTK_PIN_NO(98) | 6) +#define PINMUX_GPIO98__FUNC_DBG_MON_B4 (MTK_PIN_NO(98) | 7) + +#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define PINMUX_GPIO99__FUNC_CMMCLK0 (MTK_PIN_NO(99) | 1) +#define PINMUX_GPIO99__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(99) | 4) +#define PINMUX_GPIO99__FUNC_DBG_MON_B28 (MTK_PIN_NO(99) | 7) + +#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define PINMUX_GPIO100__FUNC_CMMCLK1 (MTK_PIN_NO(100) | 1) +#define PINMUX_GPIO100__FUNC_PWM_C (MTK_PIN_NO(100) | 2) +#define PINMUX_GPIO100__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(100) | 3) +#define PINMUX_GPIO100__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(100) | 4) +#define PINMUX_GPIO100__FUNC_DBG_MON_B29 (MTK_PIN_NO(100) | 7) + +#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define PINMUX_GPIO101__FUNC_CLKM2 (MTK_PIN_NO(101) | 1) +#define PINMUX_GPIO101__FUNC_I2S2_LRCK (MTK_PIN_NO(101) | 2) +#define PINMUX_GPIO101__FUNC_CMVREF1 (MTK_PIN_NO(101) | 3) +#define PINMUX_GPIO101__FUNC_CONN_MCU_TCK (MTK_PIN_NO(101) | 4) +#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(101) | 5) +#define PINMUX_GPIO101__FUNC_IO_JTAG_TCK (MTK_PIN_NO(101) | 6) + +#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define PINMUX_GPIO102__FUNC_CLKM1 (MTK_PIN_NO(102) | 1) +#define PINMUX_GPIO102__FUNC_I2S2_DI (MTK_PIN_NO(102) | 2) +#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 3) +#define PINMUX_GPIO102__FUNC_CONN_MCU_TDI (MTK_PIN_NO(102) | 4) +#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(102) | 5) +#define PINMUX_GPIO102__FUNC_IO_JTAG_TDI (MTK_PIN_NO(102) | 6) +#define PINMUX_GPIO102__FUNC_DBG_MON_B8 (MTK_PIN_NO(102) | 7) + +#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define PINMUX_GPIO103__FUNC_SCL2 (MTK_PIN_NO(103) | 1) + +#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define PINMUX_GPIO104__FUNC_SDA2 (MTK_PIN_NO(104) | 1) + +#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define PINMUX_GPIO105__FUNC_SCL4 (MTK_PIN_NO(105) | 1) + +#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define PINMUX_GPIO106__FUNC_SDA4 (MTK_PIN_NO(106) | 1) + +#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1) +#define PINMUX_GPIO107__FUNC_ANT_SEL0 (MTK_PIN_NO(107) | 2) +#define PINMUX_GPIO107__FUNC_CLKM0 (MTK_PIN_NO(107) | 3) +#define PINMUX_GPIO107__FUNC_SDA7 (MTK_PIN_NO(107) | 4) +#define PINMUX_GPIO107__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(107) | 5) +#define PINMUX_GPIO107__FUNC_PWM_A (MTK_PIN_NO(107) | 6) +#define PINMUX_GPIO107__FUNC_DBG_MON_B12 (MTK_PIN_NO(107) | 7) + +#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define PINMUX_GPIO108__FUNC_CMMCLK2 (MTK_PIN_NO(108) | 1) +#define PINMUX_GPIO108__FUNC_ANT_SEL1 (MTK_PIN_NO(108) | 2) +#define PINMUX_GPIO108__FUNC_CLKM1 (MTK_PIN_NO(108) | 3) +#define PINMUX_GPIO108__FUNC_SCL8 (MTK_PIN_NO(108) | 4) +#define PINMUX_GPIO108__FUNC_DAP_MD32_SWD (MTK_PIN_NO(108) | 5) +#define PINMUX_GPIO108__FUNC_PWM_B (MTK_PIN_NO(108) | 6) +#define PINMUX_GPIO108__FUNC_DBG_MON_B13 
(MTK_PIN_NO(108) | 7) + +#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define PINMUX_GPIO109__FUNC_DMIC_DAT (MTK_PIN_NO(109) | 1) +#define PINMUX_GPIO109__FUNC_ANT_SEL2 (MTK_PIN_NO(109) | 2) +#define PINMUX_GPIO109__FUNC_CLKM2 (MTK_PIN_NO(109) | 3) +#define PINMUX_GPIO109__FUNC_SDA8 (MTK_PIN_NO(109) | 4) +#define PINMUX_GPIO109__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(109) | 5) +#define PINMUX_GPIO109__FUNC_PWM_C (MTK_PIN_NO(109) | 6) +#define PINMUX_GPIO109__FUNC_DBG_MON_B14 (MTK_PIN_NO(109) | 7) + +#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define PINMUX_GPIO110__FUNC_SCL7 (MTK_PIN_NO(110) | 1) +#define PINMUX_GPIO110__FUNC_ANT_SEL0 (MTK_PIN_NO(110) | 2) +#define PINMUX_GPIO110__FUNC_TP_URXD1_AO (MTK_PIN_NO(110) | 3) +#define PINMUX_GPIO110__FUNC_USB_DRVVBUS (MTK_PIN_NO(110) | 4) +#define PINMUX_GPIO110__FUNC_SRCLKENAI1 (MTK_PIN_NO(110) | 5) +#define PINMUX_GPIO110__FUNC_KPCOL2 (MTK_PIN_NO(110) | 6) +#define PINMUX_GPIO110__FUNC_URXD1 (MTK_PIN_NO(110) | 7) + +#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define PINMUX_GPIO111__FUNC_CMMCLK3 (MTK_PIN_NO(111) | 1) +#define PINMUX_GPIO111__FUNC_ANT_SEL1 (MTK_PIN_NO(111) | 2) +#define PINMUX_GPIO111__FUNC_SRCLKENAI0 (MTK_PIN_NO(111) | 3) +#define PINMUX_GPIO111__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(111) | 4) +#define PINMUX_GPIO111__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(111) | 5) +#define PINMUX_GPIO111__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(111) | 7) + +#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define PINMUX_GPIO112__FUNC_SDA7 (MTK_PIN_NO(112) | 1) +#define PINMUX_GPIO112__FUNC_ANT_SEL2 (MTK_PIN_NO(112) | 2) +#define PINMUX_GPIO112__FUNC_TP_UTXD1_AO (MTK_PIN_NO(112) | 3) +#define PINMUX_GPIO112__FUNC_IDDIG (MTK_PIN_NO(112) | 4) +#define PINMUX_GPIO112__FUNC_AGPS_SYNC (MTK_PIN_NO(112) | 5) +#define PINMUX_GPIO112__FUNC_KPROW2 (MTK_PIN_NO(112) | 6) +#define PINMUX_GPIO112__FUNC_UTXD1 (MTK_PIN_NO(112) | 7) + +#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define PINMUX_GPIO113__FUNC_CONN_TOP_CLK (MTK_PIN_NO(113) | 1) +#define PINMUX_GPIO113__FUNC_SCL6 (MTK_PIN_NO(113) | 3) +#define PINMUX_GPIO113__FUNC_AUXIF_CLK0 (MTK_PIN_NO(113) | 4) +#define PINMUX_GPIO113__FUNC_TP_UCTS1_AO (MTK_PIN_NO(113) | 6) + +#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define PINMUX_GPIO114__FUNC_CONN_TOP_DATA (MTK_PIN_NO(114) | 1) +#define PINMUX_GPIO114__FUNC_SDA6 (MTK_PIN_NO(114) | 3) +#define PINMUX_GPIO114__FUNC_AUXIF_ST0 (MTK_PIN_NO(114) | 4) +#define PINMUX_GPIO114__FUNC_TP_URTS1_AO (MTK_PIN_NO(114) | 6) + +#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define PINMUX_GPIO115__FUNC_CONN_BT_CLK (MTK_PIN_NO(115) | 1) +#define PINMUX_GPIO115__FUNC_UTXD1 (MTK_PIN_NO(115) | 2) +#define PINMUX_GPIO115__FUNC_PTA_TXD (MTK_PIN_NO(115) | 3) +#define PINMUX_GPIO115__FUNC_AUXIF_CLK1 (MTK_PIN_NO(115) | 4) +#define PINMUX_GPIO115__FUNC_DAP_MD32_SWD (MTK_PIN_NO(115) | 5) +#define PINMUX_GPIO115__FUNC_TP_UTXD1_AO (MTK_PIN_NO(115) | 6) + +#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define PINMUX_GPIO116__FUNC_CONN_BT_DATA (MTK_PIN_NO(116) | 1) +#define PINMUX_GPIO116__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(116) | 2) +#define PINMUX_GPIO116__FUNC_AUXIF_ST1 (MTK_PIN_NO(116) | 4) +#define PINMUX_GPIO116__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(116) | 5) +#define PINMUX_GPIO116__FUNC_TP_URXD2_AO (MTK_PIN_NO(116) | 6) +#define PINMUX_GPIO116__FUNC_DBG_MON_A0 (MTK_PIN_NO(116) | 7) + +#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define PINMUX_GPIO117__FUNC_CONN_WF_HB0 
(MTK_PIN_NO(117) | 1) +#define PINMUX_GPIO117__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(117) | 2) +#define PINMUX_GPIO117__FUNC_TP_UTXD2_AO (MTK_PIN_NO(117) | 6) +#define PINMUX_GPIO117__FUNC_DBG_MON_A4 (MTK_PIN_NO(117) | 7) + +#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define PINMUX_GPIO118__FUNC_CONN_WF_HB1 (MTK_PIN_NO(118) | 1) +#define PINMUX_GPIO118__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(118) | 2) +#define PINMUX_GPIO118__FUNC_SSPM_URXD_AO (MTK_PIN_NO(118) | 5) +#define PINMUX_GPIO118__FUNC_TP_UCTS2_AO (MTK_PIN_NO(118) | 6) +#define PINMUX_GPIO118__FUNC_DBG_MON_A5 (MTK_PIN_NO(118) | 7) + +#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define PINMUX_GPIO119__FUNC_CONN_WF_HB2 (MTK_PIN_NO(119) | 1) +#define PINMUX_GPIO119__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(119) | 2) +#define PINMUX_GPIO119__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(119) | 5) +#define PINMUX_GPIO119__FUNC_TP_URTS2_AO (MTK_PIN_NO(119) | 6) + +#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define PINMUX_GPIO120__FUNC_CONN_WB_PTA (MTK_PIN_NO(120) | 1) +#define PINMUX_GPIO120__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(120) | 2) +#define PINMUX_GPIO120__FUNC_CCU_URXD_AO (MTK_PIN_NO(120) | 5) + +#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define PINMUX_GPIO121__FUNC_CONN_HRST_B (MTK_PIN_NO(121) | 1) +#define PINMUX_GPIO121__FUNC_URXD1 (MTK_PIN_NO(121) | 2) +#define PINMUX_GPIO121__FUNC_PTA_RXD (MTK_PIN_NO(121) | 3) +#define PINMUX_GPIO121__FUNC_CCU_UTXD_AO (MTK_PIN_NO(121) | 5) +#define PINMUX_GPIO121__FUNC_TP_URXD1_AO (MTK_PIN_NO(121) | 6) + +#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define PINMUX_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1) +#define PINMUX_GPIO122__FUNC_SSPM_URXD2_AO (MTK_PIN_NO(122) | 2) +#define PINMUX_GPIO122__FUNC_ANT_SEL1 (MTK_PIN_NO(122) | 3) +#define PINMUX_GPIO122__FUNC_DBG_MON_A12 (MTK_PIN_NO(122) | 7) + +#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define PINMUX_GPIO123__FUNC_MSDC0_DAT0 (MTK_PIN_NO(123) | 1) +#define PINMUX_GPIO123__FUNC_ANT_SEL0 (MTK_PIN_NO(123) | 3) +#define PINMUX_GPIO123__FUNC_DBG_MON_A13 (MTK_PIN_NO(123) | 7) + +#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define PINMUX_GPIO124__FUNC_MSDC0_CLK (MTK_PIN_NO(124) | 1) +#define PINMUX_GPIO124__FUNC_DBG_MON_A14 (MTK_PIN_NO(124) | 7) + +#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define PINMUX_GPIO125__FUNC_MSDC0_DAT2 (MTK_PIN_NO(125) | 1) +#define PINMUX_GPIO125__FUNC_MRG_CLK (MTK_PIN_NO(125) | 3) +#define PINMUX_GPIO125__FUNC_DBG_MON_A15 (MTK_PIN_NO(125) | 7) + +#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define PINMUX_GPIO126__FUNC_MSDC0_DAT4 (MTK_PIN_NO(126) | 1) +#define PINMUX_GPIO126__FUNC_ANT_SEL5 (MTK_PIN_NO(126) | 3) +#define PINMUX_GPIO126__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(126) | 6) +#define PINMUX_GPIO126__FUNC_DBG_MON_A16 (MTK_PIN_NO(126) | 7) + +#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define PINMUX_GPIO127__FUNC_MSDC0_DAT6 (MTK_PIN_NO(127) | 1) +#define PINMUX_GPIO127__FUNC_ANT_SEL4 (MTK_PIN_NO(127) | 3) +#define PINMUX_GPIO127__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(127) | 6) +#define PINMUX_GPIO127__FUNC_DBG_MON_A17 (MTK_PIN_NO(127) | 7) + +#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define PINMUX_GPIO128__FUNC_MSDC0_DAT1 (MTK_PIN_NO(128) | 1) +#define PINMUX_GPIO128__FUNC_ANT_SEL2 (MTK_PIN_NO(128) | 3) +#define PINMUX_GPIO128__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(128) | 6) +#define PINMUX_GPIO128__FUNC_DBG_MON_A18 (MTK_PIN_NO(128) | 7) + +#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) 
+#define PINMUX_GPIO129__FUNC_MSDC0_DAT5 (MTK_PIN_NO(129) | 1) +#define PINMUX_GPIO129__FUNC_ANT_SEL3 (MTK_PIN_NO(129) | 3) +#define PINMUX_GPIO129__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(129) | 6) +#define PINMUX_GPIO129__FUNC_DBG_MON_A23 (MTK_PIN_NO(129) | 7) + +#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define PINMUX_GPIO130__FUNC_MSDC0_DAT7 (MTK_PIN_NO(130) | 1) +#define PINMUX_GPIO130__FUNC_MRG_DO (MTK_PIN_NO(130) | 3) +#define PINMUX_GPIO130__FUNC_DBG_MON_A24 (MTK_PIN_NO(130) | 7) + +#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define PINMUX_GPIO131__FUNC_MSDC0_DSL (MTK_PIN_NO(131) | 1) +#define PINMUX_GPIO131__FUNC_MRG_SYNC (MTK_PIN_NO(131) | 3) +#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7) + +#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define PINMUX_GPIO132__FUNC_MSDC0_DAT3 (MTK_PIN_NO(132) | 1) +#define PINMUX_GPIO132__FUNC_MRG_DI (MTK_PIN_NO(132) | 3) +#define PINMUX_GPIO132__FUNC_DBG_MON_A26 (MTK_PIN_NO(132) | 7) + +#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define PINMUX_GPIO133__FUNC_MSDC0_RSTB (MTK_PIN_NO(133) | 1) +#define PINMUX_GPIO133__FUNC_AGPS_SYNC (MTK_PIN_NO(133) | 3) +#define PINMUX_GPIO133__FUNC_DBG_MON_A27 (MTK_PIN_NO(133) | 7) + +#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define PINMUX_GPIO134__FUNC_RTC32K_CK (MTK_PIN_NO(134) | 1) + +#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define PINMUX_GPIO135__FUNC_WATCHDOG (MTK_PIN_NO(135) | 1) + +#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define PINMUX_GPIO136__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(136) | 1) +#define PINMUX_GPIO136__FUNC_AUD_CLK_MISO (MTK_PIN_NO(136) | 2) +#define PINMUX_GPIO136__FUNC_I2S1_MCK (MTK_PIN_NO(136) | 3) +#define PINMUX_GPIO136__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(136) | 6) + +#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(137) | 1) +#define PINMUX_GPIO137__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(137) | 2) +#define PINMUX_GPIO137__FUNC_I2S1_BCK (MTK_PIN_NO(137) | 3) + +#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(138) | 1) +#define PINMUX_GPIO138__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(138) | 2) +#define PINMUX_GPIO138__FUNC_I2S1_LRCK (MTK_PIN_NO(138) | 3) +#define PINMUX_GPIO138__FUNC_DBG_MON_B24 (MTK_PIN_NO(138) | 7) + +#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(139) | 1) +#define PINMUX_GPIO139__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(139) | 2) +#define PINMUX_GPIO139__FUNC_I2S1_DO (MTK_PIN_NO(139) | 3) +#define PINMUX_GPIO139__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(139) | 6) + +#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define PINMUX_GPIO140__FUNC_AUD_CLK_MISO (MTK_PIN_NO(140) | 1) +#define PINMUX_GPIO140__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(140) | 2) +#define PINMUX_GPIO140__FUNC_I2S0_MCK (MTK_PIN_NO(140) | 3) +#define PINMUX_GPIO140__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(140) | 6) + +#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define PINMUX_GPIO141__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(141) | 1) +#define PINMUX_GPIO141__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(141) | 2) +#define PINMUX_GPIO141__FUNC_I2S0_BCK (MTK_PIN_NO(141) | 3) + +#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define PINMUX_GPIO142__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(142) | 1) +#define PINMUX_GPIO142__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(142) | 2) +#define PINMUX_GPIO142__FUNC_I2S0_LRCK (MTK_PIN_NO(142) | 3) +#define 
PINMUX_GPIO142__FUNC_VOW_DAT_MISO (MTK_PIN_NO(142) | 4) +#define PINMUX_GPIO142__FUNC_DBG_MON_B25 (MTK_PIN_NO(142) | 7) + +#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define PINMUX_GPIO143__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(143) | 1) +#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(143) | 2) +#define PINMUX_GPIO143__FUNC_I2S0_DI (MTK_PIN_NO(143) | 3) +#define PINMUX_GPIO143__FUNC_VOW_CLK_MISO (MTK_PIN_NO(143) | 4) +#define PINMUX_GPIO143__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(143) | 6) +#define PINMUX_GPIO143__FUNC_DBG_MON_B26 (MTK_PIN_NO(143) | 7) + +#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(144) | 1) +#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(144) | 2) + +#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define PINMUX_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1) + +#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(146) | 1) +#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(146) | 2) + +#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define PINMUX_GPIO147__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(147) | 1) + +#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define PINMUX_GPIO148__FUNC_SRCLKENA0 (MTK_PIN_NO(148) | 1) + +#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define PINMUX_GPIO149__FUNC_SRCLKENA1 (MTK_PIN_NO(149) | 1) + +#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define PINMUX_GPIO150__FUNC_PWM_A (MTK_PIN_NO(150) | 1) +#define PINMUX_GPIO150__FUNC_CMFLASH (MTK_PIN_NO(150) | 2) +#define PINMUX_GPIO150__FUNC_CLKM0 (MTK_PIN_NO(150) | 3) +#define PINMUX_GPIO150__FUNC_DBG_MON_B30 (MTK_PIN_NO(150) | 7) + +#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define PINMUX_GPIO151__FUNC_PWM_B (MTK_PIN_NO(151) | 1) +#define PINMUX_GPIO151__FUNC_CMVREF0 (MTK_PIN_NO(151) | 2) +#define PINMUX_GPIO151__FUNC_CLKM1 (MTK_PIN_NO(151) | 3) +#define PINMUX_GPIO151__FUNC_DBG_MON_B20 (MTK_PIN_NO(151) | 7) + +#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define PINMUX_GPIO152__FUNC_PWM_C (MTK_PIN_NO(152) | 1) +#define PINMUX_GPIO152__FUNC_CMFLASH (MTK_PIN_NO(152) | 2) +#define PINMUX_GPIO152__FUNC_CLKM2 (MTK_PIN_NO(152) | 3) +#define PINMUX_GPIO152__FUNC_DBG_MON_B21 (MTK_PIN_NO(152) | 7) + +#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define PINMUX_GPIO153__FUNC_PWM_A (MTK_PIN_NO(153) | 1) +#define PINMUX_GPIO153__FUNC_CMVREF0 (MTK_PIN_NO(153) | 2) +#define PINMUX_GPIO153__FUNC_CLKM3 (MTK_PIN_NO(153) | 3) +#define PINMUX_GPIO153__FUNC_DBG_MON_B22 (MTK_PIN_NO(153) | 7) + +#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define PINMUX_GPIO154__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(154) | 1) +#define PINMUX_GPIO154__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(154) | 2) +#define PINMUX_GPIO154__FUNC_DBG_MON_B18 (MTK_PIN_NO(154) | 7) + +#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define PINMUX_GPIO155__FUNC_ANT_SEL0 (MTK_PIN_NO(155) | 1) +#define PINMUX_GPIO155__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(155) | 2) +#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 3) +#define PINMUX_GPIO155__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(155) | 7) + +#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define PINMUX_GPIO156__FUNC_ANT_SEL1 (MTK_PIN_NO(156) | 1) +#define PINMUX_GPIO156__FUNC_SRCLKENAI0 (MTK_PIN_NO(156) | 2) +#define PINMUX_GPIO156__FUNC_SCL6 (MTK_PIN_NO(156) | 3) +#define PINMUX_GPIO156__FUNC_KPCOL2 (MTK_PIN_NO(156) | 4) +#define 
PINMUX_GPIO156__FUNC_IDDIG (MTK_PIN_NO(156) | 5) +#define PINMUX_GPIO156__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(156) | 7) + +#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define PINMUX_GPIO157__FUNC_ANT_SEL2 (MTK_PIN_NO(157) | 1) +#define PINMUX_GPIO157__FUNC_SRCLKENAI1 (MTK_PIN_NO(157) | 2) +#define PINMUX_GPIO157__FUNC_SDA6 (MTK_PIN_NO(157) | 3) +#define PINMUX_GPIO157__FUNC_KPROW2 (MTK_PIN_NO(157) | 4) +#define PINMUX_GPIO157__FUNC_USB_DRVVBUS (MTK_PIN_NO(157) | 5) +#define PINMUX_GPIO157__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(157) | 7) + +#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define PINMUX_GPIO158__FUNC_ANT_SEL3 (MTK_PIN_NO(158) | 1) + +#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define PINMUX_GPIO159__FUNC_ANT_SEL4 (MTK_PIN_NO(159) | 1) + +#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0) +#define PINMUX_GPIO160__FUNC_ANT_SEL5 (MTK_PIN_NO(160) | 1) + +#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define PINMUX_GPIO161__FUNC_SPI1_A_MI (MTK_PIN_NO(161) | 1) +#define PINMUX_GPIO161__FUNC_SCP_SPI1_MI (MTK_PIN_NO(161) | 2) +#define PINMUX_GPIO161__FUNC_IDDIG (MTK_PIN_NO(161) | 3) +#define PINMUX_GPIO161__FUNC_ANT_SEL6 (MTK_PIN_NO(161) | 4) +#define PINMUX_GPIO161__FUNC_KPCOL2 (MTK_PIN_NO(161) | 5) +#define PINMUX_GPIO161__FUNC_PTA_RXD (MTK_PIN_NO(161) | 6) +#define PINMUX_GPIO161__FUNC_DBG_MON_B19 (MTK_PIN_NO(161) | 7) + +#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define PINMUX_GPIO162__FUNC_SPI1_A_CSB (MTK_PIN_NO(162) | 1) +#define PINMUX_GPIO162__FUNC_SCP_SPI1_CS (MTK_PIN_NO(162) | 2) +#define PINMUX_GPIO162__FUNC_USB_DRVVBUS (MTK_PIN_NO(162) | 3) +#define PINMUX_GPIO162__FUNC_ANT_SEL5 (MTK_PIN_NO(162) | 4) +#define PINMUX_GPIO162__FUNC_KPROW2 (MTK_PIN_NO(162) | 5) +#define PINMUX_GPIO162__FUNC_PTA_TXD (MTK_PIN_NO(162) | 6) + +#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define PINMUX_GPIO163__FUNC_SPI1_A_MO (MTK_PIN_NO(163) | 1) +#define PINMUX_GPIO163__FUNC_SCP_SPI1_MO (MTK_PIN_NO(163) | 2) +#define PINMUX_GPIO163__FUNC_SDA1 (MTK_PIN_NO(163) | 3) +#define PINMUX_GPIO163__FUNC_ANT_SEL4 (MTK_PIN_NO(163) | 4) +#define PINMUX_GPIO163__FUNC_CMMCLK2 (MTK_PIN_NO(163) | 5) +#define PINMUX_GPIO163__FUNC_DMIC_CLK (MTK_PIN_NO(163) | 6) + +#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define PINMUX_GPIO164__FUNC_SPI1_A_CLK (MTK_PIN_NO(164) | 1) +#define PINMUX_GPIO164__FUNC_SCP_SPI1_CK (MTK_PIN_NO(164) | 2) +#define PINMUX_GPIO164__FUNC_SCL1 (MTK_PIN_NO(164) | 3) +#define PINMUX_GPIO164__FUNC_ANT_SEL3 (MTK_PIN_NO(164) | 4) +#define PINMUX_GPIO164__FUNC_CMMCLK3 (MTK_PIN_NO(164) | 5) +#define PINMUX_GPIO164__FUNC_DMIC_DAT (MTK_PIN_NO(164) | 6) + +#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) +#define PINMUX_GPIO165__FUNC_PWM_B (MTK_PIN_NO(165) | 1) +#define PINMUX_GPIO165__FUNC_CMMCLK2 (MTK_PIN_NO(165) | 2) +#define PINMUX_GPIO165__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(165) | 3) +#define PINMUX_GPIO165__FUNC_TDM_MCK_2ND (MTK_PIN_NO(165) | 6) +#define PINMUX_GPIO165__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(165) | 7) + +#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) +#define PINMUX_GPIO166__FUNC_ANT_SEL6 (MTK_PIN_NO(166) | 1) + +#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) +#define PINMUX_GPIO167__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(167) | 1) +#define PINMUX_GPIO167__FUNC_SPM_BSI_EN (MTK_PIN_NO(167) | 2) + +#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) +#define PINMUX_GPIO168__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(168) | 1) +#define PINMUX_GPIO168__FUNC_SPM_BSI_CK 
(MTK_PIN_NO(168) | 2) + +#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define PINMUX_GPIO169__FUNC_PWM_C (MTK_PIN_NO(169) | 1) +#define PINMUX_GPIO169__FUNC_CMMCLK3 (MTK_PIN_NO(169) | 2) +#define PINMUX_GPIO169__FUNC_CMVREF1 (MTK_PIN_NO(169) | 3) +#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 4) +#define PINMUX_GPIO169__FUNC_AGPS_SYNC (MTK_PIN_NO(169) | 5) +#define PINMUX_GPIO169__FUNC_TDM_BCK_2ND (MTK_PIN_NO(169) | 6) +#define PINMUX_GPIO169__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(169) | 7) + +#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define PINMUX_GPIO170__FUNC_I2S1_BCK (MTK_PIN_NO(170) | 1) +#define PINMUX_GPIO170__FUNC_I2S3_BCK (MTK_PIN_NO(170) | 2) +#define PINMUX_GPIO170__FUNC_SCL7 (MTK_PIN_NO(170) | 3) +#define PINMUX_GPIO170__FUNC_I2S5_BCK (MTK_PIN_NO(170) | 4) +#define PINMUX_GPIO170__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(170) | 5) +#define PINMUX_GPIO170__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(170) | 6) +#define PINMUX_GPIO170__FUNC_ANT_SEL3 (MTK_PIN_NO(170) | 7) + +#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define PINMUX_GPIO171__FUNC_I2S1_LRCK (MTK_PIN_NO(171) | 1) +#define PINMUX_GPIO171__FUNC_I2S3_LRCK (MTK_PIN_NO(171) | 2) +#define PINMUX_GPIO171__FUNC_SDA7 (MTK_PIN_NO(171) | 3) +#define PINMUX_GPIO171__FUNC_I2S5_LRCK (MTK_PIN_NO(171) | 4) +#define PINMUX_GPIO171__FUNC_URXD1 (MTK_PIN_NO(171) | 5) +#define PINMUX_GPIO171__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(171) | 6) +#define PINMUX_GPIO171__FUNC_ANT_SEL4 (MTK_PIN_NO(171) | 7) + +#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define PINMUX_GPIO172__FUNC_I2S1_DO (MTK_PIN_NO(172) | 1) +#define PINMUX_GPIO172__FUNC_I2S3_DO (MTK_PIN_NO(172) | 2) +#define PINMUX_GPIO172__FUNC_SCL8 (MTK_PIN_NO(172) | 3) +#define PINMUX_GPIO172__FUNC_I2S5_DO (MTK_PIN_NO(172) | 4) +#define PINMUX_GPIO172__FUNC_UTXD1 (MTK_PIN_NO(172) | 5) +#define PINMUX_GPIO172__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(172) | 6) +#define PINMUX_GPIO172__FUNC_ANT_SEL5 (MTK_PIN_NO(172) | 7) + +#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define PINMUX_GPIO173__FUNC_I2S1_MCK (MTK_PIN_NO(173) | 1) +#define PINMUX_GPIO173__FUNC_I2S3_MCK (MTK_PIN_NO(173) | 2) +#define PINMUX_GPIO173__FUNC_SDA8 (MTK_PIN_NO(173) | 3) +#define PINMUX_GPIO173__FUNC_I2S5_MCK (MTK_PIN_NO(173) | 4) +#define PINMUX_GPIO173__FUNC_UCTS0 (MTK_PIN_NO(173) | 5) +#define PINMUX_GPIO173__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(173) | 6) +#define PINMUX_GPIO173__FUNC_ANT_SEL6 (MTK_PIN_NO(173) | 7) + +#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0) +#define PINMUX_GPIO174__FUNC_I2S2_DI (MTK_PIN_NO(174) | 1) +#define PINMUX_GPIO174__FUNC_I2S0_DI (MTK_PIN_NO(174) | 2) +#define PINMUX_GPIO174__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(174) | 3) +#define PINMUX_GPIO174__FUNC_I2S2_DI2 (MTK_PIN_NO(174) | 4) +#define PINMUX_GPIO174__FUNC_URTS0 (MTK_PIN_NO(174) | 5) +#define PINMUX_GPIO174__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(174) | 6) +#define PINMUX_GPIO174__FUNC_ANT_SEL7 (MTK_PIN_NO(174) | 7) + +#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define PINMUX_GPIO175__FUNC_ANT_SEL7 (MTK_PIN_NO(175) | 1) + +#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) + +#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) + +#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) + +#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) + +#endif /* __MT8183_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/mt8365-pinfunc.h b/include/dt-bindings/pinctrl/mt8365-pinfunc.h new file mode 100644 index 000000000000..e2ec8af57dcf --- /dev/null +++
b/include/dt-bindings/pinctrl/mt8365-pinfunc.h @@ -0,0 +1,858 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 MediaTek Inc. + */ +#ifndef __MT8365_PINFUNC_H +#define __MT8365_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT8365_PIN_0_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT8365_PIN_0_GPIO0__FUNC_DPI_D0 (MTK_PIN_NO(0) | 1) +#define MT8365_PIN_0_GPIO0__FUNC_PWM_A (MTK_PIN_NO(0) | 2) +#define MT8365_PIN_0_GPIO0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3) +#define MT8365_PIN_0_GPIO0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4) +#define MT8365_PIN_0_GPIO0__FUNC_CONN_MCU_TDO (MTK_PIN_NO(0) | 5) +#define MT8365_PIN_0_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7) + +#define MT8365_PIN_1_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT8365_PIN_1_GPIO1__FUNC_DPI_D1 (MTK_PIN_NO(1) | 1) +#define MT8365_PIN_1_GPIO1__FUNC_PWM_B (MTK_PIN_NO(1) | 2) +#define MT8365_PIN_1_GPIO1__FUNC_I2S2_LRCK (MTK_PIN_NO(1) | 3) +#define MT8365_PIN_1_GPIO1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4) +#define MT8365_PIN_1_GPIO1__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(1) | 5) +#define MT8365_PIN_1_GPIO1__FUNC_DBG_MON_A1 (MTK_PIN_NO(1) | 7) + +#define MT8365_PIN_2_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT8365_PIN_2_GPIO2__FUNC_DPI_D2 (MTK_PIN_NO(2) | 1) +#define MT8365_PIN_2_GPIO2__FUNC_PWM_C (MTK_PIN_NO(2) | 2) +#define MT8365_PIN_2_GPIO2__FUNC_I2S2_MCK (MTK_PIN_NO(2) | 3) +#define MT8365_PIN_2_GPIO2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4) +#define MT8365_PIN_2_GPIO2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(2) | 5) +#define MT8365_PIN_2_GPIO2__FUNC_DBG_MON_A2 (MTK_PIN_NO(2) | 7) + +#define MT8365_PIN_3_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT8365_PIN_3_GPIO3__FUNC_DPI_D3 (MTK_PIN_NO(3) | 1) +#define MT8365_PIN_3_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 2) +#define MT8365_PIN_3_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3) +#define MT8365_PIN_3_GPIO3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4) +#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_TCK (MTK_PIN_NO(3) | 5) +#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(3) | 6) +#define MT8365_PIN_3_GPIO3__FUNC_DBG_MON_A3 (MTK_PIN_NO(3) | 7) + +#define MT8365_PIN_4_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT8365_PIN_4_GPIO4__FUNC_DPI_D4 (MTK_PIN_NO(4) | 1) +#define MT8365_PIN_4_GPIO4__FUNC_CLKM1 (MTK_PIN_NO(4) | 2) +#define MT8365_PIN_4_GPIO4__FUNC_I2S1_BCK (MTK_PIN_NO(4) | 3) +#define MT8365_PIN_4_GPIO4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4) +#define MT8365_PIN_4_GPIO4__FUNC_CONN_MCU_TDI (MTK_PIN_NO(4) | 5) +#define MT8365_PIN_4_GPIO4__FUNC_VDEC_TEST_CK (MTK_PIN_NO(4) | 6) +#define MT8365_PIN_4_GPIO4__FUNC_DBG_MON_A4 (MTK_PIN_NO(4) | 7) + +#define MT8365_PIN_5_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT8365_PIN_5_GPIO5__FUNC_DPI_D5 (MTK_PIN_NO(5) | 1) +#define MT8365_PIN_5_GPIO5__FUNC_CLKM2 (MTK_PIN_NO(5) | 2) +#define MT8365_PIN_5_GPIO5__FUNC_I2S1_LRCK (MTK_PIN_NO(5) | 3) +#define MT8365_PIN_5_GPIO5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4) +#define MT8365_PIN_5_GPIO5__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(5) | 5) +#define MT8365_PIN_5_GPIO5__FUNC_MM_TEST_CK (MTK_PIN_NO(5) | 6) +#define MT8365_PIN_5_GPIO5__FUNC_DBG_MON_A5 (MTK_PIN_NO(5) | 7) + +#define MT8365_PIN_6_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT8365_PIN_6_GPIO6__FUNC_DPI_D6 (MTK_PIN_NO(6) | 1) +#define MT8365_PIN_6_GPIO6__FUNC_CLKM3 (MTK_PIN_NO(6) | 2) +#define MT8365_PIN_6_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3) +#define MT8365_PIN_6_GPIO6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4) +#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_TMS (MTK_PIN_NO(6) | 5) +#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(6) 
| 6) +#define MT8365_PIN_6_GPIO6__FUNC_DBG_MON_A6 (MTK_PIN_NO(6) | 7) + +#define MT8365_PIN_7_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT8365_PIN_7_GPIO7__FUNC_DPI_D7 (MTK_PIN_NO(7) | 1) +#define MT8365_PIN_7_GPIO7__FUNC_I2S1_DO (MTK_PIN_NO(7) | 3) +#define MT8365_PIN_7_GPIO7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4) +#define MT8365_PIN_7_GPIO7__FUNC_CONN_DSP_JCK (MTK_PIN_NO(7) | 5) +#define MT8365_PIN_7_GPIO7__FUNC_DBG_MON_A7 (MTK_PIN_NO(7) | 7) + +#define MT8365_PIN_8_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT8365_PIN_8_GPIO8__FUNC_DPI_D8 (MTK_PIN_NO(8) | 1) +#define MT8365_PIN_8_GPIO8__FUNC_SPI_CLK (MTK_PIN_NO(8) | 2) +#define MT8365_PIN_8_GPIO8__FUNC_I2S0_BCK (MTK_PIN_NO(8) | 3) +#define MT8365_PIN_8_GPIO8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4) +#define MT8365_PIN_8_GPIO8__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(8) | 5) +#define MT8365_PIN_8_GPIO8__FUNC_DBG_MON_A8 (MTK_PIN_NO(8) | 7) + +#define MT8365_PIN_9_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT8365_PIN_9_GPIO9__FUNC_DPI_D9 (MTK_PIN_NO(9) | 1) +#define MT8365_PIN_9_GPIO9__FUNC_SPI_CSB (MTK_PIN_NO(9) | 2) +#define MT8365_PIN_9_GPIO9__FUNC_I2S0_LRCK (MTK_PIN_NO(9) | 3) +#define MT8365_PIN_9_GPIO9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4) +#define MT8365_PIN_9_GPIO9__FUNC_CONN_DSP_JDI (MTK_PIN_NO(9) | 5) +#define MT8365_PIN_9_GPIO9__FUNC_DBG_MON_A9 (MTK_PIN_NO(9) | 7) + +#define MT8365_PIN_10_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT8365_PIN_10_GPIO10__FUNC_DPI_D10 (MTK_PIN_NO(10) | 1) +#define MT8365_PIN_10_GPIO10__FUNC_SPI_MI (MTK_PIN_NO(10) | 2) +#define MT8365_PIN_10_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 3) +#define MT8365_PIN_10_GPIO10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4) +#define MT8365_PIN_10_GPIO10__FUNC_CONN_DSP_JMS (MTK_PIN_NO(10) | 5) +#define MT8365_PIN_10_GPIO10__FUNC_DBG_MON_A10 (MTK_PIN_NO(10) | 7) + +#define MT8365_PIN_11_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT8365_PIN_11_GPIO11__FUNC_DPI_D11 (MTK_PIN_NO(11) | 1) +#define MT8365_PIN_11_GPIO11__FUNC_SPI_MO (MTK_PIN_NO(11) | 2) +#define MT8365_PIN_11_GPIO11__FUNC_I2S0_DI (MTK_PIN_NO(11) | 3) +#define MT8365_PIN_11_GPIO11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 4) +#define MT8365_PIN_11_GPIO11__FUNC_CONN_DSP_JDO (MTK_PIN_NO(11) | 5) +#define MT8365_PIN_11_GPIO11__FUNC_DBG_MON_A11 (MTK_PIN_NO(11) | 7) + +#define MT8365_PIN_12_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT8365_PIN_12_GPIO12__FUNC_DPI_DE (MTK_PIN_NO(12) | 1) +#define MT8365_PIN_12_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 2) +#define MT8365_PIN_12_GPIO12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 3) +#define MT8365_PIN_12_GPIO12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 4) +#define MT8365_PIN_12_GPIO12__FUNC_O_WIFI_TXD (MTK_PIN_NO(12) | 5) +#define MT8365_PIN_12_GPIO12__FUNC_DBG_MON_A12 (MTK_PIN_NO(12) | 7) + +#define MT8365_PIN_13_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC (MTK_PIN_NO(13) | 1) +#define MT8365_PIN_13_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 2) +#define MT8365_PIN_13_GPIO13__FUNC_I2S3_LRCK (MTK_PIN_NO(13) | 3) +#define MT8365_PIN_13_GPIO13__FUNC_EXT_COL (MTK_PIN_NO(13) | 4) +#define MT8365_PIN_13_GPIO13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 5) +#define MT8365_PIN_13_GPIO13__FUNC_DBG_MON_A13 (MTK_PIN_NO(13) | 7) + +#define MT8365_PIN_14_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT8365_PIN_14_GPIO14__FUNC_DPI_CK (MTK_PIN_NO(14) | 1) +#define MT8365_PIN_14_GPIO14__FUNC_UCTS2 (MTK_PIN_NO(14) | 2) +#define MT8365_PIN_14_GPIO14__FUNC_I2S3_MCK (MTK_PIN_NO(14) | 3) +#define MT8365_PIN_14_GPIO14__FUNC_EXT_MDIO (MTK_PIN_NO(14) | 4) +#define 
MT8365_PIN_14_GPIO14__FUNC_SPDIF_OUT (MTK_PIN_NO(14) | 5) +#define MT8365_PIN_14_GPIO14__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(14) | 6) +#define MT8365_PIN_14_GPIO14__FUNC_DBG_MON_A14 (MTK_PIN_NO(14) | 7) + +#define MT8365_PIN_15_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC (MTK_PIN_NO(15) | 1) +#define MT8365_PIN_15_GPIO15__FUNC_URTS2 (MTK_PIN_NO(15) | 2) +#define MT8365_PIN_15_GPIO15__FUNC_I2S3_DO (MTK_PIN_NO(15) | 3) +#define MT8365_PIN_15_GPIO15__FUNC_EXT_MDC (MTK_PIN_NO(15) | 4) +#define MT8365_PIN_15_GPIO15__FUNC_IRRX (MTK_PIN_NO(15) | 5) +#define MT8365_PIN_15_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 6) +#define MT8365_PIN_15_GPIO15__FUNC_DBG_MON_A15 (MTK_PIN_NO(15) | 7) + +#define MT8365_PIN_16_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT8365_PIN_16_GPIO16__FUNC_DPI_D12 (MTK_PIN_NO(16) | 1) +#define MT8365_PIN_16_GPIO16__FUNC_USB_DRVVBUS (MTK_PIN_NO(16) | 2) +#define MT8365_PIN_16_GPIO16__FUNC_PWM_A (MTK_PIN_NO(16) | 3) +#define MT8365_PIN_16_GPIO16__FUNC_CLKM0 (MTK_PIN_NO(16) | 4) +#define MT8365_PIN_16_GPIO16__FUNC_ANT_SEL0 (MTK_PIN_NO(16) | 5) +#define MT8365_PIN_16_GPIO16__FUNC_TSF_IN (MTK_PIN_NO(16) | 6) +#define MT8365_PIN_16_GPIO16__FUNC_DBG_MON_A16 (MTK_PIN_NO(16) | 7) + +#define MT8365_PIN_17_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT8365_PIN_17_GPIO17__FUNC_DPI_D13 (MTK_PIN_NO(17) | 1) +#define MT8365_PIN_17_GPIO17__FUNC_IDDIG (MTK_PIN_NO(17) | 2) +#define MT8365_PIN_17_GPIO17__FUNC_PWM_B (MTK_PIN_NO(17) | 3) +#define MT8365_PIN_17_GPIO17__FUNC_CLKM1 (MTK_PIN_NO(17) | 4) +#define MT8365_PIN_17_GPIO17__FUNC_ANT_SEL1 (MTK_PIN_NO(17) | 5) +#define MT8365_PIN_17_GPIO17__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 6) +#define MT8365_PIN_17_GPIO17__FUNC_DBG_MON_A17 (MTK_PIN_NO(17) | 7) + +#define MT8365_PIN_18_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT8365_PIN_18_GPIO18__FUNC_DPI_D14 (MTK_PIN_NO(18) | 1) +#define MT8365_PIN_18_GPIO18__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(18) | 2) +#define MT8365_PIN_18_GPIO18__FUNC_PWM_C (MTK_PIN_NO(18) | 3) +#define MT8365_PIN_18_GPIO18__FUNC_CLKM2 (MTK_PIN_NO(18) | 4) +#define MT8365_PIN_18_GPIO18__FUNC_ANT_SEL2 (MTK_PIN_NO(18) | 5) +#define MT8365_PIN_18_GPIO18__FUNC_MFG_TEST_CK (MTK_PIN_NO(18) | 6) +#define MT8365_PIN_18_GPIO18__FUNC_DBG_MON_A18 (MTK_PIN_NO(18) | 7) + +#define MT8365_PIN_19_DISP_PWM__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT8365_PIN_19_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(19) | 1) +#define MT8365_PIN_19_DISP_PWM__FUNC_PWM_A (MTK_PIN_NO(19) | 2) +#define MT8365_PIN_19_DISP_PWM__FUNC_DBG_MON_A19 (MTK_PIN_NO(19) | 7) + +#define MT8365_PIN_20_LCM_RST__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT8365_PIN_20_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(20) | 1) +#define MT8365_PIN_20_LCM_RST__FUNC_PWM_B (MTK_PIN_NO(20) | 2) +#define MT8365_PIN_20_LCM_RST__FUNC_DBG_MON_A20 (MTK_PIN_NO(20) | 7) + +#define MT8365_PIN_21_DSI_TE__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT8365_PIN_21_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(21) | 1) +#define MT8365_PIN_21_DSI_TE__FUNC_PWM_C (MTK_PIN_NO(21) | 2) +#define MT8365_PIN_21_DSI_TE__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 3) +#define MT8365_PIN_21_DSI_TE__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(21) | 4) +#define MT8365_PIN_21_DSI_TE__FUNC_DBG_MON_A21 (MTK_PIN_NO(21) | 7) + +#define MT8365_PIN_22_KPROW0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT8365_PIN_22_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(22) | 1) +#define MT8365_PIN_22_KPROW0__FUNC_DBG_MON_A22 (MTK_PIN_NO(22) | 7) + +#define MT8365_PIN_23_KPROW1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define 
MT8365_PIN_23_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(23) | 1) +#define MT8365_PIN_23_KPROW1__FUNC_IDDIG (MTK_PIN_NO(23) | 2) +#define MT8365_PIN_23_KPROW1__FUNC_WIFI_TXD (MTK_PIN_NO(23) | 3) +#define MT8365_PIN_23_KPROW1__FUNC_CLKM3 (MTK_PIN_NO(23) | 4) +#define MT8365_PIN_23_KPROW1__FUNC_ANT_SEL1 (MTK_PIN_NO(23) | 5) +#define MT8365_PIN_23_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 6) +#define MT8365_PIN_23_KPROW1__FUNC_DBG_MON_B0 (MTK_PIN_NO(23) | 7) + +#define MT8365_PIN_24_KPCOL0__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT8365_PIN_24_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(24) | 1) +#define MT8365_PIN_24_KPCOL0__FUNC_DBG_MON_A23 (MTK_PIN_NO(24) | 7) + +#define MT8365_PIN_25_KPCOL1__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT8365_PIN_25_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(25) | 1) +#define MT8365_PIN_25_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(25) | 2) +#define MT8365_PIN_25_KPCOL1__FUNC_APU_JTAG_TRST (MTK_PIN_NO(25) | 3) +#define MT8365_PIN_25_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(25) | 4) +#define MT8365_PIN_25_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(25) | 5) +#define MT8365_PIN_25_KPCOL1__FUNC_CONN_TEST_CK (MTK_PIN_NO(25) | 6) +#define MT8365_PIN_25_KPCOL1__FUNC_DBG_MON_B1 (MTK_PIN_NO(25) | 7) + +#define MT8365_PIN_26_SPI_CS__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT8365_PIN_26_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(26) | 1) +#define MT8365_PIN_26_SPI_CS__FUNC_APU_JTAG_TMS (MTK_PIN_NO(26) | 3) +#define MT8365_PIN_26_SPI_CS__FUNC_UDI_TMS_XI (MTK_PIN_NO(26) | 4) +#define MT8365_PIN_26_SPI_CS__FUNC_DFD_TMS_XI (MTK_PIN_NO(26) | 5) +#define MT8365_PIN_26_SPI_CS__FUNC_CONN_TEST_CK (MTK_PIN_NO(26) | 6) +#define MT8365_PIN_26_SPI_CS__FUNC_DBG_MON_A24 (MTK_PIN_NO(26) | 7) + +#define MT8365_PIN_27_SPI_CK__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT8365_PIN_27_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(27) | 1) +#define MT8365_PIN_27_SPI_CK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(27) | 3) +#define MT8365_PIN_27_SPI_CK__FUNC_UDI_TCK_XI (MTK_PIN_NO(27) | 4) +#define MT8365_PIN_27_SPI_CK__FUNC_DFD_TCK_XI (MTK_PIN_NO(27) | 5) +#define MT8365_PIN_27_SPI_CK__FUNC_APU_TEST_CK (MTK_PIN_NO(27) | 6) +#define MT8365_PIN_27_SPI_CK__FUNC_DBG_MON_A25 (MTK_PIN_NO(27) | 7) + +#define MT8365_PIN_28_SPI_MI__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(28) | 1) +#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(28) | 2) +#define MT8365_PIN_28_SPI_MI__FUNC_APU_JTAG_TDI (MTK_PIN_NO(28) | 3) +#define MT8365_PIN_28_SPI_MI__FUNC_UDI_TDI_XI (MTK_PIN_NO(28) | 4) +#define MT8365_PIN_28_SPI_MI__FUNC_DFD_TDI_XI (MTK_PIN_NO(28) | 5) +#define MT8365_PIN_28_SPI_MI__FUNC_DSP_TEST_CK (MTK_PIN_NO(28) | 6) +#define MT8365_PIN_28_SPI_MI__FUNC_DBG_MON_A26 (MTK_PIN_NO(28) | 7) + +#define MT8365_PIN_29_SPI_MO__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(29) | 1) +#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(29) | 2) +#define MT8365_PIN_29_SPI_MO__FUNC_APU_JTAG_TDO (MTK_PIN_NO(29) | 3) +#define MT8365_PIN_29_SPI_MO__FUNC_UDI_TDO (MTK_PIN_NO(29) | 4) +#define MT8365_PIN_29_SPI_MO__FUNC_DFD_TDO (MTK_PIN_NO(29) | 5) +#define MT8365_PIN_29_SPI_MO__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(29) | 6) +#define MT8365_PIN_29_SPI_MO__FUNC_DBG_MON_A27 (MTK_PIN_NO(29) | 7) + +#define MT8365_PIN_30_JTMS__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT8365_PIN_30_JTMS__FUNC_JTMS (MTK_PIN_NO(30) | 1) +#define MT8365_PIN_30_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(30) | 2) +#define MT8365_PIN_30_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(30) | 3) +#define MT8365_PIN_30_JTMS__FUNC_MCU_SPM_TMS (MTK_PIN_NO(30) | 4) 
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(30) | 5) +#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 6) + +#define MT8365_PIN_31_JTCK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT8365_PIN_31_JTCK__FUNC_JTCK (MTK_PIN_NO(31) | 1) +#define MT8365_PIN_31_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(31) | 2) +#define MT8365_PIN_31_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(31) | 3) +#define MT8365_PIN_31_JTCK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(31) | 4) +#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(31) | 5) +#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(31) | 6) + +#define MT8365_PIN_32_JTDI__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT8365_PIN_32_JTDI__FUNC_JTDI (MTK_PIN_NO(32) | 1) +#define MT8365_PIN_32_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(32) | 2) +#define MT8365_PIN_32_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(32) | 3) +#define MT8365_PIN_32_JTDI__FUNC_MCU_SPM_TDI (MTK_PIN_NO(32) | 4) +#define MT8365_PIN_32_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 5) + +#define MT8365_PIN_33_JTDO__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT8365_PIN_33_JTDO__FUNC_JTDO (MTK_PIN_NO(33) | 1) +#define MT8365_PIN_33_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(33) | 2) +#define MT8365_PIN_33_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(33) | 3) +#define MT8365_PIN_33_JTDO__FUNC_MCU_SPM_TDO (MTK_PIN_NO(33) | 4) +#define MT8365_PIN_33_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(33) | 5) + +#define MT8365_PIN_34_JTRST__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT8365_PIN_34_JTRST__FUNC_JTRST (MTK_PIN_NO(34) | 1) +#define MT8365_PIN_34_JTRST__FUNC_DFD_NTRST_XI (MTK_PIN_NO(34) | 2) +#define MT8365_PIN_34_JTRST__FUNC_UDI_NTRST_XI (MTK_PIN_NO(34) | 3) +#define MT8365_PIN_34_JTRST__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(34) | 4) +#define MT8365_PIN_34_JTRST__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5) + +#define MT8365_PIN_35_URXD0__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT8365_PIN_35_URXD0__FUNC_URXD0 (MTK_PIN_NO(35) | 1) +#define MT8365_PIN_35_URXD0__FUNC_UTXD0 (MTK_PIN_NO(35) | 2) +#define MT8365_PIN_35_URXD0__FUNC_DSP_URXD0 (MTK_PIN_NO(35) | 7) + +#define MT8365_PIN_36_UTXD0__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT8365_PIN_36_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(36) | 1) +#define MT8365_PIN_36_UTXD0__FUNC_URXD0 (MTK_PIN_NO(36) | 2) +#define MT8365_PIN_36_UTXD0__FUNC_DSP_UTXD0 (MTK_PIN_NO(36) | 7) + +#define MT8365_PIN_37_URXD1__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT8365_PIN_37_URXD1__FUNC_URXD1 (MTK_PIN_NO(37) | 1) +#define MT8365_PIN_37_URXD1__FUNC_UTXD1 (MTK_PIN_NO(37) | 2) +#define MT8365_PIN_37_URXD1__FUNC_UCTS2 (MTK_PIN_NO(37) | 3) +#define MT8365_PIN_37_URXD1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(37) | 4) +#define MT8365_PIN_37_URXD1__FUNC_CONN_UART0_RXD (MTK_PIN_NO(37) | 5) +#define MT8365_PIN_37_URXD1__FUNC_I2S0_MCK (MTK_PIN_NO(37) | 6) +#define MT8365_PIN_37_URXD1__FUNC_DSP_URXD0 (MTK_PIN_NO(37) | 7) + +#define MT8365_PIN_38_UTXD1__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT8365_PIN_38_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(38) | 1) +#define MT8365_PIN_38_UTXD1__FUNC_URXD1 (MTK_PIN_NO(38) | 2) +#define MT8365_PIN_38_UTXD1__FUNC_URTS2 (MTK_PIN_NO(38) | 3) +#define MT8365_PIN_38_UTXD1__FUNC_ANT_SEL2 (MTK_PIN_NO(38) | 4) +#define MT8365_PIN_38_UTXD1__FUNC_CONN_UART0_TXD (MTK_PIN_NO(38) | 5) +#define MT8365_PIN_38_UTXD1__FUNC_I2S1_MCK (MTK_PIN_NO(38) | 6) +#define MT8365_PIN_38_UTXD1__FUNC_DSP_UTXD0 (MTK_PIN_NO(38) | 7) + +#define MT8365_PIN_39_URXD2__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT8365_PIN_39_URXD2__FUNC_URXD2 (MTK_PIN_NO(39) | 1) +#define MT8365_PIN_39_URXD2__FUNC_UTXD2 (MTK_PIN_NO(39) | 2) +#define 
MT8365_PIN_39_URXD2__FUNC_UCTS1 (MTK_PIN_NO(39) | 3) +#define MT8365_PIN_39_URXD2__FUNC_IDDIG (MTK_PIN_NO(39) | 4) +#define MT8365_PIN_39_URXD2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(39) | 5) +#define MT8365_PIN_39_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(39) | 6) +#define MT8365_PIN_39_URXD2__FUNC_DSP_URXD0 (MTK_PIN_NO(39) | 7) + +#define MT8365_PIN_40_UTXD2__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT8365_PIN_40_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(40) | 1) +#define MT8365_PIN_40_UTXD2__FUNC_URXD2 (MTK_PIN_NO(40) | 2) +#define MT8365_PIN_40_UTXD2__FUNC_URTS1 (MTK_PIN_NO(40) | 3) +#define MT8365_PIN_40_UTXD2__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 4) +#define MT8365_PIN_40_UTXD2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(40) | 5) +#define MT8365_PIN_40_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(40) | 6) +#define MT8365_PIN_40_UTXD2__FUNC_DSP_UTXD0 (MTK_PIN_NO(40) | 7) + +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(41) | 1) +#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(41) | 2) + +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1) +#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2) + +#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(43) | 1) + +#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(44) | 1) + +#define MT8365_PIN_45_RTC32K_CK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT8365_PIN_45_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(45) | 1) + +#define MT8365_PIN_46_WATCHDOG__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT8365_PIN_46_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(46) | 1) + +#define MT8365_PIN_47_SRCLKENA0__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(47) | 1) +#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA1 (MTK_PIN_NO(47) | 2) + +#define MT8365_PIN_48_SRCLKENA1__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT8365_PIN_48_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(48) | 1) + +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(49) | 1) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MISO (MTK_PIN_NO(49) | 2) +#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_I2S1_MCK (MTK_PIN_NO(49) | 3) + +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(50) | 1) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(50) | 2) +#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_I2S1_BCK (MTK_PIN_NO(50) | 3) + +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(51) | 1) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(51) | 2) +#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_I2S1_LRCK (MTK_PIN_NO(51) | 3) + +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(52) | 1) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(52) | 2) +#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_I2S1_DO (MTK_PIN_NO(52) | 3) + +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define 
MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO (MTK_PIN_NO(53) | 1) +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(53) | 2) +#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_I2S2_MCK (MTK_PIN_NO(53) | 3) + +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(54) | 1) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(54) | 2) +#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_I2S2_BCK (MTK_PIN_NO(54) | 3) + +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(55) | 1) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(55) | 2) +#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_I2S2_LRCK (MTK_PIN_NO(55) | 3) + +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(56) | 1) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(56) | 2) +#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_I2S2_DI (MTK_PIN_NO(56) | 3) + +#define MT8365_PIN_57_SDA0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT8365_PIN_57_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(57) | 1) + +#define MT8365_PIN_58_SCL0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT8365_PIN_58_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(58) | 1) + +#define MT8365_PIN_59_SDA1__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define MT8365_PIN_59_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(59) | 1) +#define MT8365_PIN_59_SDA1__FUNC_USB_SDA (MTK_PIN_NO(59) | 6) +#define MT8365_PIN_59_SDA1__FUNC_DBG_SDA (MTK_PIN_NO(59) | 7) + +#define MT8365_PIN_60_SCL1__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT8365_PIN_60_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(60) | 1) +#define MT8365_PIN_60_SCL1__FUNC_USB_SCL (MTK_PIN_NO(60) | 6) +#define MT8365_PIN_60_SCL1__FUNC_DBG_SCL (MTK_PIN_NO(60) | 7) + +#define MT8365_PIN_61_SDA2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT8365_PIN_61_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(61) | 1) + +#define MT8365_PIN_62_SCL2__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT8365_PIN_62_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(62) | 1) + +#define MT8365_PIN_63_SDA3__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT8365_PIN_63_SDA3__FUNC_SDA3_0 (MTK_PIN_NO(63) | 1) + +#define MT8365_PIN_64_SCL3__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT8365_PIN_64_SCL3__FUNC_SCL3_0 (MTK_PIN_NO(64) | 1) + +#define MT8365_PIN_65_CMMCLK0__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK0 (MTK_PIN_NO(65) | 1) +#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK1 (MTK_PIN_NO(65) | 2) +#define MT8365_PIN_65_CMMCLK0__FUNC_DBG_MON_A28 (MTK_PIN_NO(65) | 7) + +#define MT8365_PIN_66_CMMCLK1__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK1 (MTK_PIN_NO(66) | 1) +#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK0 (MTK_PIN_NO(66) | 2) +#define MT8365_PIN_66_CMMCLK1__FUNC_DBG_MON_B2 (MTK_PIN_NO(66) | 7) + +#define MT8365_PIN_67_CMPCLK__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT8365_PIN_67_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(67) | 1) +#define MT8365_PIN_67_CMPCLK__FUNC_ANT_SEL0 (MTK_PIN_NO(67) | 2) +#define MT8365_PIN_67_CMPCLK__FUNC_TDM_RX_BCK (MTK_PIN_NO(67) | 4) +#define MT8365_PIN_67_CMPCLK__FUNC_I2S0_BCK (MTK_PIN_NO(67) | 5) +#define MT8365_PIN_67_CMPCLK__FUNC_DBG_MON_B3 (MTK_PIN_NO(67) | 7) + +#define MT8365_PIN_68_CMDAT0__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT8365_PIN_68_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(68) | 1) +#define MT8365_PIN_68_CMDAT0__FUNC_ANT_SEL1 (MTK_PIN_NO(68) | 2) +#define 
MT8365_PIN_68_CMDAT0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(68) | 4) +#define MT8365_PIN_68_CMDAT0__FUNC_I2S0_LRCK (MTK_PIN_NO(68) | 5) +#define MT8365_PIN_68_CMDAT0__FUNC_DBG_MON_B4 (MTK_PIN_NO(68) | 7) + +#define MT8365_PIN_69_CMDAT1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT8365_PIN_69_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(69) | 1) +#define MT8365_PIN_69_CMDAT1__FUNC_ANT_SEL2 (MTK_PIN_NO(69) | 2) +#define MT8365_PIN_69_CMDAT1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 3) +#define MT8365_PIN_69_CMDAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(69) | 4) +#define MT8365_PIN_69_CMDAT1__FUNC_I2S0_MCK (MTK_PIN_NO(69) | 5) +#define MT8365_PIN_69_CMDAT1__FUNC_DBG_MON_B5 (MTK_PIN_NO(69) | 7) + +#define MT8365_PIN_70_CMDAT2__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT8365_PIN_70_CMDAT2__FUNC_CMDAT2 (MTK_PIN_NO(70) | 1) +#define MT8365_PIN_70_CMDAT2__FUNC_ANT_SEL3 (MTK_PIN_NO(70) | 2) +#define MT8365_PIN_70_CMDAT2__FUNC_TDM_RX_DI (MTK_PIN_NO(70) | 4) +#define MT8365_PIN_70_CMDAT2__FUNC_I2S0_DI (MTK_PIN_NO(70) | 5) +#define MT8365_PIN_70_CMDAT2__FUNC_DBG_MON_B6 (MTK_PIN_NO(70) | 7) + +#define MT8365_PIN_71_CMDAT3__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT8365_PIN_71_CMDAT3__FUNC_CMDAT3 (MTK_PIN_NO(71) | 1) +#define MT8365_PIN_71_CMDAT3__FUNC_ANT_SEL4 (MTK_PIN_NO(71) | 2) +#define MT8365_PIN_71_CMDAT3__FUNC_DBG_MON_B7 (MTK_PIN_NO(71) | 7) + +#define MT8365_PIN_72_CMDAT4__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT8365_PIN_72_CMDAT4__FUNC_CMDAT4 (MTK_PIN_NO(72) | 1) +#define MT8365_PIN_72_CMDAT4__FUNC_ANT_SEL5 (MTK_PIN_NO(72) | 2) +#define MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK (MTK_PIN_NO(72) | 5) +#define MT8365_PIN_72_CMDAT4__FUNC_DBG_MON_B8 (MTK_PIN_NO(72) | 7) + +#define MT8365_PIN_73_CMDAT5__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT8365_PIN_73_CMDAT5__FUNC_CMDAT5 (MTK_PIN_NO(73) | 1) +#define MT8365_PIN_73_CMDAT5__FUNC_ANT_SEL6 (MTK_PIN_NO(73) | 2) +#define MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK (MTK_PIN_NO(73) | 5) +#define MT8365_PIN_73_CMDAT5__FUNC_DBG_MON_B9 (MTK_PIN_NO(73) | 7) + +#define MT8365_PIN_74_CMDAT6__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT8365_PIN_74_CMDAT6__FUNC_CMDAT6 (MTK_PIN_NO(74) | 1) +#define MT8365_PIN_74_CMDAT6__FUNC_ANT_SEL7 (MTK_PIN_NO(74) | 2) +#define MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK (MTK_PIN_NO(74) | 5) +#define MT8365_PIN_74_CMDAT6__FUNC_DBG_MON_B10 (MTK_PIN_NO(74) | 7) + +#define MT8365_PIN_75_CMDAT7__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT8365_PIN_75_CMDAT7__FUNC_CMDAT7 (MTK_PIN_NO(75) | 1) +#define MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO (MTK_PIN_NO(75) | 5) +#define MT8365_PIN_75_CMDAT7__FUNC_DBG_MON_B11 (MTK_PIN_NO(75) | 7) + +#define MT8365_PIN_76_CMDAT8__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT8365_PIN_76_CMDAT8__FUNC_CMDAT8 (MTK_PIN_NO(76) | 1) +#define MT8365_PIN_76_CMDAT8__FUNC_PCM_CLK (MTK_PIN_NO(76) | 5) +#define MT8365_PIN_76_CMDAT8__FUNC_DBG_MON_A29 (MTK_PIN_NO(76) | 7) + +#define MT8365_PIN_77_CMDAT9__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT8365_PIN_77_CMDAT9__FUNC_CMDAT9 (MTK_PIN_NO(77) | 1) +#define MT8365_PIN_77_CMDAT9__FUNC_PCM_SYNC (MTK_PIN_NO(77) | 5) +#define MT8365_PIN_77_CMDAT9__FUNC_DBG_MON_A30 (MTK_PIN_NO(77) | 7) + +#define MT8365_PIN_78_CMHSYNC__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT8365_PIN_78_CMHSYNC__FUNC_CMHSYNC (MTK_PIN_NO(78) | 1) +#define MT8365_PIN_78_CMHSYNC__FUNC_PCM_RX (MTK_PIN_NO(78) | 5) +#define MT8365_PIN_78_CMHSYNC__FUNC_DBG_MON_A31 (MTK_PIN_NO(78) | 7) + +#define MT8365_PIN_79_CMVSYNC__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT8365_PIN_79_CMVSYNC__FUNC_CMVSYNC (MTK_PIN_NO(79) | 1) +#define MT8365_PIN_79_CMVSYNC__FUNC_PCM_TX 
(MTK_PIN_NO(79) | 5) +#define MT8365_PIN_79_CMVSYNC__FUNC_DBG_MON_A32 (MTK_PIN_NO(79) | 7) + +#define MT8365_PIN_80_MSDC2_CMD__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(80) | 1) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_TDM_TX_LRCK (MTK_PIN_NO(80) | 2) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_UTXD1 (MTK_PIN_NO(80) | 3) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_DPI_D19 (MTK_PIN_NO(80) | 4) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_UDI_TMS_XI (MTK_PIN_NO(80) | 5) +#define MT8365_PIN_80_MSDC2_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(80) | 6) + +#define MT8365_PIN_81_MSDC2_CLK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(81) | 1) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_TDM_TX_BCK (MTK_PIN_NO(81) | 2) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_URXD1 (MTK_PIN_NO(81) | 3) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_DPI_D20 (MTK_PIN_NO(81) | 4) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_UDI_TCK_XI (MTK_PIN_NO(81) | 5) +#define MT8365_PIN_81_MSDC2_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(81) | 6) + +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(82) | 2) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UTXD2 (MTK_PIN_NO(82) | 3) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_DPI_D21 (MTK_PIN_NO(82) | 4) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UDI_TDI_XI (MTK_PIN_NO(82) | 5) +#define MT8365_PIN_82_MSDC2_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(82) | 6) + +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(83) | 1) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(83) | 2) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_URXD2 (MTK_PIN_NO(83) | 3) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_DPI_D22 (MTK_PIN_NO(83) | 4) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_UDI_TDO (MTK_PIN_NO(83) | 5) +#define MT8365_PIN_83_MSDC2_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(83) | 6) + +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(84) | 1) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(84) | 2) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_PWM_A (MTK_PIN_NO(84) | 3) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(84) | 4) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_UDI_NTRST_XI (MTK_PIN_NO(84) | 5) +#define MT8365_PIN_84_MSDC2_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(84) | 6) + +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(85) | 1) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(85) | 2) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_PWM_B (MTK_PIN_NO(85) | 3) +#define MT8365_PIN_85_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(85) | 5) + +#define MT8365_PIN_86_MSDC2_DSL__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_MSDC2_DSL (MTK_PIN_NO(86) | 1) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_TDM_TX_MCK (MTK_PIN_NO(86) | 2) +#define MT8365_PIN_86_MSDC2_DSL__FUNC_PWM_C (MTK_PIN_NO(86) | 3) + +#define MT8365_PIN_87_MSDC1_CMD__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(87) | 1) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(87) | 2) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_DFD_TMS_XI (MTK_PIN_NO(87) | 3) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_APU_JTAG_TMS 
(MTK_PIN_NO(87) | 4) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_MCU_SPM_TMS (MTK_PIN_NO(87) | 5) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_DSP_JMS (MTK_PIN_NO(87) | 6) +#define MT8365_PIN_87_MSDC1_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(87) | 7) + +#define MT8365_PIN_88_MSDC1_CLK__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(88) | 1) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 2) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 3) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(88) | 4) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(88) | 5) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(88) | 6) +#define MT8365_PIN_88_MSDC1_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(88) | 7) + +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(89) | 1) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_PWM_C (MTK_PIN_NO(89) | 2) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_DFD_TDI_XI (MTK_PIN_NO(89) | 3) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_APU_JTAG_TDI (MTK_PIN_NO(89) | 4) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MCU_SPM_TDI (MTK_PIN_NO(89) | 5) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_CONN_DSP_JDI (MTK_PIN_NO(89) | 6) +#define MT8365_PIN_89_MSDC1_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(89) | 7) + +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(90) | 1) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_SPDIF_IN (MTK_PIN_NO(90) | 2) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_DFD_TDO (MTK_PIN_NO(90) | 3) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_APU_JTAG_TDO (MTK_PIN_NO(90) | 4) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MCU_SPM_TDO (MTK_PIN_NO(90) | 5) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_CONN_DSP_JDO (MTK_PIN_NO(90) | 6) +#define MT8365_PIN_90_MSDC1_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(90) | 7) + +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(91) | 1) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_SPDIF_OUT (MTK_PIN_NO(91) | 2) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_DFD_NTRST_XI (MTK_PIN_NO(91) | 3) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_APU_JTAG_TRST (MTK_PIN_NO(91) | 4) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(91) | 5) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(91) | 6) +#define MT8365_PIN_91_MSDC1_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(91) | 7) + +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(92) | 1) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_IRRX (MTK_PIN_NO(92) | 2) +#define MT8365_PIN_92_MSDC1_DAT3__FUNC_PWM_A (MTK_PIN_NO(92) | 3) + +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(93) | 1) +#define MT8365_PIN_93_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(93) | 2) + +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(94) | 1) +#define MT8365_PIN_94_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(94) | 2) + +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(95) | 1) +#define MT8365_PIN_95_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(95) | 2) + +#define MT8365_PIN_96_MSDC0_DAT4__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define 
MT8365_PIN_96_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(96) | 1) +#define MT8365_PIN_96_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(96) | 2) + +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(97) | 1) +#define MT8365_PIN_97_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(97) | 2) + +#define MT8365_PIN_98_MSDC0_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT8365_PIN_98_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(98) | 1) +#define MT8365_PIN_98_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(98) | 2) + +#define MT8365_PIN_99_MSDC0_CLK__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(99) | 1) +#define MT8365_PIN_99_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(99) | 2) + +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(100) | 1) +#define MT8365_PIN_100_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(100) | 2) + +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(101) | 1) +#define MT8365_PIN_101_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(101) | 2) + +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(102) | 1) +#define MT8365_PIN_102_MSDC0_DAT1__FUNC_NDQS (MTK_PIN_NO(102) | 2) + +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(103) | 1) +#define MT8365_PIN_103_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(103) | 2) + +#define MT8365_PIN_104_MSDC0_DSL__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT8365_PIN_104_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(104) | 1) + +#define MT8365_PIN_105_NCLE__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT8365_PIN_105_NCLE__FUNC_NCLE (MTK_PIN_NO(105) | 1) +#define MT8365_PIN_105_NCLE__FUNC_TDM_RX_MCK (MTK_PIN_NO(105) | 2) +#define MT8365_PIN_105_NCLE__FUNC_DBG_MON_B12 (MTK_PIN_NO(105) | 7) + +#define MT8365_PIN_106_NCEB1__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT8365_PIN_106_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(106) | 1) +#define MT8365_PIN_106_NCEB1__FUNC_TDM_RX_BCK (MTK_PIN_NO(106) | 2) +#define MT8365_PIN_106_NCEB1__FUNC_DBG_MON_B13 (MTK_PIN_NO(106) | 7) + +#define MT8365_PIN_107_NCEB0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT8365_PIN_107_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(107) | 1) +#define MT8365_PIN_107_NCEB0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(107) | 2) +#define MT8365_PIN_107_NCEB0__FUNC_DBG_MON_B14 (MTK_PIN_NO(107) | 7) + +#define MT8365_PIN_108_NREB__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT8365_PIN_108_NREB__FUNC_NREB (MTK_PIN_NO(108) | 1) +#define MT8365_PIN_108_NREB__FUNC_TDM_RX_DI (MTK_PIN_NO(108) | 2) +#define MT8365_PIN_108_NREB__FUNC_DBG_MON_B15 (MTK_PIN_NO(108) | 7) + +#define MT8365_PIN_109_NRNB__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT8365_PIN_109_NRNB__FUNC_NRNB (MTK_PIN_NO(109) | 1) +#define MT8365_PIN_109_NRNB__FUNC_TSF_IN (MTK_PIN_NO(109) | 2) +#define MT8365_PIN_109_NRNB__FUNC_DBG_MON_B16 (MTK_PIN_NO(109) | 7) + +#define MT8365_PIN_110_PCM_CLK__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT8365_PIN_110_PCM_CLK__FUNC_PCM_CLK (MTK_PIN_NO(110) | 1) +#define MT8365_PIN_110_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(110) | 2) +#define MT8365_PIN_110_PCM_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 3) +#define MT8365_PIN_110_PCM_CLK__FUNC_SPDIF_IN (MTK_PIN_NO(110) | 4) +#define MT8365_PIN_110_PCM_CLK__FUNC_DPI_D15 (MTK_PIN_NO(110) | 5) + +#define MT8365_PIN_111_PCM_SYNC__FUNC_GPIO111 
(MTK_PIN_NO(111) | 0) +#define MT8365_PIN_111_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(111) | 1) +#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(111) | 2) +#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 3) +#define MT8365_PIN_111_PCM_SYNC__FUNC_SPDIF_OUT (MTK_PIN_NO(111) | 4) +#define MT8365_PIN_111_PCM_SYNC__FUNC_DPI_D16 (MTK_PIN_NO(111) | 5) + +#define MT8365_PIN_112_PCM_RX__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT8365_PIN_112_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(112) | 1) +#define MT8365_PIN_112_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2) +#define MT8365_PIN_112_PCM_RX__FUNC_I2S3_MCK (MTK_PIN_NO(112) | 3) +#define MT8365_PIN_112_PCM_RX__FUNC_IRRX (MTK_PIN_NO(112) | 4) +#define MT8365_PIN_112_PCM_RX__FUNC_DPI_D17 (MTK_PIN_NO(112) | 5) + +#define MT8365_PIN_113_PCM_TX__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT8365_PIN_113_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(113) | 1) +#define MT8365_PIN_113_PCM_TX__FUNC_I2S0_MCK (MTK_PIN_NO(113) | 2) +#define MT8365_PIN_113_PCM_TX__FUNC_I2S3_DO (MTK_PIN_NO(113) | 3) +#define MT8365_PIN_113_PCM_TX__FUNC_PWM_B (MTK_PIN_NO(113) | 4) +#define MT8365_PIN_113_PCM_TX__FUNC_DPI_D18 (MTK_PIN_NO(113) | 5) + +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(114) | 1) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S1_DO (MTK_PIN_NO(114) | 2) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S2_DI (MTK_PIN_NO(114) | 3) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(114) | 4) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(114) | 5) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_SPDIF_IN (MTK_PIN_NO(114) | 6) +#define MT8365_PIN_114_I2S_DATA_IN__FUNC_DBG_MON_B17 (MTK_PIN_NO(114) | 7) + +#define MT8365_PIN_115_I2S_LRCK__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(115) | 1) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(115) | 2) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S2_LRCK (MTK_PIN_NO(115) | 3) +#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(115) | 4) +#define MT8365_PIN_115_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(115) | 5) +#define MT8365_PIN_115_I2S_LRCK__FUNC_SPDIF_OUT (MTK_PIN_NO(115) | 6) +#define MT8365_PIN_115_I2S_LRCK__FUNC_DBG_MON_B18 (MTK_PIN_NO(115) | 7) + +#define MT8365_PIN_116_I2S_BCK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(116) | 1) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(116) | 2) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(116) | 3) +#define MT8365_PIN_116_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(116) | 4) +#define MT8365_PIN_116_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(116) | 5) +#define MT8365_PIN_116_I2S_BCK__FUNC_IRRX (MTK_PIN_NO(116) | 6) +#define MT8365_PIN_116_I2S_BCK__FUNC_DBG_MON_B19 (MTK_PIN_NO(116) | 7) + +#define MT8365_PIN_117_DMIC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK (MTK_PIN_NO(117) | 1) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_I2S2_BCK (MTK_PIN_NO(117) | 2) +#define MT8365_PIN_117_DMIC0_CLK__FUNC_DBG_MON_B20 (MTK_PIN_NO(117) | 7) + +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0 (MTK_PIN_NO(118) | 1) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_I2S2_DI (MTK_PIN_NO(118) | 2) +#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DBG_MON_B21 (MTK_PIN_NO(118) | 7) + +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define 
MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1 (MTK_PIN_NO(119) | 1) +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_I2S2_LRCK (MTK_PIN_NO(119) | 2) +#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DBG_MON_B22 (MTK_PIN_NO(119) | 7) + +#define MT8365_PIN_120_DMIC1_CLK__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_DMIC1_CLK (MTK_PIN_NO(120) | 1) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_I2S2_MCK (MTK_PIN_NO(120) | 2) +#define MT8365_PIN_120_DMIC1_CLK__FUNC_DBG_MON_B23 (MTK_PIN_NO(120) | 7) + +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DMIC1_DAT0 (MTK_PIN_NO(121) | 1) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_I2S1_BCK (MTK_PIN_NO(121) | 2) +#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DBG_MON_B24 (MTK_PIN_NO(121) | 7) + +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DMIC1_DAT1 (MTK_PIN_NO(122) | 1) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_I2S1_LRCK (MTK_PIN_NO(122) | 2) +#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DBG_MON_B25 (MTK_PIN_NO(122) | 7) + +#define MT8365_PIN_123_DMIC2_CLK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_DMIC2_CLK (MTK_PIN_NO(123) | 1) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_I2S1_MCK (MTK_PIN_NO(123) | 2) +#define MT8365_PIN_123_DMIC2_CLK__FUNC_DBG_MON_B26 (MTK_PIN_NO(123) | 7) + +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DMIC2_DAT0 (MTK_PIN_NO(124) | 1) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_I2S1_DO (MTK_PIN_NO(124) | 2) +#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DBG_MON_B27 (MTK_PIN_NO(124) | 7) + +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DMIC2_DAT1 (MTK_PIN_NO(125) | 1) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(125) | 2) +#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DBG_MON_B28 (MTK_PIN_NO(125) | 7) + +#define MT8365_PIN_126_DMIC3_CLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT8365_PIN_126_DMIC3_CLK__FUNC_DMIC3_CLK (MTK_PIN_NO(126) | 1) +#define MT8365_PIN_126_DMIC3_CLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(126) | 2) + +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_DMIC3_DAT0 (MTK_PIN_NO(127) | 1) +#define MT8365_PIN_127_DMIC3_DAT0__FUNC_TDM_RX_DI (MTK_PIN_NO(127) | 2) + +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_DMIC3_DAT1 (MTK_PIN_NO(128) | 1) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(128) | 2) +#define MT8365_PIN_128_DMIC3_DAT1__FUNC_VAD_CLK (MTK_PIN_NO(128) | 3) + +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_TDM_TX_BCK (MTK_PIN_NO(129) | 1) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(129) | 2) +#define MT8365_PIN_129_TDM_TX_BCK__FUNC_ckmon1_ck (MTK_PIN_NO(129) | 3) + +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_TDM_TX_LRCK (MTK_PIN_NO(130) | 1) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(130) | 2) +#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_ckmon2_ck (MTK_PIN_NO(130) | 3) + +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_TDM_TX_MCK (MTK_PIN_NO(131) | 1) +#define MT8365_PIN_131_TDM_TX_MCK__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 2) +#define 
MT8365_PIN_131_TDM_TX_MCK__FUNC_ckmon3_ck (MTK_PIN_NO(131) | 3) + +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(132) | 1) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_I2S3_DO (MTK_PIN_NO(132) | 2) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_ckmon4_ck (MTK_PIN_NO(132) | 3) +#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_DBG_MON_B29 (MTK_PIN_NO(132) | 7) + +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(133) | 1) +#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_DBG_MON_B30 (MTK_PIN_NO(133) | 7) + +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(134) | 1) +#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_DBG_MON_B31 (MTK_PIN_NO(134) | 7) + +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(135) | 1) +#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_DBG_MON_B32 (MTK_PIN_NO(135) | 7) + +#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_CONN_TOP_CLK (MTK_PIN_NO(136) | 1) + +#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_CONN_TOP_DATA (MTK_PIN_NO(137) | 1) + +#define MT8365_PIN_138_CONN_HRST_B__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define MT8365_PIN_138_CONN_HRST_B__FUNC_CONN_HRST_B (MTK_PIN_NO(138) | 1) + +#define MT8365_PIN_139_CONN_WB_PTA__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define MT8365_PIN_139_CONN_WB_PTA__FUNC_CONN_WB_PTA (MTK_PIN_NO(139) | 1) + +#define MT8365_PIN_140_CONN_BT_CLK__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define MT8365_PIN_140_CONN_BT_CLK__FUNC_CONN_BT_CLK (MTK_PIN_NO(140) | 1) + +#define MT8365_PIN_141_CONN_BT_DATA__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define MT8365_PIN_141_CONN_BT_DATA__FUNC_CONN_BT_DATA (MTK_PIN_NO(141) | 1) + +#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(142) | 1) + +#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(143) | 1) + +#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(144) | 1) + +#endif /* __MT8365_PINFUNC_H */
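Each macro above ORs a function-select index (mode 0-7) into the value produced by MTK_PIN_NO() from the included mt65xx.h, so a board device tree can request a mux setting by name rather than by raw cell value. A minimal sketch of how such a header is consumed (the &pio label, node names, and the choice of the UART0 pins are illustrative assumptions, not part of this diff):

	&pio {
		uart0_pins: uart0-pins {
			pins {
				pinmux = <MT8365_PIN_35_URXD0__FUNC_URXD0>,
					 <MT8365_PIN_36_UTXD0__FUNC_UTXD0>;
			};
		};
	};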
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynq.h b/include/dt-bindings/pinctrl/pinctrl-zynq.h new file mode 100644 index 000000000000..bbfc345f017d --- /dev/null +++ b/include/dt-bindings/pinctrl/pinctrl-zynq.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MIO pin configuration defines for Xilinx Zynq + * + * Copyright (C) 2021 Xilinx, Inc. + */ + +#ifndef _DT_BINDINGS_PINCTRL_ZYNQ_H +#define _DT_BINDINGS_PINCTRL_ZYNQ_H + +/* Configuration options for different power supplies */ +#define IO_STANDARD_LVCMOS18 1 +#define IO_STANDARD_LVCMOS25 2 +#define IO_STANDARD_LVCMOS33 3 +#define IO_STANDARD_HSTL 4 + +#endif /* _DT_BINDINGS_PINCTRL_ZYNQ_H */ diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h new file mode 100644 index 000000000000..b48f8c7a5556 --- /dev/null +++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for Renesas RZ/G2L family pinctrl bindings. + * + * Copyright (C) 2021 Renesas Electronics Corp. + * + */ + +#ifndef __DT_BINDINGS_RZG2L_PINCTRL_H +#define __DT_BINDINGS_RZG2L_PINCTRL_H + +#define RZG2L_PINS_PER_PORT 8 + +/* + * Create the pin index from its bank and position numbers and store in
 * the upper 16 bits the alternate function identifier + */ +#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16)) + +/* Convert a port and pin label to its global pin index */ +#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin)) + +#endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */
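To make the RZG2L_PORT_PINMUX() encoding concrete: since * and + bind tighter than |, RZG2L_PORT_PINMUX(4, 2, 3) evaluates to ((4 * 8 + 2) | (3 << 16)), i.e. global pin index 34 with alternate function 3 held in the upper 16 bits. A hedged device-tree sketch of typical usage (the port, pin, and function numbers and the node names are illustrative assumptions, not taken from this diff):

	&pinctrl {
		scif0_pins: serial0 {
			pinmux = <RZG2L_PORT_PINMUX(38, 0, 1)>,	/* hypothetical TxD: port 38, pin 0, function 1 */
				 <RZG2L_PORT_PINMUX(38, 1, 1)>;	/* hypothetical RxD: port 38, pin 1, function 1 */
		};
	};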
RPMH_REGULATOR_LEVEL_TURBO 384 #define RPMH_REGULATOR_LEVEL_TURBO_L1 416 +/* MDM9607 Power Domains */ +#define MDM9607_VDDCX 0 +#define MDM9607_VDDCX_AO 1 +#define MDM9607_VDDCX_VFL 2 +#define MDM9607_VDDMX 3 +#define MDM9607_VDDMX_AO 4 +#define MDM9607_VDDMX_VFL 5 + /* MSM8939 Power Domains */ #define MSM8939_VDDMDCX 0 #define MSM8939_VDDMDCX_AO 1 @@ -171,6 +192,16 @@ #define SDM660_SSCMX 8 #define SDM660_SSCMX_VFL 9 +/* SM6115 Power Domains */ +#define SM6115_VDDCX 0 +#define SM6115_VDDCX_AO 1 +#define SM6115_VDDCX_VFL 2 +#define SM6115_VDDMX 3 +#define SM6115_VDDMX_AO 4 +#define SM6115_VDDMX_VFL 5 +#define SM6115_VDD_LPI_CX 6 +#define SM6115_VDD_LPI_MX 7 + /* RPM SMD Power Domain performance levels */ #define RPM_SMD_LEVEL_RETENTION 16 #define RPM_SMD_LEVEL_RETENTION_PLUS 32 diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h new file mode 100644 index 000000000000..6cc1af1a9d26 --- /dev/null +++ b/include/dt-bindings/power/rk3568-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__ +#define __DT_BINDINGS_POWER_RK3568_POWER_H__ + +/* VD_CORE */ +#define RK3568_PD_CPU_0 0 +#define RK3568_PD_CPU_1 1 +#define RK3568_PD_CPU_2 2 +#define RK3568_PD_CPU_3 3 +#define RK3568_PD_CORE_ALIVE 4 + +/* VD_PMU */ +#define RK3568_PD_PMU 5 + +/* VD_NPU */ +#define RK3568_PD_NPU 6 + +/* VD_GPU */ +#define RK3568_PD_GPU 7 + +/* VD_LOGIC */ +#define RK3568_PD_VI 8 +#define RK3568_PD_VO 9 +#define RK3568_PD_RGA 10 +#define RK3568_PD_VPU 11 +#define RK3568_PD_CENTER 12 +#define RK3568_PD_RKVDEC 13 +#define RK3568_PD_RKVENC 14 +#define RK3568_PD_PIPE 15 +#define RK3568_PD_LOGIC_ALIVE 16 + +#endif diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h index d918bf321a71..3205699b5e41 100644 --- a/include/dt-bindings/power/summit,smb347-charger.h +++ b/include/dt-bindings/power/summit,smb347-charger.h @@ -16,4 +16,8 @@ #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW 1 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH 2 +/* Polarity of INOK signal */ +#define SMB3XX_SYSOK_INOK_ACTIVE_LOW 0 +#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH 1 + #endif diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h new file mode 100644 index 000000000000..a26bccc8b957 --- /dev/null +++ b/include/dt-bindings/reset/mt8195-resets.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)*/ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ * Author: Christine Zhu <christine.zhu@mediatek.com> + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8195 + +#define MT8195_TOPRGU_CONN_MCU_SW_RST 0 +#define MT8195_TOPRGU_INFRA_GRST_SW_RST 1 +#define MT8195_TOPRGU_APU_SW_RST 2 +#define MT8195_TOPRGU_INFRA_AO_GRST_SW_RST 6 +#define MT8195_TOPRGU_MMSYS_SW_RST 7 +#define MT8195_TOPRGU_MFG_SW_RST 8 +#define MT8195_TOPRGU_VENC_SW_RST 9 +#define MT8195_TOPRGU_VDEC_SW_RST 10 +#define MT8195_TOPRGU_IMG_SW_RST 11 +#define MT8195_TOPRGU_APMIXEDSYS_SW_RST 13 +#define MT8195_TOPRGU_AUDIO_SW_RST 14 +#define MT8195_TOPRGU_CAMSYS_SW_RST 15 +#define MT8195_TOPRGU_EDPTX_SW_RST 16 +#define MT8195_TOPRGU_ADSPSYS_SW_RST 21 +#define MT8195_TOPRGU_DPTX_SW_RST 22 +#define MT8195_TOPRGU_SPMI_MST_SW_RST 23 + +#define MT8195_TOPRGU_SW_RST_NUM 16 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */ diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h index 53c37f9c319a..03a0c0eb8147 100644 --- a/include/dt-bindings/reset/qcom,sdm845-pdc.h +++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h @@ -16,5 +16,7 @@ #define PDC_DISPLAY_SYNC_RESET 7 #define PDC_COMPUTE_SYNC_RESET 8 #define PDC_MODEM_SYNC_RESET 9 +#define PDC_WLAN_RF_SYNC_RESET 10 +#define PDC_WPSS_SYNC_RESET 11 #endif diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h index f0c3aaef67a0..f3a0ed317835 100644 --- a/include/dt-bindings/reset/stm32mp1-resets.h +++ b/include/dt-bindings/reset/stm32mp1-resets.h @@ -7,6 +7,7 @@ #ifndef _DT_BINDINGS_STM32MP1_RESET_H_ #define _DT_BINDINGS_STM32MP1_RESET_H_ +#define MCU_HOLD_BOOT_R 2144 #define LTDC_R 3072 #define DSI_R 3076 #define DDRPERFM_R 3080 @@ -105,4 +106,18 @@ #define GPIOJ_R 19785 #define GPIOK_R 19786 +/* SCMI reset domain identifiers */ +#define RST_SCMI0_SPI6 0 +#define RST_SCMI0_I2C4 1 +#define RST_SCMI0_I2C6 2 +#define RST_SCMI0_USART1 3 +#define RST_SCMI0_STGEN 4 +#define RST_SCMI0_GPIOZ 5 +#define RST_SCMI0_CRYP1 6 +#define RST_SCMI0_HASH1 7 +#define RST_SCMI0_RNG1 8 +#define RST_SCMI0_MDMA 9 +#define RST_SCMI0_MCU 10 +#define RST_SCMI0_MCU_HOLD_BOOT 11 + #endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */ diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h deleted file mode 100644 index df044705a5ec..000000000000 --- a/include/dt-bindings/soc/zte,pm_domains.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2017 Linaro Ltd. 
- * - * Author: Baoyou Xie <baoyou.xie@linaro.org> - */ - -#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H -#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H - -#define DM_ZX296718_SAPPU 0 -#define DM_ZX296718_VDE 1 /* g1v6 */ -#define DM_ZX296718_VCE 2 /* h1v6 */ -#define DM_ZX296718_HDE 3 /* g2v2 */ -#define DM_ZX296718_VIU 4 -#define DM_ZX296718_USB20 5 -#define DM_ZX296718_USB21 6 -#define DM_ZX296718_USB30 7 -#define DM_ZX296718_HSIC 8 -#define DM_ZX296718_GMAC 9 -#define DM_ZX296718_TS 10 -#define DM_ZX296718_VOU 11 - -#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */ diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h index f64b5d2e6efd..66c21ab03eef 100644 --- a/include/dt-bindings/sound/qcom,q6afe.h +++ b/include/dt-bindings/sound/qcom,q6afe.h @@ -129,6 +129,8 @@ #define TX_CODEC_DMA_TX_5 124 #define RX_CODEC_DMA_RX_6 125 #define RX_CODEC_DMA_RX_7 126 +#define QUINARY_MI2S_RX 127 +#define QUINARY_MI2S_TX 128 #define LPASS_CLK_ID_PRI_MI2S_IBIT 1 #define LPASS_CLK_ID_PRI_MI2S_EBIT 2 diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h index fef3ef65967f..e6526b138174 100644 --- a/include/dt-bindings/usb/pd.h +++ b/include/dt-bindings/usb/pd.h @@ -106,6 +106,10 @@ * <20:16> :: Reserved, Shall be set to zero * <15:0> :: USB-IF assigned VID for this cable vendor */ + +/* PD Rev2.0 definition */ +#define IDH_PTYPE_UNDEF 0 + /* SOP Product Type (UFP) */ #define IDH_PTYPE_NOT_UFP 0 #define IDH_PTYPE_HUB 1 @@ -163,10 +167,10 @@ #define UFP_VDO_VER1_2 2 /* Device Capability */ -#define DEV_USB2_CAPABLE BIT(0) -#define DEV_USB2_BILLBOARD BIT(1) -#define DEV_USB3_CAPABLE BIT(2) -#define DEV_USB4_CAPABLE BIT(3) +#define DEV_USB2_CAPABLE (1 << 0) +#define DEV_USB2_BILLBOARD (1 << 1) +#define DEV_USB3_CAPABLE (1 << 2) +#define DEV_USB4_CAPABLE (1 << 3) /* Connector Type */ #define UFP_RECEPTACLE 2 @@ -191,9 +195,9 @@ /* Alternate Modes */ #define UFP_ALTMODE_NOT_SUPP 0 -#define UFP_ALTMODE_TBT3 BIT(0) -#define UFP_ALTMODE_RECFG BIT(1) -#define UFP_ALTMODE_NO_RECFG BIT(2) +#define UFP_ALTMODE_TBT3 (1 << 0) +#define UFP_ALTMODE_RECFG (1 << 1) +#define UFP_ALTMODE_NO_RECFG (1 << 2) /* USB Highest Speed */ #define UFP_USB2_ONLY 0 @@ -217,9 +221,9 @@ * <4:0> :: Port number */ #define DFP_VDO_VER1_1 1 -#define HOST_USB2_CAPABLE BIT(0) -#define HOST_USB3_CAPABLE BIT(1) -#define HOST_USB4_CAPABLE BIT(2) +#define HOST_USB2_CAPABLE (1 << 0) +#define HOST_USB3_CAPABLE (1 << 1) +#define HOST_USB4_CAPABLE (1 << 2) #define DFP_RECEPTACLE 2 #define DFP_CAPTIVE 3 @@ -228,7 +232,25 @@ | ((pnum) & 0x1f)) /* - * Passive Cable VDO + * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0) + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:20> :: Reserved, Shall be set to zero + * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive) + * <17> :: Reserved, Shall be set to zero + * <16:13> :: cable latency (0001 == <10ns(~1m length)) + * <12:11> :: cable termination type (11b == both ends active VCONN req) + * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable) + * <9> :: SSTX2 Directionality support + * <8> :: SSRX1 Directionality support + * <7> :: SSRX2 Directionality support + * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A) + * <4> :: Vbus through cable (0b == no, 1b == yes) + * <3> :: SOP" controller present? 
(0b == no, 1b == yes) + * <2:0> :: USB SS Signaling support + * + * Passive Cable VDO (PD Rev3.0+) * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version @@ -244,7 +266,7 @@ * <4:3> :: Reserved, Shall be set to zero * <2:0> :: USB highest speed * - * Active Cable VDO 1 + * Active Cable VDO 1 (PD Rev3.0+) * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version @@ -266,7 +288,9 @@ #define CABLE_VDO_VER1_0 0 #define CABLE_VDO_VER1_3 3 -/* Connector Type */ +/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */ +#define CABLE_ATYPE 0 +#define CABLE_BTYPE 1 #define CABLE_CTYPE 2 #define CABLE_CAPTIVE 3 @@ -303,12 +327,22 @@ #define CABLE_CURR_3A 1 #define CABLE_CURR_5A 2 +/* USB SuperSpeed Signaling Support (PD Rev2.0) */ +#define CABLE_USBSS_U2_ONLY 0 +#define CABLE_USBSS_U31_GEN1 1 +#define CABLE_USBSS_U31_GEN2 2 + /* USB Highest Speed */ #define CABLE_USB2_ONLY 0 #define CABLE_USB32_GEN1 1 #define CABLE_USB32_4_GEN2 2 #define CABLE_USB4_GEN3 3 +#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \ + (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \ + | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \ + | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \ + | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7)) #define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \ @@ -374,6 +408,35 @@ | (iso) << 2 | (gen)) /* + * AMA VDO (PD Rev2.0) + * --------- + * <31:28> :: Cable HW version + * <27:24> :: Cable FW version + * <23:12> :: Reserved, Shall be set to zero + * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable) + * <10> :: SSTX2 Directionality support + * <9> :: SSRX1 Directionality support + * <8> :: SSRX2 Directionality support + * <7:5> :: Vconn power + * <4> :: Vconn power required + * <3> :: Vbus power required + * <2:0> :: USB SS Signaling support + */ +#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \ + (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \ + | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \ + | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \ + | ((usbss) & 0x7)) + +#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1) +#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1) + +#define AMA_USBSS_U2_ONLY 0 +#define AMA_USBSS_U31_GEN1 1 +#define AMA_USBSS_U31_GEN2 2 +#define AMA_USBSS_BBONLY 3 + +/* * VPD VDO * --------- * <31:28> :: HW version diff --git a/include/kunit/test.h b/include/kunit/test.h index 49601c4b98b8..24b40e5c160b 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -97,6 +97,9 @@ struct kunit; /* Maximum size of parameter description string. */ #define KUNIT_PARAM_DESC_SIZE 128 +/* Maximum size of a status comment. */ +#define KUNIT_STATUS_COMMENT_SIZE 256 + /* * TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a * sub-subtest. See the "Subtests" section in @@ -106,6 +109,18 @@ struct kunit; #define KUNIT_SUBSUBTEST_INDENT " " /** + * enum kunit_status - Type of result for a test or test suite + * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped + * @KUNIT_FAILURE: Denotes the test has failed. + * @KUNIT_SKIPPED: Denotes the test has been skipped. + */ +enum kunit_status { + KUNIT_SUCCESS, + KUNIT_FAILURE, + KUNIT_SKIPPED, +}; + +/** * struct kunit_case - represents an individual test case. 
* * @run_case: the function representing the actual test case. @@ -148,13 +163,20 @@ struct kunit_case { const void* (*generate_params)(const void *prev, char *desc); /* private: internal use only. */ - bool success; + enum kunit_status status; char *log; }; -static inline char *kunit_status_to_string(bool status) +static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) { - return status ? "ok" : "not ok"; + switch (status) { + case KUNIT_SKIPPED: + case KUNIT_SUCCESS: + return "ok"; + case KUNIT_FAILURE: + return "not ok"; + } + return "invalid"; } /** @@ -212,6 +234,7 @@ struct kunit_suite { struct kunit_case *test_cases; /* private: internal use only */ + char status_comment[KUNIT_STATUS_COMMENT_SIZE]; struct dentry *debugfs; char *log; }; @@ -245,19 +268,21 @@ struct kunit { * be read after the test case finishes once all threads associated * with the test case have terminated. */ - bool success; /* Read only after test_case finishes! */ spinlock_t lock; /* Guards all mutable test state. */ + enum kunit_status status; /* Read only after test_case finishes! */ /* * Because resources is a list that may be updated multiple times (with * new resources) from any thread associated with a test case, we must * protect it with some type of lock. */ struct list_head resources; /* Protected by lock. */ + + char status_comment[KUNIT_STATUS_COMMENT_SIZE]; }; static inline void kunit_set_failure(struct kunit *test) { - WRITE_ONCE(test->success, false); + WRITE_ONCE(test->status, KUNIT_FAILURE); } void kunit_init_test(struct kunit *test, const char *name, char *log); @@ -348,7 +373,7 @@ static inline int kunit_run_all_tests(void) #define kunit_suite_for_each_test_case(suite, test_case) \ for (test_case = suite->test_cases; test_case->run_case; test_case++) -bool kunit_suite_has_succeeded(struct kunit_suite *suite); +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite); /* * Like kunit_alloc_resource() below, but returns the struct kunit_resource @@ -515,8 +540,9 @@ kunit_find_resource(struct kunit *test, void *match_data) { struct kunit_resource *res, *found = NULL; + unsigned long flags; - spin_lock(&test->lock); + spin_lock_irqsave(&test->lock, flags); list_for_each_entry_reverse(res, &test->resources, node) { if (match(test, res, (void *)match_data)) { @@ -526,7 +552,7 @@ kunit_find_resource(struct kunit *test, } } - spin_unlock(&test->lock); + spin_unlock_irqrestore(&test->lock, flags); return found; } @@ -577,16 +603,30 @@ static inline int kunit_destroy_named_resource(struct kunit *test, void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); /** - * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*. + * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*. * @test: The test context object. + * @n: number of elements. * @size: The size in bytes of the desired memory. * @gfp: flags passed to underlying kmalloc(). * - * Just like `kmalloc(...)`, except the allocation is managed by the test case + * Just like `kmalloc_array(...)`, except the allocation is managed by the test case * and is automatically cleaned up after the test case concludes. See &struct * kunit_resource for more information. */ -void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp); +void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags); + +/** + * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*. + * @test: The test context object. 
+ * @size: The size in bytes of the desired memory. + * @gfp: flags passed to underlying kmalloc(). + * + * See kmalloc() and kunit_kmalloc_array() for more information. + */ +static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) +{ + return kunit_kmalloc_array(test, 1, size, gfp); +} /** * kunit_kfree() - Like kfree except for allocations managed by KUnit. @@ -601,16 +641,66 @@ void kunit_kfree(struct kunit *test, const void *ptr); * @size: The size in bytes of the desired memory. * @gfp: flags passed to underlying kmalloc(). * - * See kzalloc() and kunit_kmalloc() for more information. + * See kzalloc() and kunit_kmalloc_array() for more information. */ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp) { return kunit_kmalloc(test, size, gfp | __GFP_ZERO); } +/** + * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation. + * @test: The test context object. + * @n: number of elements. + * @size: The size in bytes of the desired memory. + * @gfp: flags passed to underlying kmalloc(). + * + * See kcalloc() and kunit_kmalloc_array() for more information. + */ +static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags) +{ + return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO); +} + void kunit_cleanup(struct kunit *test); -void kunit_log_append(char *log, const char *fmt, ...); +void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...); + +/** + * kunit_mark_skipped() - Marks @test_or_suite as skipped + * + * @test_or_suite: The test context object. + * @fmt: A printk() style format string. + * + * Marks the test as skipped. @fmt is given output as the test status + * comment, typically the reason the test was skipped. + * + * Test execution continues after kunit_mark_skipped() is called. + */ +#define kunit_mark_skipped(test_or_suite, fmt, ...) \ + do { \ + WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \ + scnprintf((test_or_suite)->status_comment, \ + KUNIT_STATUS_COMMENT_SIZE, \ + fmt, ##__VA_ARGS__); \ + } while (0) + +/** + * kunit_skip() - Marks @test_or_suite as skipped + * + * @test_or_suite: The test context object. + * @fmt: A printk() style format string. + * + * Skips the test. @fmt is given output as the test status + * comment, typically the reason the test was skipped. + * + * Test execution is halted after kunit_skip() is called. + */ +#define kunit_skip(test_or_suite, fmt, ...) \ + do { \ + kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\ + kunit_try_catch_throw(&((test_or_suite)->try_catch)); \ + } while (0) /* * printk and log to per-test or per-suite log buffer. Logging only done @@ -775,7 +865,6 @@ void kunit_do_assertion(struct kunit *test, do { \ typeof(left) __left = (left); \ typeof(right) __right = (right); \ - ((void)__typecheck(__left, __right)); \ \ KUNIT_ASSERTION(test, \ __left op __right, \ @@ -1129,8 +1218,8 @@ do { \ fmt, \ ...) 
\ do { \ - typeof(left) __left = (left); \ - typeof(right) __right = (right); \ + const char *__left = (left); \ + const char *__right = (right); \ \ KUNIT_ASSERTION(test, \ strcmp(__left, __right) op 0, \ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index ec621180ef09..e602d848fc1a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -72,6 +72,9 @@ struct vgic_global { bool has_gicv4; bool has_gicv4_1; + /* Pseudo GICv3 from outer space */ + bool no_hw_deactivation; + /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; @@ -89,6 +92,26 @@ enum vgic_irq_config { VGIC_CONFIG_LEVEL }; +/* + * Per-irq ops overriding some common behaviours. + * + * Always called in a non-preemptible section and the functions can use + * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs. + */ +struct irq_ops { + /* Per-interrupt flags for special-cased interrupts */ + unsigned long flags; + +#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */ + + /* + * Callback function pointer to in-kernel devices that can tell us the + * state of the input level of mapped level-triggered IRQ faster than + * peeking into the physical GIC. + */ + bool (*get_input_level)(int vintid); +}; + struct vgic_irq { raw_spinlock_t irq_lock; /* Protects the content of the struct */ struct list_head lpi_list; /* Used to link all LPIs together */ @@ -126,21 +149,17 @@ struct vgic_irq { u8 group; /* 0 == group 0, 1 == group 1 */ enum vgic_irq_config config; /* Level or edge */ - /* - * Callback function pointer to in-kernel devices that can tell us the - * state of the input level of mapped level-triggered IRQ faster than - * peaking into the physical GIC. - * - * Always called in non-preemptible section and the functions can use - * kvm_arm_get_running_vcpu() to get the vcpu pointer for private - * IRQs. - */ - bool (*get_input_level)(int vintid); + struct irq_ops *ops; void *owner; /* Opaque pointer to reserve an interrupt for in-kernel devices.
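To make the new per-IRQ hook concrete, here is a sketch of how an in-kernel device might populate struct irq_ops when mapping a physical interrupt; the callback body, its device query, and the IRQ numbers are hypothetical:

static bool example_get_input_level(int vintid)
{
	/* Report whether the device's level-triggered line is currently asserted. */
	return false;	/* placeholder: query the device state for vintid here */
}

static struct irq_ops example_irq_ops = {
	.flags = VGIC_IRQ_SW_RESAMPLE,	/* clear the active state for resampling */
	.get_input_level = example_get_input_level,
};

/* then, e.g.: kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &example_irq_ops); */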
*/ }; +static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq) +{ + return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE); +} + struct vgic_register_region; struct vgic_its; @@ -352,7 +371,7 @@ void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, - u32 vintid, bool (*get_input_level)(int vindid)); + u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c60745f657e9..974d497a897d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -132,6 +132,7 @@ enum acpi_address_range_id { union acpi_subtable_headers { struct acpi_subtable_header common; struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; }; typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); @@ -248,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -259,9 +260,12 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +static inline void +acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { } #endif int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); @@ -551,6 +555,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 +#define OSC_SB_PRM_SUPPORT 0x00200000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; @@ -666,7 +671,6 @@ extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); -void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *, struct property_entry *); @@ -710,6 +714,8 @@ static inline u64 acpi_arch_get_root_pointer(void) } #endif +int acpi_get_local_address(acpi_handle handle, u32 *addr); + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 @@ -765,7 +771,7 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) return false; } -static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } @@ -775,12 +781,12 @@ static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) return false; } -static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +static inline struct acpi_data_node 
*to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; } -static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, +static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; @@ -911,7 +917,7 @@ acpi_create_platform_device(struct acpi_device *adev, return NULL; } -static inline bool acpi_dma_supported(struct acpi_device *adev) +static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } @@ -965,6 +971,11 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res) return NULL; } +static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) +{ + return -ENODEV; +} + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC @@ -1004,6 +1015,7 @@ int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); +bool acpi_storage_d3(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } @@ -1011,6 +1023,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } +static inline bool acpi_storage_d3(struct device *dev) +{ + return false; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) @@ -1096,6 +1112,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); +bool acpi_gpio_get_io_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, @@ -1103,6 +1121,11 @@ static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, { return false; } +static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + return false; +} static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index) { @@ -1357,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) #endif #ifdef CONFIG_ACPI -extern int acpi_platform_notify(struct device *dev, enum kobject_action action); +extern void acpi_device_notify(struct device *dev); +extern void acpi_device_notify_remove(struct device *dev); #else -static inline int -acpi_platform_notify(struct device *dev, enum kobject_action action) -{ - return 0; -} +static inline void acpi_device_notify(struct device *dev) { } +static inline void acpi_device_notify_remove(struct device *dev) { } #endif #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1a12baa58e40..f1f0842a2cb2 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -34,9 +34,8 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ -void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); -const struct iommu_ops *iort_iommu_configure_id(struct device *dev, - const u32 *id_in); +int iort_dma_get_ranges(struct device *dev, u64 *size); +int 
iort_iommu_configure_id(struct device *dev, const u32 *id_in); int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); #else @@ -48,11 +47,10 @@ static inline struct irq_domain *iort_get_device_domain( { return NULL; } static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ -static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, - u64 *size) { } -static inline const struct iommu_ops *iort_iommu_configure_id( - struct device *dev, const u32 *id_in) -{ return NULL; } +static inline int iort_dma_get_ranges(struct device *dev, u64 *size) +{ return -ENODEV; } +static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) +{ return -ENODEV; } static inline int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) { return 0; } diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h new file mode 100644 index 000000000000..0a24ab7cb66f --- /dev/null +++ b/include/linux/acpi_mdio.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ACPI helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_ACPI_MDIO_H +#define __LINUX_ACPI_MDIO_H + +#include <linux/phy.h> + +#if IS_ENABLED(CONFIG_ACPI_MDIO) +int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode); +#else /* CONFIG_ACPI_MDIO */ +static inline int +acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) +{ + /* + * Fall back to mdiobus_register() function to register a bus. + * This way, we don't have to keep compat bits around in drivers. + */ + + return mdiobus_register(mdio); +} +#endif + +#endif /* __LINUX_ACPI_MDIO_H */ diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h new file mode 100644 index 000000000000..1eb8ee5b0e5f --- /dev/null +++ b/include/linux/acpi_viot.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ACPI_VIOT_H__ +#define __ACPI_VIOT_H__ + +#include <linux/acpi.h> + +#ifdef CONFIG_ACPI_VIOT +void __init acpi_viot_init(void); +int viot_iommu_configure(struct device *dev); +#else +static inline void acpi_viot_init(void) {} +static inline int viot_iommu_configure(struct device *dev) +{ + return -ENODEV; +} +#endif + +#endif /* __ACPI_VIOT_H__ */ diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h index a889f28afb42..c637e0997f6d 100644 --- a/include/linux/adreno-smmu-priv.h +++ b/include/linux/adreno-smmu-priv.h @@ -9,6 +9,32 @@ #include <linux/io-pgtable.h> /** + * struct adreno_smmu_fault_info - container for key fault information + * + * @far: The faulting IOVA from ARM_SMMU_CB_FAR + * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0 + * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR + * @fsr: The fault status from ARM_SMMU_CB_FSR + * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0 + * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR0 + * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx) + * + * This struct passes back key page fault information to the GPU driver + * through the get_fault_info function pointer. + * The GPU driver can use this information to print informative + * log messages and provide deeper GPU specific insight into the fault. 
+ */ +struct adreno_smmu_fault_info { + u64 far; + u64 ttbr0; + u32 contextidr; + u32 fsr; + u32 fsynr0; + u32 fsynr1; + u32 cbfrsynra; +}; + +/** * struct adreno_smmu_priv - private interface between adreno-smmu and GPU * * @cookie: An opaque token provided by adreno-smmu and passed @@ -17,6 +43,13 @@ * @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank. A * NULL config disables TTBR0 translation, otherwise * TTBR0 translation is enabled with the specified cfg + * @get_fault_info: Called by the GPU fault handler to get information about + * the fault + * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call + * before set_ttbr0_cfg(). If stalling on fault is enabled, + * the GPU driver must call resume_translation() + * @resume_translation: Resume translation after a fault + * * * The GPU driver (drm/msm) and adreno-smmu work together for controlling * the GPU's SMMU instance. This is by necessity, as the GPU is directly @@ -31,6 +64,9 @@ struct adreno_smmu_priv { const void *cookie; const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie); int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg); + void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info); + void (*set_stall)(const void *cookie, bool enabled); + void (*resume_translation)(const void *cookie, bool terminate); }; -#endif /* __ADRENO_SMMU_PRIV_H */
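On the consumer side, a GPU driver fault path might use the new ops roughly as sketched below; the worker function is invented, and only struct adreno_smmu_fault_info and the three callbacks come from the patch:

static void example_gpu_fault_worker(struct adreno_smmu_priv *priv)
{
	struct adreno_smmu_fault_info info = {};

	if (priv->get_fault_info)
		priv->get_fault_info(priv->cookie, &info);

	/* info.far is the faulting IOVA, info.fsr the fault status; log as needed. */

	/* If stall-on-fault was enabled via set_stall(), translation must be resumed. */
	if (priv->resume_translation)
		priv->resume_translation(priv->cookie, false /* do not terminate */);
}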
\ No newline at end of file +#endif /* __ADRENO_SMMU_PRIV_H */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 6861489a1890..7d1cabe15262 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -63,6 +63,9 @@ #define ARM_SMCCC_VERSION_1_0 0x10000 #define ARM_SMCCC_VERSION_1_1 0x10001 #define ARM_SMCCC_VERSION_1_2 0x10002 +#define ARM_SMCCC_VERSION_1_3 0x10003 + +#define ARM_SMCCC_1_3_SVE_HINT 0x10000 #define ARM_SMCCC_VERSION_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ @@ -216,6 +219,8 @@ u32 arm_smccc_get_version(void); void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); +extern u64 smccc_has_sve_hint; + /** * struct arm_smccc_res - Result from SMC/HVC call * @a0-a3 result values from registers 0 to 3 @@ -227,6 +232,61 @@ struct arm_smccc_res { unsigned long a3; }; +#ifdef CONFIG_ARM64 +/** + * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call + * @a0-a17 argument values from registers 0 to 17 + */ +struct arm_smccc_1_2_regs { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long a8; + unsigned long a9; + unsigned long a10; + unsigned long a11; + unsigned long a12; + unsigned long a13; + unsigned long a14; + unsigned long a15; + unsigned long a16; + unsigned long a17; +}; + +/** + * arm_smccc_1_2_hvc() - make HVC calls + * @args: arguments passed via struct arm_smccc_1_2_regs + * @res: result values via struct arm_smccc_1_2_regs + * + * This function is used to make HVC calls following SMC Calling Convention + * v1.2 or above. The contents of the supplied parameters are copied from the + * structure to registers prior to the HVC instruction. The return values + * are updated with the content from registers on return from the HVC + * instruction. + */ +asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); + +/** + * arm_smccc_1_2_smc() - make SMC calls + * @args: arguments passed via struct arm_smccc_1_2_regs + * @res: result values via struct arm_smccc_1_2_regs + * + * This function is used to make SMC calls following SMC Calling Convention + * v1.2 or above. The contents of the supplied parameters are copied from the + * structure to registers prior to the SMC instruction. The return values + * are updated with the content from registers on return from the SMC + * instruction. + */ +asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +#endif + /** * struct arm_smccc_quirk - Contains quirk information * @id: quirk identification @@ -241,6 +301,15 @@ struct arm_smccc_quirk { }; /** + * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls + * + * Sets the SMCCC hint bit to indicate if there is live state in the SVE + * registers; this modifies x0 in place and should never be called from C + * code.
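A brief usage sketch of the wide-register v1.2 interface added above (the function ID below is a made-up stand-in; real callers pass a firmware-defined ID):

#include <linux/arm-smccc.h>

static void example_smccc_v12_call(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = 0xc4000001UL,	/* hypothetical SMC64 fast-call function ID */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0..res.a17 now hold whatever the firmware returned in x0..x17. */
}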
+ */ +asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); + +/** * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 * @res: result values from registers 0 to 3 @@ -297,6 +366,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #endif +/* nVHE hypervisor doesn't have a current thread so needs separate checks */ +#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__) + +#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \ + ARM64_SVE) +#define smccc_sve_clobbers "x16", "x30", "cc", + +#else + +#define SMCCC_SVE_CHECK +#define smccc_sve_clobbers + +#endif + #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x #define __count_args(...) \ @@ -364,7 +447,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define ___constraints(count) \ : __constraint_read_ ## count \ - : "memory" + : smccc_sve_clobbers "memory" #define __constraints(count) ___constraints(count) /* @@ -379,7 +462,8 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3"); \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(inst "\n" : \ + asm volatile(SMCCC_SVE_CHECK \ + inst "\n" : \ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h new file mode 100644 index 000000000000..505c679b6a9b --- /dev/null +++ b/include/linux/arm_ffa.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 ARM Ltd. + */ + +#ifndef _LINUX_ARM_FFA_H +#define _LINUX_ARM_FFA_H + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/uuid.h> + +/* FFA Bus/Device/Driver related */ +struct ffa_device { + int vm_id; + bool mode_32bit; + uuid_t uuid; + struct device dev; +}; + +#define to_ffa_dev(d) container_of(d, struct ffa_device, dev) + +struct ffa_device_id { + uuid_t uuid; +}; + +struct ffa_driver { + const char *name; + int (*probe)(struct ffa_device *sdev); + void (*remove)(struct ffa_device *sdev); + const struct ffa_device_id *id_table; + + struct device_driver driver; +}; + +#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver) + +static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data) +{ + fdev->dev.driver_data = data; +} + +#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) +struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id); +void ffa_device_unregister(struct ffa_device *ffa_dev); +int ffa_driver_register(struct ffa_driver *driver, struct module *owner, + const char *mod_name); +void ffa_driver_unregister(struct ffa_driver *driver); +bool ffa_device_is_valid(struct ffa_device *ffa_dev); +const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev); + +#else +static inline +struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) +{ + return NULL; +} + +static inline void ffa_device_unregister(struct ffa_device *dev) {} + +static inline int +ffa_driver_register(struct ffa_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void ffa_driver_unregister(struct ffa_driver *driver) {} + +static inline +bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } + +static inline +const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) +{ + return NULL; +} +#endif /* 
CONFIG_ARM_FFA_TRANSPORT */ + +#define ffa_register(driver) \ + ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define ffa_unregister(driver) \ + ffa_driver_unregister(driver) + +/** + * module_ffa_driver() - Helper macro for registering an FFA driver + * @__ffa_driver: ffa_driver structure + * + * Helper macro for FFA drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. + */ +#define module_ffa_driver(__ffa_driver) \ + module_driver(__ffa_driver, ffa_register, ffa_unregister) + +/* FFA transport related */ +struct ffa_partition_info { + u16 id; + u16 exec_ctxt; +/* partition supports receipt of direct requests */ +#define FFA_PARTITION_DIRECT_RECV BIT(0) +/* partition can send direct requests. */ +#define FFA_PARTITION_DIRECT_SEND BIT(1) +/* partition can send and receive indirect messages. */ +#define FFA_PARTITION_INDIRECT_MSG BIT(2) + u32 properties; +}; + +/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ +struct ffa_send_direct_data { + unsigned long data0; /* w3/x3 */ + unsigned long data1; /* w4/x4 */ + unsigned long data2; /* w5/x5 */ + unsigned long data3; /* w6/x6 */ + unsigned long data4; /* w7/x7 */ +}; + +struct ffa_mem_region_addr_range { + /* The base IPA of the constituent memory region, aligned to 4 kiB */ + u64 address; + /* The number of 4 kiB pages in the constituent memory region. */ + u32 pg_cnt; + u32 reserved; +}; + +struct ffa_composite_mem_region { + /* + * The total number of 4 kiB pages included in this memory region. This + * must be equal to the sum of page counts specified in each + * `struct ffa_mem_region_addr_range`. + */ + u32 total_pg_cnt; + /* The number of constituents included in this memory region range */ + u32 addr_range_cnt; + u64 reserved; + /** An array of `addr_range_cnt` memory region constituents. */ + struct ffa_mem_region_addr_range constituents[]; +}; + +struct ffa_mem_region_attributes { + /* The ID of the VM to which the memory is being given or shared. */ + u16 receiver; + /* + * The permissions with which the memory region should be mapped in the + * receiver's page table. + */ +#define FFA_MEM_EXEC BIT(3) +#define FFA_MEM_NO_EXEC BIT(2) +#define FFA_MEM_RW BIT(1) +#define FFA_MEM_RO BIT(0) + u8 attrs; + /* + * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP + * for memory regions with multiple borrowers. + */ +#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0) + u8 flag; + u32 composite_off; + /* + * Offset in bytes from the start of the outer `ffa_memory_region` to + * a `struct ffa_mem_region_addr_range`. + */ + u64 reserved; +}; + +struct ffa_mem_region { + /* The ID of the VM/owner which originally sent the memory region */ + u16 sender_id; +#define FFA_MEM_NORMAL BIT(5) +#define FFA_MEM_DEVICE BIT(4) + +#define FFA_MEM_WRITE_BACK (3 << 2) +#define FFA_MEM_NON_CACHEABLE (1 << 2) + +#define FFA_DEV_nGnRnE (0 << 2) +#define FFA_DEV_nGnRE (1 << 2) +#define FFA_DEV_nGRE (2 << 2) +#define FFA_DEV_GRE (3 << 2) + +#define FFA_MEM_NON_SHAREABLE (0) +#define FFA_MEM_OUTER_SHAREABLE (2) +#define FFA_MEM_INNER_SHAREABLE (3) + u8 attributes; + u8 reserved_0; +/* + * Clear memory region contents after unmapping it from the sender and + * before mapping it for any receiver. + */ +#define FFA_MEM_CLEAR BIT(0) +/* + * Whether the hypervisor may time slice the memory sharing or retrieval + * operation.
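Putting the bus pieces together, a minimal FFA driver might be registered as sketched here; the driver name, probe logic, and UUID are placeholders, not part of the patch:

#include <linux/arm_ffa.h>

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_dev_ops *ops = ffa_dev_ops_get(ffa_dev);

	if (!ops)
		return -ENODEV;
	/* e.g. ops->mode_32bit_set(ffa_dev) or ops->sync_send_receive(...) */
	return 0;
}

static void example_ffa_remove(struct ffa_device *ffa_dev)
{
}

static const struct ffa_device_id example_ffa_ids[] = {
	/* placeholder UUID; match the partition's published UUID in practice */
	{ UUID_INIT(0x00000000, 0x0000, 0x0000,
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) },
	{}
};

static struct ffa_driver example_ffa_driver = {
	.name = "example-ffa",
	.probe = example_ffa_probe,
	.remove = example_ffa_remove,
	.id_table = example_ffa_ids,
};
module_ffa_driver(example_ffa_driver);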
+ */ +#define FFA_TIME_SLICE_ENABLE BIT(1) + +#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3) +#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3) +#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3) +#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3) + +#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9) +#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5) + /* Flags to control behaviour of the transaction. */ + u32 flags; +#define HANDLE_LOW_MASK GENMASK_ULL(31, 0) +#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32) +#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x)))) +#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x)))) + +#define PACK_HANDLE(l, h) \ + (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h))) + /* + * A globally-unique ID assigned by the hypervisor for a region + * of memory being sent between VMs. + */ + u64 handle; + /* + * An implementation defined value associated with the receiver and the + * memory region. + */ + u64 tag; + u32 reserved_1; + /* + * The number of `ffa_mem_region_attributes` entries included in this + * transaction. + */ + u32 ep_count; + /* + * An array of endpoint memory access descriptors. + * Each one specifies a memory region offset, an endpoint and the + * attributes with which this memory region should be mapped in that + * endpoint's page table. + */ + struct ffa_mem_region_attributes ep_mem_access[]; +}; + +#define COMPOSITE_OFFSET(x) \ + (offsetof(struct ffa_mem_region, ep_mem_access[x])) +#define CONSTITUENTS_OFFSET(x) \ + (offsetof(struct ffa_composite_mem_region, constituents[x])) +#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ + (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) + +struct ffa_mem_ops_args { + bool use_txbuf; + u32 nattrs; + u32 flags; + u64 tag; + u64 g_handle; + struct scatterlist *sg; + struct ffa_mem_region_attributes *attrs; +}; + +struct ffa_dev_ops { + u32 (*api_version_get)(void); + int (*partition_info_get)(const char *uuid_str, + struct ffa_partition_info *buffer); + void (*mode_32bit_set)(struct ffa_device *dev); + int (*sync_send_receive)(struct ffa_device *dev, + struct ffa_send_direct_data *data); + int (*memory_reclaim)(u64 g_handle, u32 flags); + int (*memory_share)(struct ffa_device *dev, + struct ffa_mem_ops_args *args); +}; + +#endif /* _LINUX_ARM_FFA_H */ diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h index 4cc40201273e..83ad775ad0aa 100644 --- a/include/linux/ascii85.h +++ b/include/linux/ascii85.h @@ -8,7 +8,8 @@ #ifndef _ASCII85_H_ #define _ASCII85_H_ -#include <linux/kernel.h> +#include <linux/math.h> +#include <linux/types.h> #define ASCII85_BUFSZ 6 diff --git a/include/linux/ata.h b/include/linux/ata.h index 6e67aded28f8..1b44f40c7700 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -13,7 +13,7 @@ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ -#include <linux/kernel.h> +#include <linux/bits.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h deleted file mode 100644 index 2a3f55d98be9..000000000000 --- a/include/linux/atomic-fallback.h +++ /dev/null @@ -1,2595 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-fallback.sh -// DO NOT MODIFY THIS FILE DIRECTLY - -#ifndef _LINUX_ATOMIC_FALLBACK_H -#define _LINUX_ATOMIC_FALLBACK_H - -#include <linux/compiler.h> - -#ifndef xchg_relaxed -#define xchg_acquire xchg -#define xchg_release xchg -#define xchg_relaxed xchg -#else /* xchg_relaxed */ - -#ifndef xchg_acquire -#define 
xchg_acquire(...) \ - __atomic_op_acquire(xchg, __VA_ARGS__) -#endif - -#ifndef xchg_release -#define xchg_release(...) \ - __atomic_op_release(xchg, __VA_ARGS__) -#endif - -#ifndef xchg -#define xchg(...) \ - __atomic_op_fence(xchg, __VA_ARGS__) -#endif - -#endif /* xchg_relaxed */ - -#ifndef cmpxchg_relaxed -#define cmpxchg_acquire cmpxchg -#define cmpxchg_release cmpxchg -#define cmpxchg_relaxed cmpxchg -#else /* cmpxchg_relaxed */ - -#ifndef cmpxchg_acquire -#define cmpxchg_acquire(...) \ - __atomic_op_acquire(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg_release -#define cmpxchg_release(...) \ - __atomic_op_release(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg -#define cmpxchg(...) \ - __atomic_op_fence(cmpxchg, __VA_ARGS__) -#endif - -#endif /* cmpxchg_relaxed */ - -#ifndef cmpxchg64_relaxed -#define cmpxchg64_acquire cmpxchg64 -#define cmpxchg64_release cmpxchg64 -#define cmpxchg64_relaxed cmpxchg64 -#else /* cmpxchg64_relaxed */ - -#ifndef cmpxchg64_acquire -#define cmpxchg64_acquire(...) \ - __atomic_op_acquire(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64_release -#define cmpxchg64_release(...) \ - __atomic_op_release(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64 -#define cmpxchg64(...) \ - __atomic_op_fence(cmpxchg64, __VA_ARGS__) -#endif - -#endif /* cmpxchg64_relaxed */ - -#ifndef try_cmpxchg_relaxed -#ifdef try_cmpxchg -#define try_cmpxchg_acquire try_cmpxchg -#define try_cmpxchg_release try_cmpxchg -#define try_cmpxchg_relaxed try_cmpxchg -#endif /* try_cmpxchg */ - -#ifndef try_cmpxchg -#define try_cmpxchg(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg */ - -#ifndef try_cmpxchg_acquire -#define try_cmpxchg_acquire(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_acquire */ - -#ifndef try_cmpxchg_release -#define try_cmpxchg_release(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_release((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_release */ - -#ifndef try_cmpxchg_relaxed -#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_relaxed */ - -#else /* try_cmpxchg_relaxed */ - -#ifndef try_cmpxchg_acquire -#define try_cmpxchg_acquire(...) \ - __atomic_op_acquire(try_cmpxchg, __VA_ARGS__) -#endif - -#ifndef try_cmpxchg_release -#define try_cmpxchg_release(...) \ - __atomic_op_release(try_cmpxchg, __VA_ARGS__) -#endif - -#ifndef try_cmpxchg -#define try_cmpxchg(...) 
\ - __atomic_op_fence(try_cmpxchg, __VA_ARGS__) -#endif - -#endif /* try_cmpxchg_relaxed */ - -#define arch_atomic_read atomic_read -#define arch_atomic_read_acquire atomic_read_acquire - -#ifndef atomic_read_acquire -static __always_inline int -atomic_read_acquire(const atomic_t *v) -{ - return smp_load_acquire(&(v)->counter); -} -#define atomic_read_acquire atomic_read_acquire -#endif - -#define arch_atomic_set atomic_set -#define arch_atomic_set_release atomic_set_release - -#ifndef atomic_set_release -static __always_inline void -atomic_set_release(atomic_t *v, int i) -{ - smp_store_release(&(v)->counter, i); -} -#define atomic_set_release atomic_set_release -#endif - -#define arch_atomic_add atomic_add - -#define arch_atomic_add_return atomic_add_return -#define arch_atomic_add_return_acquire atomic_add_return_acquire -#define arch_atomic_add_return_release atomic_add_return_release -#define arch_atomic_add_return_relaxed atomic_add_return_relaxed - -#ifndef atomic_add_return_relaxed -#define atomic_add_return_acquire atomic_add_return -#define atomic_add_return_release atomic_add_return -#define atomic_add_return_relaxed atomic_add_return -#else /* atomic_add_return_relaxed */ - -#ifndef atomic_add_return_acquire -static __always_inline int -atomic_add_return_acquire(int i, atomic_t *v) -{ - int ret = atomic_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_add_return_acquire atomic_add_return_acquire -#endif - -#ifndef atomic_add_return_release -static __always_inline int -atomic_add_return_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_add_return_relaxed(i, v); -} -#define atomic_add_return_release atomic_add_return_release -#endif - -#ifndef atomic_add_return -static __always_inline int -atomic_add_return(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_add_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_add_return atomic_add_return -#endif - -#endif /* atomic_add_return_relaxed */ - -#define arch_atomic_fetch_add atomic_fetch_add -#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire -#define arch_atomic_fetch_add_release atomic_fetch_add_release -#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed - -#ifndef atomic_fetch_add_relaxed -#define atomic_fetch_add_acquire atomic_fetch_add -#define atomic_fetch_add_release atomic_fetch_add -#define atomic_fetch_add_relaxed atomic_fetch_add -#else /* atomic_fetch_add_relaxed */ - -#ifndef atomic_fetch_add_acquire -static __always_inline int -atomic_fetch_add_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_add_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#endif - -#ifndef atomic_fetch_add_release -static __always_inline int -atomic_fetch_add_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_add_relaxed(i, v); -} -#define atomic_fetch_add_release atomic_fetch_add_release -#endif - -#ifndef atomic_fetch_add -static __always_inline int -atomic_fetch_add(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_add_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_add atomic_fetch_add -#endif - -#endif /* atomic_fetch_add_relaxed */ - -#define arch_atomic_sub atomic_sub - -#define arch_atomic_sub_return atomic_sub_return -#define arch_atomic_sub_return_acquire atomic_sub_return_acquire -#define 
arch_atomic_sub_return_release atomic_sub_return_release -#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed - -#ifndef atomic_sub_return_relaxed -#define atomic_sub_return_acquire atomic_sub_return -#define atomic_sub_return_release atomic_sub_return -#define atomic_sub_return_relaxed atomic_sub_return -#else /* atomic_sub_return_relaxed */ - -#ifndef atomic_sub_return_acquire -static __always_inline int -atomic_sub_return_acquire(int i, atomic_t *v) -{ - int ret = atomic_sub_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_sub_return_acquire atomic_sub_return_acquire -#endif - -#ifndef atomic_sub_return_release -static __always_inline int -atomic_sub_return_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_sub_return_relaxed(i, v); -} -#define atomic_sub_return_release atomic_sub_return_release -#endif - -#ifndef atomic_sub_return -static __always_inline int -atomic_sub_return(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_sub_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_sub_return atomic_sub_return -#endif - -#endif /* atomic_sub_return_relaxed */ - -#define arch_atomic_fetch_sub atomic_fetch_sub -#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#define arch_atomic_fetch_sub_release atomic_fetch_sub_release -#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed - -#ifndef atomic_fetch_sub_relaxed -#define atomic_fetch_sub_acquire atomic_fetch_sub -#define atomic_fetch_sub_release atomic_fetch_sub -#define atomic_fetch_sub_relaxed atomic_fetch_sub -#else /* atomic_fetch_sub_relaxed */ - -#ifndef atomic_fetch_sub_acquire -static __always_inline int -atomic_fetch_sub_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_sub_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#endif - -#ifndef atomic_fetch_sub_release -static __always_inline int -atomic_fetch_sub_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_sub_relaxed(i, v); -} -#define atomic_fetch_sub_release atomic_fetch_sub_release -#endif - -#ifndef atomic_fetch_sub -static __always_inline int -atomic_fetch_sub(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_sub_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_sub atomic_fetch_sub -#endif - -#endif /* atomic_fetch_sub_relaxed */ - -#define arch_atomic_inc atomic_inc - -#ifndef atomic_inc -static __always_inline void -atomic_inc(atomic_t *v) -{ - atomic_add(1, v); -} -#define atomic_inc atomic_inc -#endif - -#define arch_atomic_inc_return atomic_inc_return -#define arch_atomic_inc_return_acquire atomic_inc_return_acquire -#define arch_atomic_inc_return_release atomic_inc_return_release -#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed - -#ifndef atomic_inc_return_relaxed -#ifdef atomic_inc_return -#define atomic_inc_return_acquire atomic_inc_return -#define atomic_inc_return_release atomic_inc_return -#define atomic_inc_return_relaxed atomic_inc_return -#endif /* atomic_inc_return */ - -#ifndef atomic_inc_return -static __always_inline int -atomic_inc_return(atomic_t *v) -{ - return atomic_add_return(1, v); -} -#define atomic_inc_return atomic_inc_return -#endif - -#ifndef atomic_inc_return_acquire -static __always_inline int -atomic_inc_return_acquire(atomic_t *v) -{ - return atomic_add_return_acquire(1, v); -} -#define 
atomic_inc_return_acquire atomic_inc_return_acquire -#endif - -#ifndef atomic_inc_return_release -static __always_inline int -atomic_inc_return_release(atomic_t *v) -{ - return atomic_add_return_release(1, v); -} -#define atomic_inc_return_release atomic_inc_return_release -#endif - -#ifndef atomic_inc_return_relaxed -static __always_inline int -atomic_inc_return_relaxed(atomic_t *v) -{ - return atomic_add_return_relaxed(1, v); -} -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#endif - -#else /* atomic_inc_return_relaxed */ - -#ifndef atomic_inc_return_acquire -static __always_inline int -atomic_inc_return_acquire(atomic_t *v) -{ - int ret = atomic_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_inc_return_acquire atomic_inc_return_acquire -#endif - -#ifndef atomic_inc_return_release -static __always_inline int -atomic_inc_return_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_inc_return_relaxed(v); -} -#define atomic_inc_return_release atomic_inc_return_release -#endif - -#ifndef atomic_inc_return -static __always_inline int -atomic_inc_return(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_inc_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_inc_return atomic_inc_return -#endif - -#endif /* atomic_inc_return_relaxed */ - -#define arch_atomic_fetch_inc atomic_fetch_inc -#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#define arch_atomic_fetch_inc_release atomic_fetch_inc_release -#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed - -#ifndef atomic_fetch_inc_relaxed -#ifdef atomic_fetch_inc -#define atomic_fetch_inc_acquire atomic_fetch_inc -#define atomic_fetch_inc_release atomic_fetch_inc -#define atomic_fetch_inc_relaxed atomic_fetch_inc -#endif /* atomic_fetch_inc */ - -#ifndef atomic_fetch_inc -static __always_inline int -atomic_fetch_inc(atomic_t *v) -{ - return atomic_fetch_add(1, v); -} -#define atomic_fetch_inc atomic_fetch_inc -#endif - -#ifndef atomic_fetch_inc_acquire -static __always_inline int -atomic_fetch_inc_acquire(atomic_t *v) -{ - return atomic_fetch_add_acquire(1, v); -} -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif - -#ifndef atomic_fetch_inc_release -static __always_inline int -atomic_fetch_inc_release(atomic_t *v) -{ - return atomic_fetch_add_release(1, v); -} -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif - -#ifndef atomic_fetch_inc_relaxed -static __always_inline int -atomic_fetch_inc_relaxed(atomic_t *v) -{ - return atomic_fetch_add_relaxed(1, v); -} -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#endif - -#else /* atomic_fetch_inc_relaxed */ - -#ifndef atomic_fetch_inc_acquire -static __always_inline int -atomic_fetch_inc_acquire(atomic_t *v) -{ - int ret = atomic_fetch_inc_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif - -#ifndef atomic_fetch_inc_release -static __always_inline int -atomic_fetch_inc_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_inc_relaxed(v); -} -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif - -#ifndef atomic_fetch_inc -static __always_inline int -atomic_fetch_inc(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_inc atomic_fetch_inc -#endif - -#endif /* atomic_fetch_inc_relaxed */ - -#define 
arch_atomic_dec atomic_dec - -#ifndef atomic_dec -static __always_inline void -atomic_dec(atomic_t *v) -{ - atomic_sub(1, v); -} -#define atomic_dec atomic_dec -#endif - -#define arch_atomic_dec_return atomic_dec_return -#define arch_atomic_dec_return_acquire atomic_dec_return_acquire -#define arch_atomic_dec_return_release atomic_dec_return_release -#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed - -#ifndef atomic_dec_return_relaxed -#ifdef atomic_dec_return -#define atomic_dec_return_acquire atomic_dec_return -#define atomic_dec_return_release atomic_dec_return -#define atomic_dec_return_relaxed atomic_dec_return -#endif /* atomic_dec_return */ - -#ifndef atomic_dec_return -static __always_inline int -atomic_dec_return(atomic_t *v) -{ - return atomic_sub_return(1, v); -} -#define atomic_dec_return atomic_dec_return -#endif - -#ifndef atomic_dec_return_acquire -static __always_inline int -atomic_dec_return_acquire(atomic_t *v) -{ - return atomic_sub_return_acquire(1, v); -} -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif - -#ifndef atomic_dec_return_release -static __always_inline int -atomic_dec_return_release(atomic_t *v) -{ - return atomic_sub_return_release(1, v); -} -#define atomic_dec_return_release atomic_dec_return_release -#endif - -#ifndef atomic_dec_return_relaxed -static __always_inline int -atomic_dec_return_relaxed(atomic_t *v) -{ - return atomic_sub_return_relaxed(1, v); -} -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#endif - -#else /* atomic_dec_return_relaxed */ - -#ifndef atomic_dec_return_acquire -static __always_inline int -atomic_dec_return_acquire(atomic_t *v) -{ - int ret = atomic_dec_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif - -#ifndef atomic_dec_return_release -static __always_inline int -atomic_dec_return_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_dec_return_relaxed(v); -} -#define atomic_dec_return_release atomic_dec_return_release -#endif - -#ifndef atomic_dec_return -static __always_inline int -atomic_dec_return(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_dec_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_dec_return atomic_dec_return -#endif - -#endif /* atomic_dec_return_relaxed */ - -#define arch_atomic_fetch_dec atomic_fetch_dec -#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#define arch_atomic_fetch_dec_release atomic_fetch_dec_release -#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed - -#ifndef atomic_fetch_dec_relaxed -#ifdef atomic_fetch_dec -#define atomic_fetch_dec_acquire atomic_fetch_dec -#define atomic_fetch_dec_release atomic_fetch_dec -#define atomic_fetch_dec_relaxed atomic_fetch_dec -#endif /* atomic_fetch_dec */ - -#ifndef atomic_fetch_dec -static __always_inline int -atomic_fetch_dec(atomic_t *v) -{ - return atomic_fetch_sub(1, v); -} -#define atomic_fetch_dec atomic_fetch_dec -#endif - -#ifndef atomic_fetch_dec_acquire -static __always_inline int -atomic_fetch_dec_acquire(atomic_t *v) -{ - return atomic_fetch_sub_acquire(1, v); -} -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif - -#ifndef atomic_fetch_dec_release -static __always_inline int -atomic_fetch_dec_release(atomic_t *v) -{ - return atomic_fetch_sub_release(1, v); -} -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif - -#ifndef atomic_fetch_dec_relaxed -static __always_inline int 
-atomic_fetch_dec_relaxed(atomic_t *v) -{ - return atomic_fetch_sub_relaxed(1, v); -} -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#endif - -#else /* atomic_fetch_dec_relaxed */ - -#ifndef atomic_fetch_dec_acquire -static __always_inline int -atomic_fetch_dec_acquire(atomic_t *v) -{ - int ret = atomic_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif - -#ifndef atomic_fetch_dec_release -static __always_inline int -atomic_fetch_dec_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_dec_relaxed(v); -} -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif - -#ifndef atomic_fetch_dec -static __always_inline int -atomic_fetch_dec(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_dec_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_dec atomic_fetch_dec -#endif - -#endif /* atomic_fetch_dec_relaxed */ - -#define arch_atomic_and atomic_and - -#define arch_atomic_fetch_and atomic_fetch_and -#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire -#define arch_atomic_fetch_and_release atomic_fetch_and_release -#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed - -#ifndef atomic_fetch_and_relaxed -#define atomic_fetch_and_acquire atomic_fetch_and -#define atomic_fetch_and_release atomic_fetch_and -#define atomic_fetch_and_relaxed atomic_fetch_and -#else /* atomic_fetch_and_relaxed */ - -#ifndef atomic_fetch_and_acquire -static __always_inline int -atomic_fetch_and_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_and_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#endif - -#ifndef atomic_fetch_and_release -static __always_inline int -atomic_fetch_and_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_and_relaxed(i, v); -} -#define atomic_fetch_and_release atomic_fetch_and_release -#endif - -#ifndef atomic_fetch_and -static __always_inline int -atomic_fetch_and(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_and atomic_fetch_and -#endif - -#endif /* atomic_fetch_and_relaxed */ - -#define arch_atomic_andnot atomic_andnot - -#ifndef atomic_andnot -static __always_inline void -atomic_andnot(int i, atomic_t *v) -{ - atomic_and(~i, v); -} -#define atomic_andnot atomic_andnot -#endif - -#define arch_atomic_fetch_andnot atomic_fetch_andnot -#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release -#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed - -#ifndef atomic_fetch_andnot_relaxed -#ifdef atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#endif /* atomic_fetch_andnot */ - -#ifndef atomic_fetch_andnot -static __always_inline int -atomic_fetch_andnot(int i, atomic_t *v) -{ - return atomic_fetch_and(~i, v); -} -#define atomic_fetch_andnot atomic_fetch_andnot -#endif - -#ifndef atomic_fetch_andnot_acquire -static __always_inline int -atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - return atomic_fetch_and_acquire(~i, v); -} -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif - -#ifndef 
atomic_fetch_andnot_release -static __always_inline int -atomic_fetch_andnot_release(int i, atomic_t *v) -{ - return atomic_fetch_and_release(~i, v); -} -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif - -#ifndef atomic_fetch_andnot_relaxed -static __always_inline int -atomic_fetch_andnot_relaxed(int i, atomic_t *v) -{ - return atomic_fetch_and_relaxed(~i, v); -} -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#endif - -#else /* atomic_fetch_andnot_relaxed */ - -#ifndef atomic_fetch_andnot_acquire -static __always_inline int -atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_andnot_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif - -#ifndef atomic_fetch_andnot_release -static __always_inline int -atomic_fetch_andnot_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_andnot_relaxed(i, v); -} -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif - -#ifndef atomic_fetch_andnot -static __always_inline int -atomic_fetch_andnot(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_andnot_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_andnot atomic_fetch_andnot -#endif - -#endif /* atomic_fetch_andnot_relaxed */ - -#define arch_atomic_or atomic_or - -#define arch_atomic_fetch_or atomic_fetch_or -#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire -#define arch_atomic_fetch_or_release atomic_fetch_or_release -#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed - -#ifndef atomic_fetch_or_relaxed -#define atomic_fetch_or_acquire atomic_fetch_or -#define atomic_fetch_or_release atomic_fetch_or -#define atomic_fetch_or_relaxed atomic_fetch_or -#else /* atomic_fetch_or_relaxed */ - -#ifndef atomic_fetch_or_acquire -static __always_inline int -atomic_fetch_or_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_or_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#endif - -#ifndef atomic_fetch_or_release -static __always_inline int -atomic_fetch_or_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_or_relaxed(i, v); -} -#define atomic_fetch_or_release atomic_fetch_or_release -#endif - -#ifndef atomic_fetch_or -static __always_inline int -atomic_fetch_or(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_or_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_or atomic_fetch_or -#endif - -#endif /* atomic_fetch_or_relaxed */ - -#define arch_atomic_xor atomic_xor - -#define arch_atomic_fetch_xor atomic_fetch_xor -#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#define arch_atomic_fetch_xor_release atomic_fetch_xor_release -#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed - -#ifndef atomic_fetch_xor_relaxed -#define atomic_fetch_xor_acquire atomic_fetch_xor -#define atomic_fetch_xor_release atomic_fetch_xor -#define atomic_fetch_xor_relaxed atomic_fetch_xor -#else /* atomic_fetch_xor_relaxed */ - -#ifndef atomic_fetch_xor_acquire -static __always_inline int -atomic_fetch_xor_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_xor_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#endif - -#ifndef atomic_fetch_xor_release -static __always_inline int 
-atomic_fetch_xor_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_xor_relaxed(i, v); -} -#define atomic_fetch_xor_release atomic_fetch_xor_release -#endif - -#ifndef atomic_fetch_xor -static __always_inline int -atomic_fetch_xor(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_xor_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_xor atomic_fetch_xor -#endif - -#endif /* atomic_fetch_xor_relaxed */ - -#define arch_atomic_xchg atomic_xchg -#define arch_atomic_xchg_acquire atomic_xchg_acquire -#define arch_atomic_xchg_release atomic_xchg_release -#define arch_atomic_xchg_relaxed atomic_xchg_relaxed - -#ifndef atomic_xchg_relaxed -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg -#define atomic_xchg_relaxed atomic_xchg -#else /* atomic_xchg_relaxed */ - -#ifndef atomic_xchg_acquire -static __always_inline int -atomic_xchg_acquire(atomic_t *v, int i) -{ - int ret = atomic_xchg_relaxed(v, i); - __atomic_acquire_fence(); - return ret; -} -#define atomic_xchg_acquire atomic_xchg_acquire -#endif - -#ifndef atomic_xchg_release -static __always_inline int -atomic_xchg_release(atomic_t *v, int i) -{ - __atomic_release_fence(); - return atomic_xchg_relaxed(v, i); -} -#define atomic_xchg_release atomic_xchg_release -#endif - -#ifndef atomic_xchg -static __always_inline int -atomic_xchg(atomic_t *v, int i) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_xchg_relaxed(v, i); - __atomic_post_full_fence(); - return ret; -} -#define atomic_xchg atomic_xchg -#endif - -#endif /* atomic_xchg_relaxed */ - -#define arch_atomic_cmpxchg atomic_cmpxchg -#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#define arch_atomic_cmpxchg_release atomic_cmpxchg_release -#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed - -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#else /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_cmpxchg_acquire -static __always_inline int -atomic_cmpxchg_acquire(atomic_t *v, int old, int new) -{ - int ret = atomic_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#endif - -#ifndef atomic_cmpxchg_release -static __always_inline int -atomic_cmpxchg_release(atomic_t *v, int old, int new) -{ - __atomic_release_fence(); - return atomic_cmpxchg_relaxed(v, old, new); -} -#define atomic_cmpxchg_release atomic_cmpxchg_release -#endif - -#ifndef atomic_cmpxchg -static __always_inline int -atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic_cmpxchg atomic_cmpxchg -#endif - -#endif /* atomic_cmpxchg_relaxed */ - -#define arch_atomic_try_cmpxchg atomic_try_cmpxchg -#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed - -#ifndef atomic_try_cmpxchg_relaxed -#ifdef atomic_try_cmpxchg -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg -#define atomic_try_cmpxchg_release atomic_try_cmpxchg -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg -#endif /* atomic_try_cmpxchg */ - -#ifndef atomic_try_cmpxchg -static __always_inline bool -atomic_try_cmpxchg(atomic_t 
*v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif - -#ifndef atomic_try_cmpxchg_acquire -static __always_inline bool -atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_acquire(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif - -#ifndef atomic_try_cmpxchg_release -static __always_inline bool -atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_release(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif - -#ifndef atomic_try_cmpxchg_relaxed -static __always_inline bool -atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_relaxed(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed -#endif - -#else /* atomic_try_cmpxchg_relaxed */ - -#ifndef atomic_try_cmpxchg_acquire -static __always_inline bool -atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - bool ret = atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif - -#ifndef atomic_try_cmpxchg_release -static __always_inline bool -atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - __atomic_release_fence(); - return atomic_try_cmpxchg_relaxed(v, old, new); -} -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif - -#ifndef atomic_try_cmpxchg -static __always_inline bool -atomic_try_cmpxchg(atomic_t *v, int *old, int new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif - -#endif /* atomic_try_cmpxchg_relaxed */ - -#define arch_atomic_sub_and_test atomic_sub_and_test - -#ifndef atomic_sub_and_test -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic_sub_and_test(int i, atomic_t *v) -{ - return atomic_sub_return(i, v) == 0; -} -#define atomic_sub_and_test atomic_sub_and_test -#endif - -#define arch_atomic_dec_and_test atomic_dec_and_test - -#ifndef atomic_dec_and_test -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __always_inline bool -atomic_dec_and_test(atomic_t *v) -{ - return atomic_dec_return(v) == 0; -} -#define atomic_dec_and_test atomic_dec_and_test -#endif - -#define arch_atomic_inc_and_test atomic_inc_and_test - -#ifndef atomic_inc_and_test -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -static __always_inline bool -atomic_inc_and_test(atomic_t *v) -{ - return atomic_inc_return(v) == 0; -} -#define atomic_inc_and_test atomic_inc_and_test -#endif - -#define arch_atomic_add_negative atomic_add_negative - -#ifndef atomic_add_negative -/** - * atomic_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __always_inline bool -atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} -#define atomic_add_negative atomic_add_negative -#endif - -#define arch_atomic_fetch_add_unless atomic_fetch_add_unless - -#ifndef atomic_fetch_add_unless -/** - * atomic_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v - */ -static __always_inline int -atomic_fetch_add_unless(atomic_t *v, int a, int u) -{ - int c = atomic_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic_try_cmpxchg(v, &c, c + a)); - - return c; -} -#define atomic_fetch_add_unless atomic_fetch_add_unless -#endif - -#define arch_atomic_add_unless atomic_add_unless - -#ifndef atomic_add_unless -/** - * atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static __always_inline bool -atomic_add_unless(atomic_t *v, int a, int u) -{ - return atomic_fetch_add_unless(v, a, u) != u; -} -#define atomic_add_unless atomic_add_unless -#endif - -#define arch_atomic_inc_not_zero atomic_inc_not_zero - -#ifndef atomic_inc_not_zero -/** - * atomic_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. 
- */ -static __always_inline bool -atomic_inc_not_zero(atomic_t *v) -{ - return atomic_add_unless(v, 1, 0); -} -#define atomic_inc_not_zero atomic_inc_not_zero -#endif - -#define arch_atomic_inc_unless_negative atomic_inc_unless_negative - -#ifndef atomic_inc_unless_negative -static __always_inline bool -atomic_inc_unless_negative(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#define atomic_inc_unless_negative atomic_inc_unless_negative -#endif - -#define arch_atomic_dec_unless_positive atomic_dec_unless_positive - -#ifndef atomic_dec_unless_positive -static __always_inline bool -atomic_dec_unless_positive(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#define atomic_dec_unless_positive atomic_dec_unless_positive -#endif - -#define arch_atomic_dec_if_positive atomic_dec_if_positive - -#ifndef atomic_dec_if_positive -static __always_inline int -atomic_dec_if_positive(atomic_t *v) -{ - int dec, c = atomic_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic_try_cmpxchg(v, &c, dec)); - - return dec; -} -#define atomic_dec_if_positive atomic_dec_if_positive -#endif - -#ifdef CONFIG_GENERIC_ATOMIC64 -#include <asm-generic/atomic64.h> -#endif - -#define arch_atomic64_read atomic64_read -#define arch_atomic64_read_acquire atomic64_read_acquire - -#ifndef atomic64_read_acquire -static __always_inline s64 -atomic64_read_acquire(const atomic64_t *v) -{ - return smp_load_acquire(&(v)->counter); -} -#define atomic64_read_acquire atomic64_read_acquire -#endif - -#define arch_atomic64_set atomic64_set -#define arch_atomic64_set_release atomic64_set_release - -#ifndef atomic64_set_release -static __always_inline void -atomic64_set_release(atomic64_t *v, s64 i) -{ - smp_store_release(&(v)->counter, i); -} -#define atomic64_set_release atomic64_set_release -#endif - -#define arch_atomic64_add atomic64_add - -#define arch_atomic64_add_return atomic64_add_return -#define arch_atomic64_add_return_acquire atomic64_add_return_acquire -#define arch_atomic64_add_return_release atomic64_add_return_release -#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed - -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return -#define atomic64_add_return_relaxed atomic64_add_return -#else /* atomic64_add_return_relaxed */ - -#ifndef atomic64_add_return_acquire -static __always_inline s64 -atomic64_add_return_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_add_return_acquire atomic64_add_return_acquire -#endif - -#ifndef atomic64_add_return_release -static __always_inline s64 -atomic64_add_return_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_add_return_relaxed(i, v); -} -#define atomic64_add_return_release atomic64_add_return_release -#endif - -#ifndef atomic64_add_return -static __always_inline s64 -atomic64_add_return(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_add_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_add_return atomic64_add_return -#endif - -#endif /* atomic64_add_return_relaxed */ - -#define arch_atomic64_fetch_add atomic64_fetch_add -#define 
arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#define arch_atomic64_fetch_add_release atomic64_fetch_add_release -#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed - -#ifndef atomic64_fetch_add_relaxed -#define atomic64_fetch_add_acquire atomic64_fetch_add -#define atomic64_fetch_add_release atomic64_fetch_add -#define atomic64_fetch_add_relaxed atomic64_fetch_add -#else /* atomic64_fetch_add_relaxed */ - -#ifndef atomic64_fetch_add_acquire -static __always_inline s64 -atomic64_fetch_add_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_add_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#endif - -#ifndef atomic64_fetch_add_release -static __always_inline s64 -atomic64_fetch_add_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_add_relaxed(i, v); -} -#define atomic64_fetch_add_release atomic64_fetch_add_release -#endif - -#ifndef atomic64_fetch_add -static __always_inline s64 -atomic64_fetch_add(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_add_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_add atomic64_fetch_add -#endif - -#endif /* atomic64_fetch_add_relaxed */ - -#define arch_atomic64_sub atomic64_sub - -#define arch_atomic64_sub_return atomic64_sub_return -#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire -#define arch_atomic64_sub_return_release atomic64_sub_return_release -#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed - -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return -#define atomic64_sub_return_relaxed atomic64_sub_return -#else /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_sub_return_acquire -static __always_inline s64 -atomic64_sub_return_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_sub_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#endif - -#ifndef atomic64_sub_return_release -static __always_inline s64 -atomic64_sub_return_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_sub_return_relaxed(i, v); -} -#define atomic64_sub_return_release atomic64_sub_return_release -#endif - -#ifndef atomic64_sub_return -static __always_inline s64 -atomic64_sub_return(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_sub_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_sub_return atomic64_sub_return -#endif - -#endif /* atomic64_sub_return_relaxed */ - -#define arch_atomic64_fetch_sub atomic64_fetch_sub -#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release -#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed - -#ifndef atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_acquire atomic64_fetch_sub -#define atomic64_fetch_sub_release atomic64_fetch_sub -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub -#else /* atomic64_fetch_sub_relaxed */ - -#ifndef atomic64_fetch_sub_acquire -static __always_inline s64 -atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_sub_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#endif - 
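The hunk above shows the standard fallback shape used throughout this header: an architecture supplies only the _relaxed primitive (here atomic64_fetch_sub_relaxed()) and the _acquire, _release and fully-ordered forms are derived from it via the generic __atomic_acquire_fence()/__atomic_release_fence()/__atomic_pre_full_fence()/__atomic_post_full_fence() hooks. A typical caller that deliberately picks the weakest sufficient ordering is a reference-count drop path. A minimal sketch of such a caller, mirroring the pairing used by the kernel's refcount code (struct obj, obj_free() and the refcnt field are hypothetical and not part of this patch):

static inline void obj_put(struct obj *o)
{
	/*
	 * Release ordering: all stores to *o made before this drop are
	 * visible to whichever CPU observes the count reaching zero.
	 */
	if (atomic64_fetch_sub_release(1, &o->refcnt) == 1) {
		/* Pair the release above with an acquire before teardown. */
		smp_acquire__after_ctrl_dep();
		obj_free(o);
	}
}

Using _release plus an explicit acquire on the zero branch is cheaper on weakly ordered architectures than the fully-ordered atomic64_fetch_sub(), which is exactly why the fallback machinery generates the whole family of orderings from one relaxed primitive.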
-#ifndef atomic64_fetch_sub_release -static __always_inline s64 -atomic64_fetch_sub_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_sub_relaxed(i, v); -} -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#endif - -#ifndef atomic64_fetch_sub -static __always_inline s64 -atomic64_fetch_sub(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_sub_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_sub atomic64_fetch_sub -#endif - -#endif /* atomic64_fetch_sub_relaxed */ - -#define arch_atomic64_inc atomic64_inc - -#ifndef atomic64_inc -static __always_inline void -atomic64_inc(atomic64_t *v) -{ - atomic64_add(1, v); -} -#define atomic64_inc atomic64_inc -#endif - -#define arch_atomic64_inc_return atomic64_inc_return -#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire -#define arch_atomic64_inc_return_release atomic64_inc_return_release -#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed - -#ifndef atomic64_inc_return_relaxed -#ifdef atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return -#define atomic64_inc_return_relaxed atomic64_inc_return -#endif /* atomic64_inc_return */ - -#ifndef atomic64_inc_return -static __always_inline s64 -atomic64_inc_return(atomic64_t *v) -{ - return atomic64_add_return(1, v); -} -#define atomic64_inc_return atomic64_inc_return -#endif - -#ifndef atomic64_inc_return_acquire -static __always_inline s64 -atomic64_inc_return_acquire(atomic64_t *v) -{ - return atomic64_add_return_acquire(1, v); -} -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif - -#ifndef atomic64_inc_return_release -static __always_inline s64 -atomic64_inc_return_release(atomic64_t *v) -{ - return atomic64_add_return_release(1, v); -} -#define atomic64_inc_return_release atomic64_inc_return_release -#endif - -#ifndef atomic64_inc_return_relaxed -static __always_inline s64 -atomic64_inc_return_relaxed(atomic64_t *v) -{ - return atomic64_add_return_relaxed(1, v); -} -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#endif - -#else /* atomic64_inc_return_relaxed */ - -#ifndef atomic64_inc_return_acquire -static __always_inline s64 -atomic64_inc_return_acquire(atomic64_t *v) -{ - s64 ret = atomic64_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif - -#ifndef atomic64_inc_return_release -static __always_inline s64 -atomic64_inc_return_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_inc_return_relaxed(v); -} -#define atomic64_inc_return_release atomic64_inc_return_release -#endif - -#ifndef atomic64_inc_return -static __always_inline s64 -atomic64_inc_return(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_inc_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_inc_return atomic64_inc_return -#endif - -#endif /* atomic64_inc_return_relaxed */ - -#define arch_atomic64_fetch_inc atomic64_fetch_inc -#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release -#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed - -#ifndef atomic64_fetch_inc_relaxed -#ifdef atomic64_fetch_inc -#define atomic64_fetch_inc_acquire atomic64_fetch_inc -#define atomic64_fetch_inc_release 
atomic64_fetch_inc -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc -#endif /* atomic64_fetch_inc */ - -#ifndef atomic64_fetch_inc -static __always_inline s64 -atomic64_fetch_inc(atomic64_t *v) -{ - return atomic64_fetch_add(1, v); -} -#define atomic64_fetch_inc atomic64_fetch_inc -#endif - -#ifndef atomic64_fetch_inc_acquire -static __always_inline s64 -atomic64_fetch_inc_acquire(atomic64_t *v) -{ - return atomic64_fetch_add_acquire(1, v); -} -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif - -#ifndef atomic64_fetch_inc_release -static __always_inline s64 -atomic64_fetch_inc_release(atomic64_t *v) -{ - return atomic64_fetch_add_release(1, v); -} -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif - -#ifndef atomic64_fetch_inc_relaxed -static __always_inline s64 -atomic64_fetch_inc_relaxed(atomic64_t *v) -{ - return atomic64_fetch_add_relaxed(1, v); -} -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#endif - -#else /* atomic64_fetch_inc_relaxed */ - -#ifndef atomic64_fetch_inc_acquire -static __always_inline s64 -atomic64_fetch_inc_acquire(atomic64_t *v) -{ - s64 ret = atomic64_fetch_inc_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif - -#ifndef atomic64_fetch_inc_release -static __always_inline s64 -atomic64_fetch_inc_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_inc_relaxed(v); -} -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif - -#ifndef atomic64_fetch_inc -static __always_inline s64 -atomic64_fetch_inc(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_inc atomic64_fetch_inc -#endif - -#endif /* atomic64_fetch_inc_relaxed */ - -#define arch_atomic64_dec atomic64_dec - -#ifndef atomic64_dec -static __always_inline void -atomic64_dec(atomic64_t *v) -{ - atomic64_sub(1, v); -} -#define atomic64_dec atomic64_dec -#endif - -#define arch_atomic64_dec_return atomic64_dec_return -#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire -#define arch_atomic64_dec_return_release atomic64_dec_return_release -#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed - -#ifndef atomic64_dec_return_relaxed -#ifdef atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return -#define atomic64_dec_return_relaxed atomic64_dec_return -#endif /* atomic64_dec_return */ - -#ifndef atomic64_dec_return -static __always_inline s64 -atomic64_dec_return(atomic64_t *v) -{ - return atomic64_sub_return(1, v); -} -#define atomic64_dec_return atomic64_dec_return -#endif - -#ifndef atomic64_dec_return_acquire -static __always_inline s64 -atomic64_dec_return_acquire(atomic64_t *v) -{ - return atomic64_sub_return_acquire(1, v); -} -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif - -#ifndef atomic64_dec_return_release -static __always_inline s64 -atomic64_dec_return_release(atomic64_t *v) -{ - return atomic64_sub_return_release(1, v); -} -#define atomic64_dec_return_release atomic64_dec_return_release -#endif - -#ifndef atomic64_dec_return_relaxed -static __always_inline s64 -atomic64_dec_return_relaxed(atomic64_t *v) -{ - return atomic64_sub_return_relaxed(1, v); -} -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#endif - -#else /* atomic64_dec_return_relaxed */ - -#ifndef 
atomic64_dec_return_acquire -static __always_inline s64 -atomic64_dec_return_acquire(atomic64_t *v) -{ - s64 ret = atomic64_dec_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif - -#ifndef atomic64_dec_return_release -static __always_inline s64 -atomic64_dec_return_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_dec_return_relaxed(v); -} -#define atomic64_dec_return_release atomic64_dec_return_release -#endif - -#ifndef atomic64_dec_return -static __always_inline s64 -atomic64_dec_return(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_dec_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_dec_return atomic64_dec_return -#endif - -#endif /* atomic64_dec_return_relaxed */ - -#define arch_atomic64_fetch_dec atomic64_fetch_dec -#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release -#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed - -#ifndef atomic64_fetch_dec_relaxed -#ifdef atomic64_fetch_dec -#define atomic64_fetch_dec_acquire atomic64_fetch_dec -#define atomic64_fetch_dec_release atomic64_fetch_dec -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec -#endif /* atomic64_fetch_dec */ - -#ifndef atomic64_fetch_dec -static __always_inline s64 -atomic64_fetch_dec(atomic64_t *v) -{ - return atomic64_fetch_sub(1, v); -} -#define atomic64_fetch_dec atomic64_fetch_dec -#endif - -#ifndef atomic64_fetch_dec_acquire -static __always_inline s64 -atomic64_fetch_dec_acquire(atomic64_t *v) -{ - return atomic64_fetch_sub_acquire(1, v); -} -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif - -#ifndef atomic64_fetch_dec_release -static __always_inline s64 -atomic64_fetch_dec_release(atomic64_t *v) -{ - return atomic64_fetch_sub_release(1, v); -} -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif - -#ifndef atomic64_fetch_dec_relaxed -static __always_inline s64 -atomic64_fetch_dec_relaxed(atomic64_t *v) -{ - return atomic64_fetch_sub_relaxed(1, v); -} -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#endif - -#else /* atomic64_fetch_dec_relaxed */ - -#ifndef atomic64_fetch_dec_acquire -static __always_inline s64 -atomic64_fetch_dec_acquire(atomic64_t *v) -{ - s64 ret = atomic64_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif - -#ifndef atomic64_fetch_dec_release -static __always_inline s64 -atomic64_fetch_dec_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_dec_relaxed(v); -} -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif - -#ifndef atomic64_fetch_dec -static __always_inline s64 -atomic64_fetch_dec(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_dec_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_dec atomic64_fetch_dec -#endif - -#endif /* atomic64_fetch_dec_relaxed */ - -#define arch_atomic64_and atomic64_and - -#define arch_atomic64_fetch_and atomic64_fetch_and -#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#define arch_atomic64_fetch_and_release atomic64_fetch_and_release -#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed - -#ifndef atomic64_fetch_and_relaxed -#define atomic64_fetch_and_acquire atomic64_fetch_and -#define 
atomic64_fetch_and_release atomic64_fetch_and -#define atomic64_fetch_and_relaxed atomic64_fetch_and -#else /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_fetch_and_acquire -static __always_inline s64 -atomic64_fetch_and_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_and_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#endif - -#ifndef atomic64_fetch_and_release -static __always_inline s64 -atomic64_fetch_and_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_and_relaxed(i, v); -} -#define atomic64_fetch_and_release atomic64_fetch_and_release -#endif - -#ifndef atomic64_fetch_and -static __always_inline s64 -atomic64_fetch_and(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_and atomic64_fetch_and -#endif - -#endif /* atomic64_fetch_and_relaxed */ - -#define arch_atomic64_andnot atomic64_andnot - -#ifndef atomic64_andnot -static __always_inline void -atomic64_andnot(s64 i, atomic64_t *v) -{ - atomic64_and(~i, v); -} -#define atomic64_andnot atomic64_andnot -#endif - -#define arch_atomic64_fetch_andnot atomic64_fetch_andnot -#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed - -#ifndef atomic64_fetch_andnot_relaxed -#ifdef atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#endif /* atomic64_fetch_andnot */ - -#ifndef atomic64_fetch_andnot -static __always_inline s64 -atomic64_fetch_andnot(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and(~i, v); -} -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif - -#ifndef atomic64_fetch_andnot_acquire -static __always_inline s64 -atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_acquire(~i, v); -} -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif - -#ifndef atomic64_fetch_andnot_release -static __always_inline s64 -atomic64_fetch_andnot_release(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_release(~i, v); -} -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif - -#ifndef atomic64_fetch_andnot_relaxed -static __always_inline s64 -atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_relaxed(~i, v); -} -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#endif - -#else /* atomic64_fetch_andnot_relaxed */ - -#ifndef atomic64_fetch_andnot_acquire -static __always_inline s64 -atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_andnot_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif - -#ifndef atomic64_fetch_andnot_release -static __always_inline s64 -atomic64_fetch_andnot_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_andnot_relaxed(i, v); -} -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif - -#ifndef atomic64_fetch_andnot -static __always_inline s64 -atomic64_fetch_andnot(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = 
atomic64_fetch_andnot_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif - -#endif /* atomic64_fetch_andnot_relaxed */ - -#define arch_atomic64_or atomic64_or - -#define arch_atomic64_fetch_or atomic64_fetch_or -#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#define arch_atomic64_fetch_or_release atomic64_fetch_or_release -#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed - -#ifndef atomic64_fetch_or_relaxed -#define atomic64_fetch_or_acquire atomic64_fetch_or -#define atomic64_fetch_or_release atomic64_fetch_or -#define atomic64_fetch_or_relaxed atomic64_fetch_or -#else /* atomic64_fetch_or_relaxed */ - -#ifndef atomic64_fetch_or_acquire -static __always_inline s64 -atomic64_fetch_or_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_or_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#endif - -#ifndef atomic64_fetch_or_release -static __always_inline s64 -atomic64_fetch_or_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_or_relaxed(i, v); -} -#define atomic64_fetch_or_release atomic64_fetch_or_release -#endif - -#ifndef atomic64_fetch_or -static __always_inline s64 -atomic64_fetch_or(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_or_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_or atomic64_fetch_or -#endif - -#endif /* atomic64_fetch_or_relaxed */ - -#define arch_atomic64_xor atomic64_xor - -#define arch_atomic64_fetch_xor atomic64_fetch_xor -#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release -#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed - -#ifndef atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_acquire atomic64_fetch_xor -#define atomic64_fetch_xor_release atomic64_fetch_xor -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor -#else /* atomic64_fetch_xor_relaxed */ - -#ifndef atomic64_fetch_xor_acquire -static __always_inline s64 -atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_xor_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#endif - -#ifndef atomic64_fetch_xor_release -static __always_inline s64 -atomic64_fetch_xor_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_xor_relaxed(i, v); -} -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#endif - -#ifndef atomic64_fetch_xor -static __always_inline s64 -atomic64_fetch_xor(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_xor_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_xor atomic64_fetch_xor -#endif - -#endif /* atomic64_fetch_xor_relaxed */ - -#define arch_atomic64_xchg atomic64_xchg -#define arch_atomic64_xchg_acquire atomic64_xchg_acquire -#define arch_atomic64_xchg_release atomic64_xchg_release -#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed - -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg -#define atomic64_xchg_relaxed atomic64_xchg -#else /* atomic64_xchg_relaxed */ - -#ifndef atomic64_xchg_acquire -static __always_inline s64 -atomic64_xchg_acquire(atomic64_t *v, s64 i) -{ - s64 ret = 
atomic64_xchg_relaxed(v, i); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_xchg_acquire atomic64_xchg_acquire -#endif - -#ifndef atomic64_xchg_release -static __always_inline s64 -atomic64_xchg_release(atomic64_t *v, s64 i) -{ - __atomic_release_fence(); - return atomic64_xchg_relaxed(v, i); -} -#define atomic64_xchg_release atomic64_xchg_release -#endif - -#ifndef atomic64_xchg -static __always_inline s64 -atomic64_xchg(atomic64_t *v, s64 i) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_xchg_relaxed(v, i); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_xchg atomic64_xchg -#endif - -#endif /* atomic64_xchg_relaxed */ - -#define arch_atomic64_cmpxchg atomic64_cmpxchg -#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release -#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed - -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#else /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_cmpxchg_acquire -static __always_inline s64 -atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) -{ - s64 ret = atomic64_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#endif - -#ifndef atomic64_cmpxchg_release -static __always_inline s64 -atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) -{ - __atomic_release_fence(); - return atomic64_cmpxchg_relaxed(v, old, new); -} -#define atomic64_cmpxchg_release atomic64_cmpxchg_release -#endif - -#ifndef atomic64_cmpxchg -static __always_inline s64 -atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_cmpxchg atomic64_cmpxchg -#endif - -#endif /* atomic64_cmpxchg_relaxed */ - -#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg -#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed - -#ifndef atomic64_try_cmpxchg_relaxed -#ifdef atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg -#endif /* atomic64_try_cmpxchg */ - -#ifndef atomic64_try_cmpxchg -static __always_inline bool -atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif - -#ifndef atomic64_try_cmpxchg_acquire -static __always_inline bool -atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_acquire(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif - -#ifndef atomic64_try_cmpxchg_release -static __always_inline bool -atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_release(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_release 
atomic64_try_cmpxchg_release -#endif - -#ifndef atomic64_try_cmpxchg_relaxed -static __always_inline bool -atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_relaxed(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed -#endif - -#else /* atomic64_try_cmpxchg_relaxed */ - -#ifndef atomic64_try_cmpxchg_acquire -static __always_inline bool -atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif - -#ifndef atomic64_try_cmpxchg_release -static __always_inline bool -atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - __atomic_release_fence(); - return atomic64_try_cmpxchg_relaxed(v, old, new); -} -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif - -#ifndef atomic64_try_cmpxchg -static __always_inline bool -atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif - -#endif /* atomic64_try_cmpxchg_relaxed */ - -#define arch_atomic64_sub_and_test atomic64_sub_and_test - -#ifndef atomic64_sub_and_test -/** - * atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic64_sub_and_test(s64 i, atomic64_t *v) -{ - return atomic64_sub_return(i, v) == 0; -} -#define atomic64_sub_and_test atomic64_sub_and_test -#endif - -#define arch_atomic64_dec_and_test atomic64_dec_and_test - -#ifndef atomic64_dec_and_test -/** - * atomic64_dec_and_test - decrement and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __always_inline bool -atomic64_dec_and_test(atomic64_t *v) -{ - return atomic64_dec_return(v) == 0; -} -#define atomic64_dec_and_test atomic64_dec_and_test -#endif - -#define arch_atomic64_inc_and_test atomic64_inc_and_test - -#ifndef atomic64_inc_and_test -/** - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic64_inc_and_test(atomic64_t *v) -{ - return atomic64_inc_return(v) == 0; -} -#define atomic64_inc_and_test atomic64_inc_and_test -#endif - -#define arch_atomic64_add_negative atomic64_add_negative - -#ifndef atomic64_add_negative -/** - * atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. 
- */ -static __always_inline bool -atomic64_add_negative(s64 i, atomic64_t *v) -{ - return atomic64_add_return(i, v) < 0; -} -#define atomic64_add_negative atomic64_add_negative -#endif - -#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless - -#ifndef atomic64_fetch_add_unless -/** - * atomic64_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v - */ -static __always_inline s64 -atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic64_try_cmpxchg(v, &c, c + a)); - - return c; -} -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -#endif - -#define arch_atomic64_add_unless atomic64_add_unless - -#ifndef atomic64_add_unless -/** - * atomic64_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static __always_inline bool -atomic64_add_unless(atomic64_t *v, s64 a, s64 u) -{ - return atomic64_fetch_add_unless(v, a, u) != u; -} -#define atomic64_add_unless atomic64_add_unless -#endif - -#define arch_atomic64_inc_not_zero atomic64_inc_not_zero - -#ifndef atomic64_inc_not_zero -/** - * atomic64_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -static __always_inline bool -atomic64_inc_not_zero(atomic64_t *v) -{ - return atomic64_add_unless(v, 1, 0); -} -#define atomic64_inc_not_zero atomic64_inc_not_zero -#endif - -#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative - -#ifndef atomic64_inc_unless_negative -static __always_inline bool -atomic64_inc_unless_negative(atomic64_t *v) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#define atomic64_inc_unless_negative atomic64_inc_unless_negative -#endif - -#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive - -#ifndef atomic64_dec_unless_positive -static __always_inline bool -atomic64_dec_unless_positive(atomic64_t *v) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#define atomic64_dec_unless_positive atomic64_dec_unless_positive -#endif - -#define arch_atomic64_dec_if_positive atomic64_dec_if_positive - -#ifndef atomic64_dec_if_positive -static __always_inline s64 -atomic64_dec_if_positive(atomic64_t *v) -{ - s64 dec, c = atomic64_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic64_try_cmpxchg(v, &c, dec)); - - return dec; -} -#define atomic64_dec_if_positive atomic64_dec_if_positive -#endif - -#endif /* _LINUX_ATOMIC_FALLBACK_H */ -// d78e6c293c661c15188f0ec05bce45188c8d5892 diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 571a11008ab5..8dd57c3a99e9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -77,13 +77,8 @@ __ret; \ }) -#ifdef ARCH_ATOMIC -#include <linux/atomic-arch-fallback.h> -#include <asm-generic/atomic-instrumented.h> -#else -#include 
<linux/atomic-fallback.h> -#endif - -#include <asm-generic/atomic-long.h> +#include <linux/atomic/atomic-arch-fallback.h> +#include <linux/atomic/atomic-long.h> +#include <linux/atomic/atomic-instrumented.h> #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index a3dba31df01e..a3dba31df01e 100644 --- a/include/linux/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h diff --git a/include/asm-generic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index 888b6cfeed91..a0f654370da3 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -14,8 +14,8 @@ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid * double instrumentation. */ -#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H -#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H +#ifndef _LINUX_ATOMIC_INSTRUMENTED_H +#define _LINUX_ATOMIC_INSTRUMENTED_H #include <linux/build_bug.h> #include <linux/compiler.h> @@ -27,17 +27,13 @@ atomic_read(const atomic_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read(v); } -#define atomic_read atomic_read -#if defined(arch_atomic_read_acquire) static __always_inline int atomic_read_acquire(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } -#define atomic_read_acquire atomic_read_acquire -#endif static __always_inline void atomic_set(atomic_t *v, int i) @@ -45,17 +41,13 @@ atomic_set(atomic_t *v, int i) instrument_atomic_write(v, sizeof(*v)); arch_atomic_set(v, i); } -#define atomic_set atomic_set -#if defined(arch_atomic_set_release) static __always_inline void atomic_set_release(atomic_t *v, int i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } -#define atomic_set_release atomic_set_release -#endif static __always_inline void atomic_add(int i, atomic_t *v) @@ -63,87 +55,62 @@ atomic_add(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_add(i, v); } -#define atomic_add atomic_add -#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) static __always_inline int atomic_add_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } -#define atomic_add_return atomic_add_return -#endif -#if defined(arch_atomic_add_return_acquire) static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } -#define atomic_add_return_acquire atomic_add_return_acquire -#endif -#if defined(arch_atomic_add_return_release) static __always_inline int atomic_add_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } -#define atomic_add_return_release atomic_add_return_release -#endif -#if defined(arch_atomic_add_return_relaxed) static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } -#define atomic_add_return_relaxed atomic_add_return_relaxed -#endif -#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } -#define atomic_fetch_add atomic_fetch_add -#endif -#if defined(arch_atomic_fetch_add_acquire) static 
__always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#endif -#if defined(arch_atomic_fetch_add_release) static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } -#define atomic_fetch_add_release atomic_fetch_add_release -#endif -#if defined(arch_atomic_fetch_add_relaxed) static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#endif static __always_inline void atomic_sub(int i, atomic_t *v) @@ -151,267 +118,188 @@ atomic_sub(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_sub(i, v); } -#define atomic_sub atomic_sub -#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) static __always_inline int atomic_sub_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } -#define atomic_sub_return atomic_sub_return -#endif -#if defined(arch_atomic_sub_return_acquire) static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } -#define atomic_sub_return_acquire atomic_sub_return_acquire -#endif -#if defined(arch_atomic_sub_return_release) static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } -#define atomic_sub_return_release atomic_sub_return_release -#endif -#if defined(arch_atomic_sub_return_relaxed) static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } -#define atomic_sub_return_relaxed atomic_sub_return_relaxed -#endif -#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } -#define atomic_fetch_sub atomic_fetch_sub -#endif -#if defined(arch_atomic_fetch_sub_acquire) static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#endif -#if defined(arch_atomic_fetch_sub_release) static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } -#define atomic_fetch_sub_release atomic_fetch_sub_release -#endif -#if defined(arch_atomic_fetch_sub_relaxed) static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed -#endif -#if defined(arch_atomic_inc) static __always_inline void atomic_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_inc(v); } -#define atomic_inc atomic_inc -#endif -#if defined(arch_atomic_inc_return) static __always_inline int atomic_inc_return(atomic_t *v) { instrument_atomic_read_write(v, 
sizeof(*v)); return arch_atomic_inc_return(v); } -#define atomic_inc_return atomic_inc_return -#endif -#if defined(arch_atomic_inc_return_acquire) static __always_inline int atomic_inc_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } -#define atomic_inc_return_acquire atomic_inc_return_acquire -#endif -#if defined(arch_atomic_inc_return_release) static __always_inline int atomic_inc_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } -#define atomic_inc_return_release atomic_inc_return_release -#endif -#if defined(arch_atomic_inc_return_relaxed) static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#endif -#if defined(arch_atomic_fetch_inc) static __always_inline int atomic_fetch_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } -#define atomic_fetch_inc atomic_fetch_inc -#endif -#if defined(arch_atomic_fetch_inc_acquire) static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif -#if defined(arch_atomic_fetch_inc_release) static __always_inline int atomic_fetch_inc_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif -#if defined(arch_atomic_fetch_inc_relaxed) static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#endif -#if defined(arch_atomic_dec) static __always_inline void atomic_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_dec(v); } -#define atomic_dec atomic_dec -#endif -#if defined(arch_atomic_dec_return) static __always_inline int atomic_dec_return(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } -#define atomic_dec_return atomic_dec_return -#endif -#if defined(arch_atomic_dec_return_acquire) static __always_inline int atomic_dec_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif -#if defined(arch_atomic_dec_return_release) static __always_inline int atomic_dec_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } -#define atomic_dec_return_release atomic_dec_return_release -#endif -#if defined(arch_atomic_dec_return_relaxed) static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#endif -#if defined(arch_atomic_fetch_dec) static __always_inline int atomic_fetch_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } -#define atomic_fetch_dec atomic_fetch_dec -#endif -#if defined(arch_atomic_fetch_dec_acquire) static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); 
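Every wrapper in this generated header follows the same two-step shape: report the access to the instrumentation layer exactly once, then defer to the arch_ operation. A minimal userspace sketch of that shape, using a hypothetical demo_instrument() in place of the kernel's instrument_atomic_read_write() and the GCC/Clang __atomic builtins in place of a real arch_ op:

#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

/* Stand-in for instrument_atomic_read_write(): in the kernel this is
 * where KASAN/KCSAN observe the access, once, at the wrapper layer. */
static void demo_instrument(void *p, unsigned int size)
{
        printf("instrumented access at %p (%u bytes)\n", p, size);
}

/* Stand-in for the uninstrumented arch_ operation. */
static int demo_arch_fetch_add(int i, demo_atomic_t *v)
{
        return __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);
}

/* The wrapper shape repeated for every operation in this file. */
static inline int demo_atomic_fetch_add(int i, demo_atomic_t *v)
{
        demo_instrument(v, sizeof(*v));
        return demo_arch_fetch_add(i, v);
}

int main(void)
{
        demo_atomic_t v = { 40 };
        int old = demo_atomic_fetch_add(2, &v);

        printf("old=%d new=%d\n", old, v.counter);
        return 0;
}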
return arch_atomic_fetch_dec_acquire(v); } -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif -#if defined(arch_atomic_fetch_dec_release) static __always_inline int atomic_fetch_dec_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif -#if defined(arch_atomic_fetch_dec_relaxed) static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#endif static __always_inline void atomic_and(int i, atomic_t *v) @@ -419,97 +307,69 @@ atomic_and(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_and(i, v); } -#define atomic_and atomic_and -#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) static __always_inline int atomic_fetch_and(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } -#define atomic_fetch_and atomic_fetch_and -#endif -#if defined(arch_atomic_fetch_and_acquire) static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#endif -#if defined(arch_atomic_fetch_and_release) static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } -#define atomic_fetch_and_release atomic_fetch_and_release -#endif -#if defined(arch_atomic_fetch_and_relaxed) static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#endif -#if defined(arch_atomic_andnot) static __always_inline void atomic_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } -#define atomic_andnot atomic_andnot -#endif -#if defined(arch_atomic_fetch_andnot) static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } -#define atomic_fetch_andnot atomic_fetch_andnot -#endif -#if defined(arch_atomic_fetch_andnot_acquire) static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif -#if defined(arch_atomic_fetch_andnot_release) static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif -#if defined(arch_atomic_fetch_andnot_relaxed) static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#endif static __always_inline void atomic_or(int i, atomic_t *v) @@ -517,47 +377,34 @@ atomic_or(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_or(i, v); } -#define atomic_or atomic_or -#if !defined(arch_atomic_fetch_or_relaxed) 
|| defined(arch_atomic_fetch_or) static __always_inline int atomic_fetch_or(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } -#define atomic_fetch_or atomic_fetch_or -#endif -#if defined(arch_atomic_fetch_or_acquire) static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#endif -#if defined(arch_atomic_fetch_or_release) static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } -#define atomic_fetch_or_release atomic_fetch_or_release -#endif -#if defined(arch_atomic_fetch_or_relaxed) static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#endif static __always_inline void atomic_xor(int i, atomic_t *v) @@ -565,129 +412,91 @@ atomic_xor(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_xor(i, v); } -#define atomic_xor atomic_xor -#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } -#define atomic_fetch_xor atomic_fetch_xor -#endif -#if defined(arch_atomic_fetch_xor_acquire) static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#endif -#if defined(arch_atomic_fetch_xor_release) static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } -#define atomic_fetch_xor_release atomic_fetch_xor_release -#endif -#if defined(arch_atomic_fetch_xor_relaxed) static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed -#endif -#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) static __always_inline int atomic_xchg(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } -#define atomic_xchg atomic_xchg -#endif -#if defined(arch_atomic_xchg_acquire) static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } -#define atomic_xchg_acquire atomic_xchg_acquire -#endif -#if defined(arch_atomic_xchg_release) static __always_inline int atomic_xchg_release(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } -#define atomic_xchg_release atomic_xchg_release -#endif -#if defined(arch_atomic_xchg_relaxed) static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } -#define atomic_xchg_relaxed atomic_xchg_relaxed -#endif -#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, 
sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } -#define atomic_cmpxchg atomic_cmpxchg -#endif -#if defined(arch_atomic_cmpxchg_acquire) static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } -#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#endif -#if defined(arch_atomic_cmpxchg_release) static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } -#define atomic_cmpxchg_release atomic_cmpxchg_release -#endif -#if defined(arch_atomic_cmpxchg_relaxed) static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } -#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed -#endif -#if defined(arch_atomic_try_cmpxchg) static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { @@ -695,10 +504,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif -#if defined(arch_atomic_try_cmpxchg_acquire) static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { @@ -706,10 +512,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif -#if defined(arch_atomic_try_cmpxchg_release) static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { @@ -717,10 +520,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif -#if defined(arch_atomic_try_cmpxchg_relaxed) static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { @@ -728,108 +528,76 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic_sub_and_test) static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } -#define atomic_sub_and_test atomic_sub_and_test -#endif -#if defined(arch_atomic_dec_and_test) static __always_inline bool atomic_dec_and_test(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } -#define atomic_dec_and_test atomic_dec_and_test -#endif -#if defined(arch_atomic_inc_and_test) static __always_inline bool atomic_inc_and_test(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } -#define atomic_inc_and_test atomic_inc_and_test -#endif -#if defined(arch_atomic_add_negative) static __always_inline bool atomic_add_negative(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } -#define atomic_add_negative atomic_add_negative -#endif -#if defined(arch_atomic_fetch_add_unless) static __always_inline int 
atomic_fetch_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } -#define atomic_fetch_add_unless atomic_fetch_add_unless -#endif -#if defined(arch_atomic_add_unless) static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } -#define atomic_add_unless atomic_add_unless -#endif -#if defined(arch_atomic_inc_not_zero) static __always_inline bool atomic_inc_not_zero(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } -#define atomic_inc_not_zero atomic_inc_not_zero -#endif -#if defined(arch_atomic_inc_unless_negative) static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } -#define atomic_inc_unless_negative atomic_inc_unless_negative -#endif -#if defined(arch_atomic_dec_unless_positive) static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } -#define atomic_dec_unless_positive atomic_dec_unless_positive -#endif -#if defined(arch_atomic_dec_if_positive) static __always_inline int atomic_dec_if_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } -#define atomic_dec_if_positive atomic_dec_if_positive -#endif static __always_inline s64 atomic64_read(const atomic64_t *v) @@ -837,17 +605,13 @@ atomic64_read(const atomic64_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } -#define atomic64_read atomic64_read -#if defined(arch_atomic64_read_acquire) static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } -#define atomic64_read_acquire atomic64_read_acquire -#endif static __always_inline void atomic64_set(atomic64_t *v, s64 i) @@ -855,17 +619,13 @@ atomic64_set(atomic64_t *v, s64 i) instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } -#define atomic64_set atomic64_set -#if defined(arch_atomic64_set_release) static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } -#define atomic64_set_release atomic64_set_release -#endif static __always_inline void atomic64_add(s64 i, atomic64_t *v) @@ -873,87 +633,62 @@ atomic64_add(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_add(i, v); } -#define atomic64_add atomic64_add -#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } -#define atomic64_add_return atomic64_add_return -#endif -#if defined(arch_atomic64_add_return_acquire) static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } -#define atomic64_add_return_acquire atomic64_add_return_acquire -#endif -#if defined(arch_atomic64_add_return_release) static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } -#define atomic64_add_return_release 
atomic64_add_return_release -#endif -#if defined(arch_atomic64_add_return_relaxed) static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } -#define atomic64_fetch_add atomic64_fetch_add -#endif -#if defined(arch_atomic64_fetch_add_acquire) static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#endif -#if defined(arch_atomic64_fetch_add_release) static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } -#define atomic64_fetch_add_release atomic64_fetch_add_release -#endif -#if defined(arch_atomic64_fetch_add_relaxed) static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#endif static __always_inline void atomic64_sub(s64 i, atomic64_t *v) @@ -961,267 +696,188 @@ atomic64_sub(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } -#define atomic64_sub atomic64_sub -#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } -#define atomic64_sub_return atomic64_sub_return -#endif -#if defined(arch_atomic64_sub_return_acquire) static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#endif -#if defined(arch_atomic64_sub_return_release) static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } -#define atomic64_sub_return_release atomic64_sub_return_release -#endif -#if defined(arch_atomic64_sub_return_relaxed) static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } -#define atomic64_fetch_sub atomic64_fetch_sub -#endif -#if defined(arch_atomic64_fetch_sub_acquire) static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#endif -#if defined(arch_atomic64_fetch_sub_release) static __always_inline s64 
atomic64_fetch_sub_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#endif -#if defined(arch_atomic64_fetch_sub_relaxed) static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed -#endif -#if defined(arch_atomic64_inc) static __always_inline void atomic64_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_inc(v); } -#define atomic64_inc atomic64_inc -#endif -#if defined(arch_atomic64_inc_return) static __always_inline s64 atomic64_inc_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } -#define atomic64_inc_return atomic64_inc_return -#endif -#if defined(arch_atomic64_inc_return_acquire) static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif -#if defined(arch_atomic64_inc_return_release) static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } -#define atomic64_inc_return_release atomic64_inc_return_release -#endif -#if defined(arch_atomic64_inc_return_relaxed) static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#endif -#if defined(arch_atomic64_fetch_inc) static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } -#define atomic64_fetch_inc atomic64_fetch_inc -#endif -#if defined(arch_atomic64_fetch_inc_acquire) static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif -#if defined(arch_atomic64_fetch_inc_release) static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif -#if defined(arch_atomic64_fetch_inc_relaxed) static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#endif -#if defined(arch_atomic64_dec) static __always_inline void atomic64_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_dec(v); } -#define atomic64_dec atomic64_dec -#endif -#if defined(arch_atomic64_dec_return) static __always_inline s64 atomic64_dec_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } -#define atomic64_dec_return atomic64_dec_return -#endif -#if defined(arch_atomic64_dec_return_acquire) static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } -#define 
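Each operation in this file is generated in up to four memory-ordering flavours: fully ordered, _acquire, _release, and _relaxed. The C11 explicit orderings make the distinction concrete; the sketch below is illustrative only, not the kernel's per-architecture implementation:

#include <stdatomic.h>

/* Fully ordered: ordering on both sides of the read-modify-write. */
static long demo_fetch_add(_Atomic long *v, long i)
{
        return atomic_fetch_add_explicit(v, i, memory_order_seq_cst);
}

/* _acquire: later accesses may not be reordered before the RMW. */
static long demo_fetch_add_acquire(_Atomic long *v, long i)
{
        return atomic_fetch_add_explicit(v, i, memory_order_acquire);
}

/* _release: earlier accesses may not be reordered after the RMW. */
static long demo_fetch_add_release(_Atomic long *v, long i)
{
        return atomic_fetch_add_explicit(v, i, memory_order_release);
}

/* _relaxed: atomicity only, no ordering guarantees. */
static long demo_fetch_add_relaxed(_Atomic long *v, long i)
{
        return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

int main(void)
{
        _Atomic long v = 0;

        demo_fetch_add_relaxed(&v, 1);
        demo_fetch_add_acquire(&v, 1);
        demo_fetch_add_release(&v, 1);
        return demo_fetch_add(&v, 1) == 3 ? 0 : 1;
}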
atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif -#if defined(arch_atomic64_dec_return_release) static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } -#define atomic64_dec_return_release atomic64_dec_return_release -#endif -#if defined(arch_atomic64_dec_return_relaxed) static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#endif -#if defined(arch_atomic64_fetch_dec) static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } -#define atomic64_fetch_dec atomic64_fetch_dec -#endif -#if defined(arch_atomic64_fetch_dec_acquire) static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif -#if defined(arch_atomic64_fetch_dec_release) static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif -#if defined(arch_atomic64_fetch_dec_relaxed) static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#endif static __always_inline void atomic64_and(s64 i, atomic64_t *v) @@ -1229,97 +885,69 @@ atomic64_and(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_and(i, v); } -#define atomic64_and atomic64_and -#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } -#define atomic64_fetch_and atomic64_fetch_and -#endif -#if defined(arch_atomic64_fetch_and_acquire) static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#endif -#if defined(arch_atomic64_fetch_and_release) static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } -#define atomic64_fetch_and_release atomic64_fetch_and_release -#endif -#if defined(arch_atomic64_fetch_and_relaxed) static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#endif -#if defined(arch_atomic64_andnot) static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } -#define atomic64_andnot atomic64_andnot -#endif -#if defined(arch_atomic64_fetch_andnot) static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } -#define atomic64_fetch_andnot 
atomic64_fetch_andnot -#endif -#if defined(arch_atomic64_fetch_andnot_acquire) static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif -#if defined(arch_atomic64_fetch_andnot_release) static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif -#if defined(arch_atomic64_fetch_andnot_relaxed) static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#endif static __always_inline void atomic64_or(s64 i, atomic64_t *v) @@ -1327,47 +955,34 @@ atomic64_or(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_or(i, v); } -#define atomic64_or atomic64_or -#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } -#define atomic64_fetch_or atomic64_fetch_or -#endif -#if defined(arch_atomic64_fetch_or_acquire) static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#endif -#if defined(arch_atomic64_fetch_or_release) static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } -#define atomic64_fetch_or_release atomic64_fetch_or_release -#endif -#if defined(arch_atomic64_fetch_or_relaxed) static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#endif static __always_inline void atomic64_xor(s64 i, atomic64_t *v) @@ -1375,129 +990,91 @@ atomic64_xor(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } -#define atomic64_xor atomic64_xor -#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } -#define atomic64_fetch_xor atomic64_fetch_xor -#endif -#if defined(arch_atomic64_fetch_xor_acquire) static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#endif -#if defined(arch_atomic64_fetch_xor_release) static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#endif -#if defined(arch_atomic64_fetch_xor_relaxed) static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, 
sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed -#endif -#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } -#define atomic64_xchg atomic64_xchg -#endif -#if defined(arch_atomic64_xchg_acquire) static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } -#define atomic64_xchg_acquire atomic64_xchg_acquire -#endif -#if defined(arch_atomic64_xchg_release) static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } -#define atomic64_xchg_release atomic64_xchg_release -#endif -#if defined(arch_atomic64_xchg_relaxed) static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } -#define atomic64_xchg_relaxed atomic64_xchg_relaxed -#endif -#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } -#define atomic64_cmpxchg atomic64_cmpxchg -#endif -#if defined(arch_atomic64_cmpxchg_acquire) static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } -#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#endif -#if defined(arch_atomic64_cmpxchg_release) static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } -#define atomic64_cmpxchg_release atomic64_cmpxchg_release -#endif -#if defined(arch_atomic64_cmpxchg_relaxed) static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_try_cmpxchg) static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { @@ -1505,10 +1082,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif -#if defined(arch_atomic64_try_cmpxchg_acquire) static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { @@ -1516,10 +1090,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif -#if defined(arch_atomic64_try_cmpxchg_release) static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { @@ -1527,10 +1098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif -#if 
defined(arch_atomic64_try_cmpxchg_relaxed) static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { @@ -1538,218 +1106,739 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_sub_and_test) static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } -#define atomic64_sub_and_test atomic64_sub_and_test -#endif -#if defined(arch_atomic64_dec_and_test) static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } -#define atomic64_dec_and_test atomic64_dec_and_test -#endif -#if defined(arch_atomic64_inc_and_test) static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } -#define atomic64_inc_and_test atomic64_inc_and_test -#endif -#if defined(arch_atomic64_add_negative) static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } -#define atomic64_add_negative atomic64_add_negative -#endif -#if defined(arch_atomic64_fetch_add_unless) static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -#endif -#if defined(arch_atomic64_add_unless) static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } -#define atomic64_add_unless atomic64_add_unless -#endif -#if defined(arch_atomic64_inc_not_zero) static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } -#define atomic64_inc_not_zero atomic64_inc_not_zero -#endif -#if defined(arch_atomic64_inc_unless_negative) static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } -#define atomic64_inc_unless_negative atomic64_inc_unless_negative -#endif -#if defined(arch_atomic64_dec_unless_positive) static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } -#define atomic64_dec_unless_positive atomic64_dec_unless_positive -#endif -#if defined(arch_atomic64_dec_if_positive) static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } -#define atomic64_dec_if_positive atomic64_dec_if_positive -#endif -#if !defined(arch_xchg_relaxed) || defined(arch_xchg) +static __always_inline long +atomic_long_read(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ 
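The block of atomic_long_*() wrappers added here gives the word-sized atomic type the same instrumented API as atomic_t and atomic64_t. A userspace analogue of why atomic_long_t exists, sketched with C11 atomics (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* One counter type that is always the native word size, so the same
 * code works on 32-bit and 64-bit builds; in the kernel, atomic_long_t
 * is a typedef for atomic_t or atomic64_t accordingly. */
typedef _Atomic long demo_atomic_long_t;

int main(void)
{
        demo_atomic_long_t nr_refs = 1;

        atomic_fetch_add(&nr_refs, 1);                  /* like atomic_long_inc() */
        long left = atomic_fetch_sub(&nr_refs, 1) - 1;  /* like atomic_long_dec_return() */

        printf("refs=%ld, word size=%zu bytes\n", left, sizeof(long));
        return 0;
}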
+ instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_sub(i, v); +} + +static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and(i, v); +} + +static __always_inline long 
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_release(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + 
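The try_cmpxchg() family wrapped nearby rewrites *old with the observed value on failure, which is what makes the compact retry loops in the removed fallbacks work. A sketch of the inc_not_zero pattern built on C11's equivalent, atomic_compare_exchange_weak(), which updates 'old' the same way:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool demo_inc_not_zero(_Atomic long *v)
{
        long old = atomic_load(v);

        do {
                if (old == 0)           /* already zero: refuse to increment */
                        return false;
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        /* on CAS failure, 'old' now holds the current value: no explicit re-read */

        return true;
}

int main(void)
{
        _Atomic long v = 3;
        bool ok = demo_inc_not_zero(&v);

        printf("%d %ld\n", ok, atomic_load(&v));        /* prints: 1 4 */
        return 0;
}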
instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_if_positive(v); +} + #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_acquire) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_release) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_relaxed) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_acquire) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_release) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_relaxed) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_acquire) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_release) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_relaxed) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg) #define try_cmpxchg(ptr, oldp, ...) 
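The xchg()/cmpxchg() macros in this hunk capture their pointer argument in __ai_ptr before doing anything else, so an argument with side effects (say, p++) is evaluated exactly once, for the instrumentation and for the exchange alike. A userspace sketch of the idiom, using a GNU statement expression and the __atomic_exchange_n builtin as a stand-in for arch_xchg():

#include <stdio.h>

static void demo_instrument_write(void *p, unsigned long size)
{
        printf("write %p (%lu bytes)\n", p, size);
}

/* Evaluate (ptr) exactly once into __ai_ptr, as the kernel macros do,
 * then instrument and forward. GCC/Clang only. */
#define demo_xchg(ptr, new)                                             \
({                                                                      \
        __typeof__(ptr) __ai_ptr = (ptr);                               \
        demo_instrument_write(__ai_ptr, sizeof(*__ai_ptr));             \
        __atomic_exchange_n(__ai_ptr, (new), __ATOMIC_SEQ_CST);         \
})

int main(void)
{
        int x = 1;
        int old = demo_xchg(&x, 5);

        printf("old=%d new=%d\n", old, x);
        return 0;
}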
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1758,9 +1847,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_acquire) #define try_cmpxchg_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1769,9 +1856,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_release) #define try_cmpxchg_release(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1780,9 +1865,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_relaxed) #define try_cmpxchg_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1791,7 +1874,6 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif #define cmpxchg_local(ptr, ...) \ ({ \ @@ -1829,5 +1911,5 @@ atomic64_dec_if_positive(atomic64_t *v) arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) -#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 4bec382e44520f4d8267e42620054db26a659ea3 +#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ +// 2a9553f0a9d5619f19151092df5cabbbf16ce835 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h new file mode 100644 index 000000000000..800b8c35992d --- /dev/null +++ b/include/linux/atomic/atomic-long.h @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_LONG_H +#define _LINUX_ATOMIC_LONG_H + +#include <linux/compiler.h> +#include <asm/types.h> + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif + +#ifdef CONFIG_64BIT + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic64_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic64_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic64_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic64_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, 
atomic_long_t *v) +{ + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic64_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic64_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic64_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic64_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic64_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic64_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return 
arch_atomic64_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic64_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic64_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic64_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic64_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return 
arch_atomic64_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic_set(v, i); +} + +static __always_inline void 
+arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ 
+ return arch_atomic_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, 
atomic_long_t *v) +{ + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic_dec_unless_positive(v); +} + 
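/*
 * (Editorial aside, not part of the generated header.) The wrappers above
 * show the full dispatch chain introduced by this series: callers use the
 * un-prefixed atomic_long_*() names, the instrumented layer in
 * atomic-instrumented.h adds the instrument_atomic_read_write() KCSAN/KASAN
 * hooks, and this file forwards to arch_atomic_*() or arch_atomic64_*()
 * depending on CONFIG_64BIT. A minimal usage sketch follows; the pool
 * counter and helper names below are hypothetical, not kernel APIs.
 */
#include <linux/atomic.h>

/* A pool of 16 slots, tracked with a native-word atomic counter. */
static atomic_long_t demo_pool = ATOMIC_LONG_INIT(16);

/* Claim one slot; fails once the pool is empty. */
static bool demo_pool_get(void)
{
	/*
	 * atomic_long_dec_if_positive() decrements only while the counter
	 * is positive and returns the updated value, so a negative return
	 * means no slot was taken. On a 64-bit kernel the call expands to
	 * instrument_atomic_read_write() + arch_atomic64_dec_if_positive().
	 */
	return atomic_long_dec_if_positive(&demo_pool) >= 0;
}

/* Release a previously claimed slot. */
static void demo_pool_put(void)
{
	atomic_long_inc(&demo_pool);
}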
+static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _LINUX_ATOMIC_LONG_H */ +// e8f0e08ff072b74d180eabe2ad001282b38c2c88 diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 8612f8fc86c1..db0e099c2399 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -412,9 +412,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); * PF removes the filters and returns status. */ +/* VIRTCHNL_ETHER_ADDR_LEGACY + * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad + * bytes. Moving forward all VF drivers should not set type to + * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy + * behavior. The control plane function (i.e. PF) can use a best effort method + * of tracking the primary/device unicast in this case, but there is no + * guarantee and functionality depends on the implementation of the PF. + */ + +/* VIRTCHNL_ETHER_ADDR_PRIMARY + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the + * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and + * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane + * function (i.e. PF) to accurately track and use this MAC address for + * displaying on the host and for VM/function reset. + */ + +/* VIRTCHNL_ETHER_ADDR_EXTRA + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra + * unicast and/or multicast filters that are being added/deleted via + * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively. + */ struct virtchnl_ether_addr { u8 addr[ETH_ALEN]; - u8 pad[2]; + u8 type; +#define VIRTCHNL_ETHER_ADDR_LEGACY 0 +#define VIRTCHNL_ETHER_ADDR_PRIMARY 1 +#define VIRTCHNL_ETHER_ADDR_EXTRA 2 +#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */ + u8 pad; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index fff9367a6348..33207004cfde 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,7 @@ struct bdi_writeback { struct list_head b_dirty_time; /* time stamps are dirty */ spinlock_t list_lock; /* protects the b_* lists */ + atomic_t writeback_inodes; /* number of inodes under writeback */ struct percpu_counter stat[NR_WB_STAT_ITEMS]; unsigned long congested; /* WB_[a]sync_congested flags */ @@ -142,6 +143,7 @@ struct bdi_writeback { spinlock_t work_lock; /* protects work_list & dwork scheduling */ struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ unsigned long dirty_sleep; /* last wait */ @@ -154,6 +156,8 @@ struct bdi_writeback { struct cgroup_subsys_state *blkcg_css; /* and blkcg */ struct list_head memcg_node; /* anchored at memcg->cgwb_list */ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ + struct list_head b_attached; /* attached inodes, protected by list_lock */ + struct list_head offline_node; /* anchored at offline_cgwbs */ union { struct work_struct release_work; @@ -239,8 +243,9 @@ static inline void wb_get(struct bdi_writeback *wb) /** * wb_put - decrement a wb's refcount * @wb: bdi_writeback to put + * @nr: number of references to put */ -static inline void wb_put(struct bdi_writeback *wb) +static inline void wb_put_many(struct bdi_writeback *wb, 
unsigned long nr) { if (WARN_ON_ONCE(!wb->bdi)) { /* @@ -251,7 +256,16 @@ static inline void wb_put(struct bdi_writeback *wb) } if (wb != &wb->bdi->wb) - percpu_ref_put(&wb->refcnt); + percpu_ref_put_many(&wb->refcnt, nr); +} + +/** + * wb_put - decrement a wb's refcount + * @wb: bdi_writeback to put + */ +static inline void wb_put(struct bdi_writeback *wb) +{ + wb_put_many(wb, 1); } /** @@ -280,6 +294,10 @@ static inline void wb_put(struct bdi_writeback *wb) { } +static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr) +{ +} + static inline bool wb_dying(struct bdi_writeback *wb) { return false; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 44df4fcef65c..ac7f231b8825 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_bdi; + return I_BDEV(inode)->bd_disk->bdi; #endif return sb->s_bdi; } @@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) return inode->i_wb; } +static inline struct bdi_writeback *inode_to_wb_wbc( + struct inode *inode, + struct writeback_control *wbc) +{ + /* + * If wbc does not have inode attached, it means cgroup writeback was + * disabled when wbc started. Just use the default wb in that case. + */ + return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; +} + /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode @@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) return &inode_to_bdi(inode)->wb; } +static inline struct bdi_writeback *inode_to_wb_wbc( + struct inode *inode, + struct writeback_control *wbc) +{ + return inode_to_wb(inode); +} + + static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { diff --git a/include/linux/bio.h b/include/linux/bio.h index a0b4cfdf62a4..00952e92eae1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -5,7 +5,6 @@ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H -#include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ @@ -44,9 +43,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs) #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) -#define bio_multiple_segments(bio) \ - ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) - #define bvec_iter_sectors(iter) ((iter).bi_size >> 9) #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) @@ -271,7 +267,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) { - *bv = bio_iovec(bio); + *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); } static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) @@ -279,10 +275,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) struct bvec_iter iter = bio->bi_iter; int idx; - if (unlikely(!bio_multiple_segments(bio))) { - *bv = bio_iovec(bio); - return; - } + bio_get_first_bvec(bio, bv); + if (bv->bv_len == bio->bi_iter.bi_size) + return; /* this bio only has a single bvec */ bio_advance_iter(bio, &iter, iter.bi_size); @@ -379,7 +374,7 @@ static inline void bip_set_seed(struct 
bio_integrity_payload *bip, #endif /* CONFIG_BLK_DEV_INTEGRITY */ -extern void bio_trim(struct bio *bio, int offset, int size); +void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); @@ -405,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), + BIOSET_PERCPU_CACHE = BIT(2), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); @@ -413,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, struct bio_set *bs); +struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, + struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); @@ -523,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ -#ifdef CONFIG_HIGHMEM -/* - * remember never ever reenable interrupts between a bvec_kmap_irq and - * bvec_kunmap_irq! - */ -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - unsigned long addr; - - /* - * might not be a highmem page, but the preempt/irq count - * balancing is a lot nicer this way - */ - local_irq_save(*flags); - addr = (unsigned long) kmap_atomic(bvec->bv_page); - - BUG_ON(addr & ~PAGE_MASK); - - return (char *) addr + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - unsigned long ptr = (unsigned long) buffer & PAGE_MASK; - - kunmap_atomic((void *) ptr); - local_irq_restore(*flags); -} - -#else -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - return page_address(bvec->bv_page) + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - *flags = 0; -} -#endif - /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 
* @@ -703,6 +660,11 @@ struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; + /* + * per-cpu bio alloc cache + */ + struct bio_alloc_cache __percpu *cache; + mempool_t bio_pool; mempool_t bvec_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -719,6 +681,11 @@ struct bio_set { struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; + + /* + * Hot un-plug notifier for the per-cpu cache, if used + */ + struct hlist_node cpuhp_dead; }; static inline bool bioset_initialized(struct bio_set *bs) @@ -822,4 +789,6 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); + #endif /* __LINUX_BIO_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index a36cfcec4e77..37f36dad18bd 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -227,6 +227,12 @@ unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, un int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); +extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); + +extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); + #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 26bf15e6cd35..5e62e2383b7f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -4,6 +4,7 @@ #include <asm/types.h> #include <linux/bits.h> +#include <linux/typecheck.h> #include <uapi/linux/kernel.h> @@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, __clear_bit(nr, addr); } +/** + * __ptr_set_bit - Set bit in a pointer's value + * @nr: the bit to set + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * __ptr_set_bit(bit, &p); + */ +#define __ptr_set_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + __set_bit(nr, (unsigned long *)(addr)); \ + }) + +/** + * __ptr_clear_bit - Clear bit in a pointer's value + * @nr: the bit to clear + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * __ptr_clear_bit(bit, &p); + */ +#define __ptr_clear_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + __clear_bit(nr, (unsigned long *)(addr)); \ + }) + +/** + * __ptr_test_bit - Test bit in a pointer's value + * @nr: the bit to test + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * if (__ptr_test_bit(bit, &p)) { + * ... + * } else { + * ... 
+ * } + */ +#define __ptr_test_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + test_bit(nr, (unsigned long *)(addr)); \ + }) + #ifdef __KERNEL__ #ifndef set_mask_bits diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index b9f3c246c3c9..b4de2010fba5 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -30,6 +30,8 @@ /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX +#define FC_APPID_LEN 129 + #ifdef CONFIG_BLK_CGROUP @@ -55,6 +57,9 @@ struct blkcg { struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; struct list_head all_blkcgs_node; +#ifdef CONFIG_BLK_CGROUP_FC_APPID + char fc_app_id[FC_APPID_LEN]; +#endif #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif @@ -147,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); -typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, - size_t size); +typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, + struct seq_file *s); struct blkcg_policy { int plid; @@ -660,4 +665,62 @@ static inline void blk_cgroup_bio_start(struct bio *bio) { } #endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLK_CGROUP */ + +#ifdef CONFIG_BLK_CGROUP_FC_APPID +/* + * Sets the fc_app_id field associated with the blkcg + * @app_id: application identifier + * @cgrp_id: cgroup id + * @app_id_len: size of application identifier + */ +static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len) +{ + struct cgroup *cgrp; + struct cgroup_subsys_state *css; + struct blkcg *blkcg; + int ret = 0; + + if (app_id_len > FC_APPID_LEN) + return -EINVAL; + + cgrp = cgroup_get_from_id(cgrp_id); + if (!cgrp) + return -ENOENT; + css = cgroup_get_e_css(cgrp, &io_cgrp_subsys); + if (!css) { + ret = -ENOENT; + goto out_cgrp_put; + } + blkcg = css_to_blkcg(css); + /* + * There is a slight race condition on setting the appid. + * Worst case, an I/O may not find the right id. + * This is no different from the I/O we let pass while obtaining + * the vmid from the fabric. + * Adding the overhead of a lock is not necessary. + */ + strlcpy(blkcg->fc_app_id, app_id, app_id_len); + css_put(css); +out_cgrp_put: + cgroup_put(cgrp); + return ret; +} + +/** + * blkcg_get_fc_appid - get the fc app identifier associated with a bio + * @bio: target bio + * + * On success return the fc_app_id; on failure return NULL + */ +static inline char *blkcg_get_fc_appid(struct bio *bio) +{ + if (bio && bio->bi_blkg && + (bio->bi_blkg->blkcg->fc_app_id[0] != '\0')) + return bio->bi_blkg->blkcg->fc_app_id; + return NULL; +} +#else +static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; } +static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; } +#endif /*CONFIG_BLK_CGROUP_FC_APPID*/ #endif /* _BLK_CGROUP_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 359486940fa0..13ba1861e688 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -404,7 +404,13 @@ enum { BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, BLK_MQ_F_BLOCKING = 1 << 5, + /* Do not allow an I/O scheduler to be configured. */ BLK_MQ_F_NO_SCHED = 1 << 6, + /* + * Select 'none' during queue registration in case of a single hwq + * or shared hwqs instead of 'mq-deadline'. 
+ */ + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -426,19 +432,23 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) +struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, + struct lock_class_key *lkclass); +#define blk_mq_alloc_disk(set, queuedata) \ +({ \ + static struct lock_class_key __key; \ + \ + __blk_mq_alloc_disk(set, queuedata, &__key); \ +}) struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); -struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, - void *queuedata); -struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, - struct request_queue *q, - bool elevator_init); -struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, - const struct blk_mq_ops *ops, - unsigned int queue_depth, - unsigned int set_flags); +int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q); void blk_mq_unregister_dev(struct device *, struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, unsigned int queue_depth, + unsigned int set_flags); void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index db026b6ec15a..be622b5a21ed 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -29,23 +29,15 @@ struct block_device { int bd_openers; struct inode * bd_inode; /* will die */ struct super_block * bd_super; - struct mutex bd_mutex; /* open/close mutex */ void * bd_claiming; struct device bd_device; void * bd_holder; int bd_holders; bool bd_write_holder; -#ifdef CONFIG_SYSFS - struct list_head bd_holder_disks; -#endif struct kobject *bd_holder_dir; u8 bd_partno; - /* number of times partitions within this device have been opened. 
*/ - unsigned bd_part_count; - spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct gendisk * bd_disk; - struct backing_dev_info *bd_bdi; /* The counter of freeze processes */ int bd_fsfreeze_count; @@ -285,6 +277,7 @@ struct bio { }; #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) +#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT) /* * bio flags @@ -304,6 +297,8 @@ enum { BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_REMAPPED, + BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ + BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */ BIO_FLAG_LAST }; @@ -354,9 +349,6 @@ enum req_opf { /* reset all the zone present on the device */ REQ_OP_ZONE_RESET_ALL = 17, - /* SCSI passthrough using struct scsi_request */ - REQ_OP_SCSI_IN = 32, - REQ_OP_SCSI_OUT = 33, /* Driver private requests */ REQ_OP_DRV_IN = 34, REQ_OP_DRV_OUT = 35, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f69c75bd6d27..12b9dbcc980e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -11,30 +11,26 @@ #include <linux/minmax.h> #include <linux/timer.h> #include <linux/workqueue.h> -#include <linux/backing-dev-defs.h> #include <linux/wait.h> #include <linux/mempool.h> #include <linux/pfn.h> #include <linux/bio.h> #include <linux/stringify.h> #include <linux/gfp.h> -#include <linux/bsg.h> #include <linux/smp.h> #include <linux/rcupdate.h> #include <linux/percpu-refcount.h> #include <linux/scatterlist.h> #include <linux/blkzoned.h> #include <linux/pm.h> +#include <linux/sbitmap.h> struct module; -struct scsi_ioctl_command; - struct request_queue; struct elevator_queue; struct blk_trace; struct request; struct sg_io_hdr; -struct bsg_job; struct blkcg_gq; struct blk_flush_queue; struct pr_ops; @@ -56,7 +52,7 @@ struct blk_keyslot_manager; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ -#define BLKCG_MAX_POLS 5 +#define BLKCG_MAX_POLS 6 typedef void (rq_end_io_fn)(struct request *, blk_status_t); @@ -239,42 +235,15 @@ struct request { void *end_io_data; }; -static inline bool blk_op_is_scsi(unsigned int op) -{ - return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; -} - -static inline bool blk_op_is_private(unsigned int op) +static inline bool blk_op_is_passthrough(unsigned int op) { + op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } -static inline bool blk_rq_is_scsi(struct request *rq) -{ - return blk_op_is_scsi(req_op(rq)); -} - -static inline bool blk_rq_is_private(struct request *rq) -{ - return blk_op_is_private(req_op(rq)); -} - static inline bool blk_rq_is_passthrough(struct request *rq) { - return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); -} - -static inline bool bio_is_passthrough(struct bio *bio) -{ - unsigned op = bio_op(bio); - - return blk_op_is_scsi(op) || blk_op_is_private(op); -} - -static inline bool blk_op_is_passthrough(unsigned int op) -{ - return (blk_op_is_scsi(op & REQ_OP_MASK) || - blk_op_is_private(op & REQ_OP_MASK)); + return blk_op_is_passthrough(req_op(rq)); } static inline unsigned short req_get_ioprio(struct request *req) @@ -301,9 +270,6 @@ enum blk_queue_state { #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ -#define BLK_SCSI_MAX_CMDS (256) -#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) - /* * Zoned block device models (zoned limit). 
* @@ -424,8 +390,6 @@ struct request_queue { struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; - struct backing_dev_info *backing_dev_info; - /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. @@ -450,6 +414,8 @@ struct request_queue { spinlock_t queue_lock; + struct gendisk *disk; + /* * queue kobject */ @@ -493,6 +459,9 @@ struct request_queue { atomic_t nr_active_requests_shared_sbitmap; + struct sbitmap_queue sched_bitmap_tags; + struct sbitmap_queue sched_breserved_tags; + struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); @@ -529,11 +498,6 @@ struct request_queue { unsigned int max_active_zones; #endif /* CONFIG_BLK_DEV_ZONED */ - /* - * sg stuff - */ - unsigned int sg_timeout; - unsigned int sg_reserved_size; int node; struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE @@ -560,10 +524,6 @@ struct request_queue { int mq_freeze_depth; -#if defined(CONFIG_BLK_DEV_BSG) - struct bsg_class_device bsg_dev; -#endif - #ifdef CONFIG_BLK_DEV_THROTTLING /* Throttle data */ struct throtl_data *td; @@ -687,8 +647,6 @@ extern void blk_clear_pm_only(struct request_queue *q); dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) -#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent))) - static inline bool queue_is_mq(struct request_queue *q) { return q->mq_ops; } @@ -911,16 +869,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_queue_split(struct bio **); -extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); -extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, - unsigned int, void __user *); -extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, - unsigned int, void __user *); -extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, - struct scsi_ioctl_command __user *); -extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp); -extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp); - extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); @@ -932,10 +880,12 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns extern int blk_rq_map_user_iov(struct request_queue *, struct request *, struct rq_map_data *, const struct iov_iter *, gfp_t); -extern void blk_execute_rq(struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct gendisk *, struct request *, int, rq_end_io_fn *); +blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, + int at_head); + /* Helper to convert REQ_OP_XXX to its string format XXX */ extern const char *blk_op_str(unsigned int op); @@ -962,6 +912,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) #define SECTOR_SIZE (1 << SECTOR_SHIFT) #endif +#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) +#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) +#define SECTOR_MASK (PAGE_SECTORS - 1) + /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -1008,6 +962,18 @@ static inline unsigned int blk_rq_stats_sectors(const struct request *rq) /* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */ const char *blk_zone_cond_str(enum 
blk_zone_cond zone_cond); +static inline unsigned int bio_zone_no(struct bio *bio) +{ + return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev), + bio->bi_iter.bi_sector); +} + +static inline unsigned int bio_zone_is_seq(struct bio *bio) +{ + return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), + bio->bi_iter.bi_sector); +} + static inline unsigned int blk_rq_zone_no(struct request *rq) { return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); @@ -1148,7 +1114,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q, unsigned int size); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); -void blk_queue_update_readahead(struct request_queue *q); +void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); @@ -1209,7 +1175,6 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, extern void blk_dump_rq_flags(struct request *, char *); bool __must_check blk_get_queue(struct request_queue *); -struct request_queue *blk_alloc_queue(int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1356,8 +1321,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, gfp_mask, 0); } -extern int blk_verify_command(unsigned char *cmd, fmode_t mode); - static inline bool bdev_is_partition(struct block_device *bdev) { return bdev->bd_partno; @@ -1386,6 +1349,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q) return q->limits.max_sectors; } +static inline unsigned int queue_max_bytes(struct request_queue *q) +{ + return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9; +} + static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) { return q->limits.max_hw_sectors; @@ -1531,6 +1499,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return offset << SECTOR_SHIFT; } +/* + * Two cases of handling DISCARD merge: + * If max_discard_segments > 1, the driver takes every bio + * as a range and sends them to the controller together. The ranges + * need not be contiguous. + * Otherwise, the bios/requests will be handled the same as + * others, which must be contiguous. + */ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + static inline int bdev_discard_alignment(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1865,6 +1849,13 @@ struct block_device_operations { char *(*devnode)(struct gendisk *disk, umode_t *mode); struct module *owner; const struct pr_ops *pr_ops; + + /* + * Special callback for probing GPT entry at a given sector. + * Needed by Android devices, used by GPT scanner and MMC blk + * driver. 
+ */ + int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); }; #ifdef CONFIG_COMPAT @@ -1994,8 +1985,6 @@ void blkdev_put_no_open(struct block_device *bdev); struct block_device *bdev_alloc(struct gendisk *disk, u8 partno); void bdev_add(struct block_device *bdev, dev_t dev); struct block_device *I_BDEV(struct inode *inode); -struct block_device *bdgrab(struct block_device *bdev); -void bdput(struct block_device *); int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, loff_t lend); diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 2696eb0fc149..537e1b991f11 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -16,6 +16,26 @@ #define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT) #define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1) +/** + * xbc_calc_checksum() - Calculate checksum of bootconfig + * @data: Bootconfig data. + * @size: The size of the bootconfig data. + * + * Calculate the checksum value of the bootconfig data. + * The checksum will be used with the BOOTCONFIG_MAGIC and the size for + * embedding the bootconfig in the initrd image. + */ +static inline __init u32 xbc_calc_checksum(void *data, u32 size) +{ + unsigned char *p = data; + u32 ret = 0; + + while (size--) + ret += *p++; + + return ret; +} + /* XBC tree node */ struct xbc_node { u16 next; @@ -71,7 +91,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node) */ static inline __init bool xbc_node_is_array(struct xbc_node *node) { - return xbc_node_is_value(node) && node->next != 0; + return xbc_node_is_value(node) && node->child != 0; } /** @@ -80,6 +100,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node) * * Test the @node is a leaf key node which is a key node and has a value node * or no child. Returns true if it is a leaf node, or false if not. + * Note that the leaf node can have subkey nodes in addition to the + * value node. */ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) { @@ -88,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) } /* Tree-based key-value access APIs */ -struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent, +struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent, const char *key); const char * __init xbc_node_find_value(struct xbc_node *parent, @@ -126,7 +148,24 @@ xbc_find_value(const char *key, struct xbc_node **vnode) */ static inline struct xbc_node * __init xbc_find_node(const char *key) { - return xbc_node_find_child(NULL, key); + return xbc_node_find_subkey(NULL, key); +} + +/** + * xbc_node_get_subkey() - Return the first subkey node if exists + * @node: Parent node + * + * Return the first subkey node of the @node. If the @node has no child + * or only value node, this will return NULL. + */ +static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node) +{ + struct xbc_node *child = xbc_node_get_child(node); + + if (child && xbc_node_is_value(child)) + return xbc_node_get_next(child); + else + return child; } /** @@ -140,7 +179,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) */ #define xbc_array_for_each_value(anode, value) \ for (value = xbc_node_get_data(anode); anode != NULL ; \ - anode = xbc_node_get_next(anode), \ + anode = xbc_node_get_child(anode), \ value = anode ? xbc_node_get_data(anode) : NULL) /** @@ -149,12 +188,25 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) * @child: Iterated XBC node. 
* * Iterate child nodes of @parent. Each child node is stored in @child. + * The @child can be a mixture of a value node and subkey nodes. */ #define xbc_node_for_each_child(parent, child) \ for (child = xbc_node_get_child(parent); child != NULL ; \ child = xbc_node_get_next(child)) /** + * xbc_node_for_each_subkey() - Iterate child subkey nodes + * @parent: An XBC node. + * @child: Iterated XBC node. + * + * Iterate subkey nodes of @parent. Each child node is stored in @child. + * The @child is always a subkey node. + */ +#define xbc_node_for_each_subkey(parent, child) \ + for (child = xbc_node_get_subkey(parent); child != NULL ; \ + child = xbc_node_get_next(child)) + +/** * xbc_node_for_each_array_value() - Iterate array entries of given key * @node: An XBC node. * @key: A key string searched under @node @@ -162,16 +214,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key) * @value: Iterated value of array entry. * * Iterate array entries of given @key under @node. Each array entry node - * is stroed to @anode and @value. If the @node doesn't have @key node, + * is stored in @anode and @value. If the @node doesn't have @key node, * it does nothing. * Note that even if the found key node has only one value (not array) - * this executes block once. Hoever, if the found key node has no value + * this executes the block once. However, if the found key node has no value * (key-only node), this does nothing. So don't use this for testing the * key-value pair existence. */ #define xbc_node_for_each_array_value(node, key, anode, value) \ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \ - anode = xbc_node_get_next(anode), \ + anode = xbc_node_get_child(anode), \ value = anode ? xbc_node_get_data(anode) : NULL) /** diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h new file mode 100644 index 000000000000..2bc8b1f69c93 --- /dev/null +++ b/include/linux/bootmem_info.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BOOTMEM_INFO_H +#define __LINUX_BOOTMEM_INFO_H + +#include <linux/mm.h> + +/* + * Types for free bootmem stored in page->lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. + */ +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE +void __init register_page_bootmem_info_node(struct pglist_data *pgdat); + +void get_page_bootmem(unsigned long info, struct page *page, + unsigned long type); +void put_page_bootmem(struct page *page); + +/* + * Any memory allocated via the memblock allocator and not via the + * buddy will be marked reserved already in the memmap. For those + * pages, we can call this function to free them to the buddy allocator. + */ +static inline void free_bootmem_page(struct page *page) +{ + unsigned long magic = (unsigned long)page->freelist; + + /* + * The reserve_bootmem_region sets the reserved flag on bootmem + * pages. 
+ */ + VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); + + if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) + put_page_bootmem(page); + else + VM_BUG_ON_PAGE(1, page); +} +#else +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + +static inline void put_page_bootmem(struct page *page) +{ +} + +static inline void get_page_bootmem(unsigned long info, struct page *page, + unsigned long type) +{ +} + +static inline void free_bootmem_page(struct page *page) +{ + free_reserved_page(page); +} +#endif + +#endif /* __LINUX_BOOTMEM_INFO_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 8b77d08d4b47..2746fd804216 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -23,22 +23,73 @@ struct ctl_table_header; struct task_struct; #ifdef CONFIG_CGROUP_BPF +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS, + CGROUP_INET_SOCK_CREATE, + CGROUP_SOCK_OPS, + CGROUP_DEVICE, + CGROUP_INET4_BIND, + CGROUP_INET6_BIND, + CGROUP_INET4_CONNECT, + CGROUP_INET6_CONNECT, + CGROUP_INET4_POST_BIND, + CGROUP_INET6_POST_BIND, + CGROUP_UDP4_SENDMSG, + CGROUP_UDP6_SENDMSG, + CGROUP_SYSCTL, + CGROUP_UDP4_RECVMSG, + CGROUP_UDP6_RECVMSG, + CGROUP_GETSOCKOPT, + CGROUP_SETSOCKOPT, + CGROUP_INET4_GETPEERNAME, + CGROUP_INET6_GETPEERNAME, + CGROUP_INET4_GETSOCKNAME, + CGROUP_INET6_GETSOCKNAME, + CGROUP_INET_SOCK_RELEASE, + MAX_CGROUP_BPF_ATTACH_TYPE +}; -extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE]; -#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type]) +#define CGROUP_ATYPE(type) \ + case BPF_##type: return type -#define BPF_CGROUP_STORAGE_NEST_MAX 8 +static inline enum cgroup_bpf_attach_type +to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + CGROUP_ATYPE(CGROUP_INET_INGRESS); + CGROUP_ATYPE(CGROUP_INET_EGRESS); + CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE); + CGROUP_ATYPE(CGROUP_SOCK_OPS); + CGROUP_ATYPE(CGROUP_DEVICE); + CGROUP_ATYPE(CGROUP_INET4_BIND); + CGROUP_ATYPE(CGROUP_INET6_BIND); + CGROUP_ATYPE(CGROUP_INET4_CONNECT); + CGROUP_ATYPE(CGROUP_INET6_CONNECT); + CGROUP_ATYPE(CGROUP_INET4_POST_BIND); + CGROUP_ATYPE(CGROUP_INET6_POST_BIND); + CGROUP_ATYPE(CGROUP_UDP4_SENDMSG); + CGROUP_ATYPE(CGROUP_UDP6_SENDMSG); + CGROUP_ATYPE(CGROUP_SYSCTL); + CGROUP_ATYPE(CGROUP_UDP4_RECVMSG); + CGROUP_ATYPE(CGROUP_UDP6_RECVMSG); + CGROUP_ATYPE(CGROUP_GETSOCKOPT); + CGROUP_ATYPE(CGROUP_SETSOCKOPT); + CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME); + CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME); + CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE); + default: + return CGROUP_BPF_ATTACH_TYPE_INVALID; + } +} -struct bpf_cgroup_storage_info { - struct task_struct *task; - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; -}; +#undef CGROUP_ATYPE -/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks - * to use bpf cgroup storage simultaneously. 
- */ -DECLARE_PER_CPU(struct bpf_cgroup_storage_info, - bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]); +extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE]; +#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype]) #define for_each_cgroup_storage_type(stype) \ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) @@ -80,15 +131,15 @@ struct bpf_prog_array; struct cgroup_bpf { /* array of effective progs in this cgroup */ - struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; + struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE]; /* attached progs to this cgroup and attach flags * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will * have either zero or one element * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS */ - struct list_head progs[MAX_BPF_ATTACH_TYPE]; - u32 flags[MAX_BPF_ATTACH_TYPE]; + struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE]; + u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE]; /* list of cgroup shared storages */ struct list_head storages; @@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sk(struct sock *sk, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, - enum bpf_attach_type type, + enum cgroup_bpf_attach_type atype, void *t_ctx, u32 *flags); int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, - short access, enum bpf_attach_type type); + short access, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, char **buf, size_t *pcount, loff_t *ppos, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, int *optname, char __user *optval, @@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type( return BPF_CGROUP_STORAGE_SHARED; } -static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage - *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) -{ - enum bpf_cgroup_storage_type stype; - int i, err = 0; - - preempt_disable(); - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL)) - continue; - - this_cpu_write(bpf_cgroup_storage_info[i].task, current); - for_each_cgroup_storage_type(stype) - this_cpu_write(bpf_cgroup_storage_info[i].storage[stype], - storage[stype]); - goto out; - } - err = -EBUSY; - WARN_ON_ONCE(1); - -out: - preempt_enable(); - return err; -} - -static inline void bpf_cgroup_storage_unset(void) -{ - int i; - - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) - continue; - - this_cpu_write(bpf_cgroup_storage_info[i].task, NULL); - return; - } -} - struct bpf_cgroup_storage * cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, void *key, bool locked); @@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if 
(cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \ + if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ - BPF_CGROUP_INET_INGRESS); \ + CGROUP_INET_INGRESS); \ \ __ret; \ }) @@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ + if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ if (sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ - BPF_CGROUP_INET_EGRESS); \ + CGROUP_INET_EGRESS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ - __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + if (cgroup_bpf_enabled(atype)) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, atype); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND) -#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + if (cgroup_bpf_enabled(atype)) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, \ &__unused_flags); \ __ret; \ }) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ t_ctx, \ &__unused_flags); \ release_sock(sk); \ @@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 
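 * For example, a bind program returning 3 (the verdict bit plus the
 * first flag bit) both permits the bind and requests that the
 * CAP_NET_BIND_SERVICE check be skipped.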
*/ -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \ +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \ ({ \ u32 __flags = 0; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, &__flags); \ release_sock(sk); \ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ @@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \ - ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \ - cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \ + ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \ + cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \ (sk)->sk_prot->pre_connect) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL) /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a * fullsock and its parent fullsock cannot be traced by @@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ if (__sk && sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \ ({ \ int __ret = 0; \ - if 
(cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \ - __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ + if (cgroup_bpf_enabled(CGROUP_DEVICE)) \ + __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \ access, \ - BPF_CGROUP_DEVICE); \ + CGROUP_DEVICE); \ \ __ret; \ }) @@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \ + if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ buf, count, pos, \ - BPF_CGROUP_SYSCTL); \ + CGROUP_SYSCTL); \ __ret; \ }) @@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ get_user(__ret, optlen); \ __ret; \ }) @@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ tcp_bpf_bypass_getsockopt, \ @@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \ sock, level, optname, optval, optlen, retval); \ __ret; \ @@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline int bpf_cgroup_storage_set( - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; } -static inline void bpf_cgroup_storage_unset(void) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( @@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, return 0; } -#define cgroup_bpf_enabled(type) (0) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) +#define cgroup_bpf_enabled(atype) (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) @@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, 
uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 02b02cb29ce2..f4c16f19f83e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,6 +22,7 @@ #include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/bpfptr.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -69,6 +70,8 @@ struct bpf_map_ops { void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); + int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, + void *value, u64 flags); int (*map_lookup_and_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -165,6 +168,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; int spin_lock_off; /* >=0 valid offset, <0 error */ + int timer_off; /* >=0 valid offset, <0 error */ u32 id; int numa_node; u32 btf_key_type_id; @@ -194,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map) return map->spin_lock_off >= 0; } -static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) +static inline bool map_value_has_timer(const struct bpf_map *map) { - if (likely(!map_value_has_spin_lock(map))) - return; - *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = - (struct bpf_spin_lock){}; + return map->timer_off >= 0; } -/* copy everything but bpf_spin_lock */ +static inline void check_and_init_map_value(struct bpf_map *map, void *dst) +{ + if (unlikely(map_value_has_spin_lock(map))) + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; + if (unlikely(map_value_has_timer(map))) + *(struct bpf_timer *)(dst + map->timer_off) = + (struct bpf_timer){}; +} + +/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. 
*/ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) { + u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0; + if (unlikely(map_value_has_spin_lock(map))) { - u32 off = map->spin_lock_off; + s_off = map->spin_lock_off; + s_sz = sizeof(struct bpf_spin_lock); + } else if (unlikely(map_value_has_timer(map))) { + t_off = map->timer_off; + t_sz = sizeof(struct bpf_timer); + } - memcpy(dst, src, off); - memcpy(dst + off + sizeof(struct bpf_spin_lock), - src + off + sizeof(struct bpf_spin_lock), - map->value_size - off - sizeof(struct bpf_spin_lock)); + if (unlikely(s_sz || t_sz)) { + if (s_off < t_off || !s_sz) { + swap(s_off, t_off); + swap(s_sz, t_sz); + } + memcpy(dst, src, t_off); + memcpy(dst + t_off + t_sz, + src + t_off + t_sz, + s_off - t_off - t_sz); + memcpy(dst + s_off + s_sz, + src + s_off + s_sz, + map->value_size - s_off - s_sz); } else { memcpy(dst, src, map->value_size); } } void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); +void bpf_timer_cancel_and_free(void *timer); int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); struct bpf_offload_dev; @@ -311,6 +338,7 @@ enum bpf_arg_type { ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ __BPF_ARG_TYPE_MAX, }; @@ -551,6 +579,11 @@ struct btf_func_model { */ #define BPF_TRAMP_F_SKIP_FRAME BIT(2) +/* Store IP address of the caller on the trampoline stack, + * so it's available for trampoline's programs. + */ +#define BPF_TRAMP_F_IP_ARG BIT(3) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 */ @@ -777,6 +810,7 @@ struct bpf_jit_poke_descriptor { void *tailcall_target; void *tailcall_bypass; void *bypass_addr; + void *aux; union { struct { struct bpf_map *map; @@ -1069,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, /* an array of programs to be executed under rcu_lock. * * Typical usage: - * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); + * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run); * * the structure returned by bpf_prog_array_alloc() should be populated * with program pointers and the last pointer must be NULL. 
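 * Each array item pairs the program with either its cgroup storage
 * pointers or, for tracing attachments, a user-supplied bpf_cookie
 * which the run-context structures added below expose to the program.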
@@ -1080,7 +1114,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, */ struct bpf_prog_array_item { struct bpf_prog *prog; - struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + union { + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + u64 bpf_cookie; + }; }; struct bpf_prog_array { @@ -1106,73 +1143,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array, int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, + u64 bpf_cookie, struct bpf_prog_array **new_array); +struct bpf_run_ctx {}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; +}; + +static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) +{ + struct bpf_run_ctx *old_ctx = NULL; + +#ifdef CONFIG_BPF_SYSCALL + old_ctx = current->bpf_ctx; + current->bpf_ctx = new_ctx; +#endif + return old_ctx; +} + +static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) +{ +#ifdef CONFIG_BPF_SYSCALL + current->bpf_ctx = old_ctx; +#endif +} + /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) /* BPF program asks to set CN on the packet. */ #define BPF_RET_SET_CN (1 << 0) -/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY, - * if bpf_cgroup_storage_set() failed, the rest of programs - * will not execute. This should be a really rare scenario - * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of - * preemptions all between bpf_cgroup_storage_set() and - * bpf_cgroup_storage_unset() on the same cpu. - */ -#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \ - ({ \ - struct bpf_prog_array_item *_item; \ - struct bpf_prog *_prog; \ - struct bpf_prog_array *_array; \ - u32 _ret = 1; \ - u32 func_ret; \ - migrate_disable(); \ - rcu_read_lock(); \ - _array = rcu_dereference(array); \ - _item = &_array->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ - break; \ - func_ret = func(_prog, ctx); \ - _ret &= (func_ret & 1); \ - *(ret_flags) |= (func_ret >> 1); \ - bpf_cgroup_storage_unset(); \ - _item++; \ - } \ - rcu_read_unlock(); \ - migrate_enable(); \ - _ret; \ - }) - -#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ - ({ \ - struct bpf_prog_array_item *_item; \ - struct bpf_prog *_prog; \ - struct bpf_prog_array *_array; \ - u32 _ret = 1; \ - migrate_disable(); \ - rcu_read_lock(); \ - _array = rcu_dereference(array); \ - if (unlikely(check_non_null && !_array))\ - goto _out; \ - _item = &_array->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - if (!set_cg_storage) { \ - _ret &= func(_prog, ctx); \ - } else { \ - if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ - break; \ - _ret &= func(_prog, ctx); \ - bpf_cgroup_storage_unset(); \ - } \ - _item++; \ - } \ -_out: \ - rcu_read_unlock(); \ - migrate_enable(); \ - _ret; \ - }) +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); + +static __always_inline u32 +BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog, + u32 *ret_flags) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct 
bpf_cg_run_ctx run_ctx; + u32 ret = 1; + u32 func_ret; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + func_ret = run_prog(prog, ctx); + ret &= (func_ret & 1); + *(ret_flags) |= (func_ret >> 1); + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return ret; +} + +static __always_inline u32 +BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_cg_run_ctx run_ctx; + u32 ret = 1; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + ret &= run_prog(prog, ctx); + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return ret; +} + +static __always_inline u32 +BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_trace_run_ctx run_ctx; + u32 ret = 1; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + if (unlikely(!array)) + goto out; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + item = &array->items[0]; + while ((prog = READ_ONCE(item->prog))) { + run_ctx.bpf_cookie = item->bpf_cookie; + ret &= run_prog(prog, ctx); + item++; + } + bpf_reset_run_ctx(old_run_ctx); +out: + rcu_read_unlock(); + migrate_enable(); + return ret; +} /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs * so BPF programs can request cwr for TCP packets. @@ -1201,7 +1298,7 @@ _out: \ u32 _flags = 0; \ bool _cn; \ u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \ + _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ if (_ret) \ _ret = (_cn ? 
NET_XMIT_CN : NET_XMIT_SUCCESS); \ @@ -1210,12 +1307,6 @@ _out: \ _ret; \ }) -#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) - -#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) - #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; @@ -1394,6 +1485,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, struct seq_file *seq); typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); +typedef const struct bpf_func_proto * +(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, + const struct bpf_prog *prog); enum bpf_iter_feature { BPF_ITER_RESCHED = BIT(0), @@ -1406,6 +1500,7 @@ struct bpf_iter_reg { bpf_iter_detach_target_t detach_target; bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; u32 ctx_arg_info_size; u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; @@ -1428,7 +1523,9 @@ struct bpf_iter__bpf_map_elem { int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); -int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +const struct bpf_func_proto * +bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); int bpf_iter_new_fd(struct bpf_link *link); bool bpf_link_is_iter(struct bpf_link *link); struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); @@ -1459,7 +1556,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); int bpf_get_file_flag(int flags); -int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, +int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, size_t actual_size); /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and @@ -1479,8 +1576,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) } /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, - union bpf_attr __user *uattr); +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr); #ifndef CONFIG_BPF_JIT_ALWAYS_ON void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); @@ -1499,14 +1595,19 @@ int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, + struct bpf_map *map, bool exclude_ingress); int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); -bool dev_map_can_have_prog(struct bpf_map *map); +int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, + struct bpf_prog *xdp_prog, struct bpf_map *map, + bool exclude_ingress); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); -bool cpu_map_prog_allowed(struct bpf_map *map); +int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb); /* 
Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1668,6 +1769,13 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, return 0; } +static inline +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, + struct bpf_map *map, bool exclude_ingress) +{ + return 0; +} + struct sk_buff; static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, @@ -1677,6 +1785,14 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, return 0; } +static inline +int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, + struct bpf_prog *xdp_prog, struct bpf_map *map, + bool exclude_ingress) +{ + return 0; +} + static inline void __cpu_map_flush(void) { } @@ -1688,6 +1804,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + static inline bool cpu_map_prog_allowed(struct bpf_map *map) { return false; @@ -1826,6 +1948,15 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +int bpf_prog_test_run_syscall(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); + +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1851,23 +1982,12 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } -#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) -int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); -int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); -int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); -void sock_map_unhash(struct sock *sk); -void sock_map_close(struct sock *sk, long timeout); - -void bpf_sk_reuseport_detach(struct sock *sk); -int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, - void *value); -int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags); -#else -static inline void bpf_sk_reuseport_detach(struct sock *sk) +static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) { + return -ENOTSUPP; } #ifdef CONFIG_BPF_SYSCALL @@ -1888,7 +2008,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ + +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} +#ifdef 
CONFIG_BPF_SYSCALL static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value) { @@ -1964,9 +2098,9 @@ extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; extern const struct bpf_func_proto bpf_task_storage_get_proto; extern const struct bpf_func_proto bpf_task_storage_delete_proto; extern const struct bpf_func_proto bpf_for_each_map_elem_proto; - -const struct bpf_func_proto *bpf_tracing_func_proto( - enum bpf_func_id func_id, const struct bpf_prog *prog); +extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; +extern const struct bpf_func_proto bpf_sk_setsockopt_proto; +extern const struct bpf_func_proto bpf_sk_getsockopt_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2015,6 +2149,7 @@ struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; + struct sock *migrating_sk; void *data_end; u32 hash; u32 reuseport_id; diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index b902c580c48d..24496bc28e7b 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -58,7 +58,7 @@ struct bpf_local_storage_data { * from the object's bpf_local_storage. * * Put it in the same cacheline as the data to minimize - * the number of cachelines access during the cache hit case. + * the number of cachelines accessed during the cache hit case. */ struct bpf_local_storage_map __rcu *smap; u8 data[] __aligned(8); @@ -71,7 +71,7 @@ struct bpf_local_storage_elem { struct bpf_local_storage __rcu *local_storage; struct rcu_head rcu; /* 8 bytes hole */ - /* The data is stored in aother cacheline to minimize + /* The data is stored in another cacheline to minimize * the number of cachelines access during a cache hit. */ struct bpf_local_storage_data sdata ____cacheline_aligned; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index f883f01a5061..9c81724e4b98 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -77,6 +77,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, void *, void *) #endif /* CONFIG_BPF_LSM */ #endif +BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall, + void *, void *) BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) @@ -132,4 +134,8 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) +#endif +#ifdef CONFIG_PERF_EVENTS +BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 06841517ab1e..5424124dbe36 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -53,7 +53,14 @@ struct bpf_reg_state { /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ - struct bpf_map *map_ptr; + struct { + struct bpf_map *map_ptr; + /* To distinguish map lookups from outer map + * the map_uid is non-zero for registers + * pointing to inner maps. + */ + u32 map_uid; + }; /* for PTR_TO_BTF_ID */ struct { @@ -201,12 +208,19 @@ struct bpf_func_state { * zero == main subprog */ u32 subprogno; + /* Every bpf_timer_start will increment async_entry_cnt. 
+ * It's used to distinguish: + * void foo(void) { for(;;); } + * void foo(void) { bpf_timer_set_callback(,foo); } + */ + u32 async_entry_cnt; + bool in_callback_fn; + bool in_async_callback_fn; /* The following fields should be last. See copy_func_state() */ int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; - bool in_callback_fn; struct bpf_stack_state *stack; }; @@ -215,6 +229,13 @@ struct bpf_idx_pair { u32 idx; }; +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +/* Maximum number of register states that can exist at once */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ @@ -333,8 +354,8 @@ struct bpf_insn_aux_data { }; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ @@ -385,6 +406,7 @@ struct bpf_subprog_info { bool has_tail_call; bool tail_call_reachable; bool has_ld_abs; + bool is_async_cb; }; /* single container for all structs @@ -407,6 +429,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool allow_ptr_to_map_access; @@ -418,6 +441,7 @@ struct bpf_verifier_env { const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; struct { int *insn_state; int *insn_stack; @@ -442,6 +466,7 @@ struct bpf_verifier_env { u32 peak_states; /* longest register parentage chain walked for liveness marking */ u32 longest_mark_read_walk; + bpfptr_t fd_array; }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h new file mode 100644 index 000000000000..546e27fc6d46 --- /dev/null +++ b/include/linux/bpfptr.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* A pointer that can point to either kernel or userspace memory. 
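+ *
+ * A minimal usage sketch (addr and uattr_is_kernel are illustrative
+ * names only):
+ *
+ *	bpfptr_t uattr = make_bpfptr(addr, uattr_is_kernel);
+ *	union bpf_attr attr;
+ *
+ *	if (copy_from_bpfptr(&attr, uattr, sizeof(attr)))
+ *		return -EFAULT;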
*/ +#ifndef _LINUX_BPFPTR_H +#define _LINUX_BPFPTR_H + +#include <linux/sockptr.h> + +typedef sockptr_t bpfptr_t; + +static inline bool bpfptr_is_kernel(bpfptr_t bpfptr) +{ + return bpfptr.is_kernel; +} + +static inline bpfptr_t KERNEL_BPFPTR(void *p) +{ + return (bpfptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline bpfptr_t USER_BPFPTR(void __user *p) +{ + return (bpfptr_t) { .user = p }; +} + +static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel) +{ + if (is_kernel) + return KERNEL_BPFPTR((void*) (uintptr_t) addr); + else + return USER_BPFPTR(u64_to_user_ptr(addr)); +} + +static inline bool bpfptr_is_null(bpfptr_t bpfptr) +{ + if (bpfptr_is_kernel(bpfptr)) + return !bpfptr.kernel; + return !bpfptr.user; +} + +static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val) +{ + if (bpfptr_is_kernel(*bpfptr)) + bpfptr->kernel += val; + else + bpfptr->user += val; +} + +static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src, + size_t offset, size_t size) +{ + return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size); +} + +static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size) +{ + return copy_from_bpfptr_offset(dst, src, 0, size); +} + +static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, + const void *src, size_t size) +{ + return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); +} + +static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) +{ + void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_bpfptr(p, src, len)) { + kvfree(p); + return ERR_PTR(-EFAULT); + } + return p; +} + +static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) +{ + return strncpy_from_sockptr(dst, (sockptr_t) src, count); +} + +#endif /* _LINUX_BPFPTR_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 960988d42f77..6b211323a489 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -12,6 +12,7 @@ #include <linux/blkdev.h> #include <scsi/scsi_request.h> +struct bsg_job; struct request; struct device; struct scatterlist; diff --git a/include/linux/bsg.h b/include/linux/bsg.h index dac37b6e00ec..1ac81c809da9 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -4,36 +4,16 @@ #include <uapi/linux/bsg.h> -struct request; +struct bsg_device; +struct device; +struct request_queue; -#ifdef CONFIG_BLK_DEV_BSG -struct bsg_ops { - int (*check_proto)(struct sg_io_v4 *hdr); - int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, - fmode_t mode); - int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); - void (*free_rq)(struct request *rq); -}; +typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr, + fmode_t mode, unsigned int timeout); -struct bsg_class_device { - struct device *class_dev; - int minor; - struct request_queue *queue; - const struct bsg_ops *ops; -}; +struct bsg_device *bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + bsg_sg_io_fn *sg_io_fn); +void bsg_unregister_queue(struct bsg_device *bcd); -int bsg_register_queue(struct request_queue *q, struct device *parent, - const char *name, const struct bsg_ops *ops); -int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); -void bsg_unregister_queue(struct request_queue *q); -#else -static inline int bsg_scsi_register_queue(struct request_queue *q, - struct device *parent) -{ - return 0; -} -static inline void bsg_unregister_queue(struct request_queue *q) -{ -} -#endif /* 
CONFIG_BLK_DEV_BSG */ #endif /* _LINUX_BSG_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 3bac66e0183a..214fde93214b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -21,7 +21,7 @@ extern const struct file_operations btf_fops; void btf_get(struct btf *btf); void btf_put(struct btf *btf); -int btf_new_fd(const union bpf_attr *attr); +int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, const union bpf_attr *attr, @@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); +int btf_find_timer(const struct btf *btf, const struct btf_type *t); bool btf_type_is_void(const struct btf_type *t); s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind); const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 57890b357f85..47d9abfbdb55 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl) #define BTF_ID_LIST_SINGLE(name, prefix, typename) \ BTF_ID_LIST(name) \ BTF_ID(prefix, typename) +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \ + BTF_ID_LIST_GLOBAL(name) \ + BTF_ID(prefix, typename) /* * The BTF_ID_UNUSED macro defines 4 zero bytes. @@ -148,6 +151,7 @@ extern struct btf_id_set name; #define BTF_ID_UNUSED #define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; #define BTF_SET_START(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; #define BTF_SET_END(name) @@ -172,7 +176,8 @@ extern struct btf_id_set name; BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) enum { #define BTF_SOCK_TYPE(name, str) name, @@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE, extern u32 btf_sock_ids[]; #endif +extern u32 btf_task_struct_ids[]; + #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e7e99da31349..6486d3c19463 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -409,7 +409,7 @@ static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } static inline void invalidate_bh_lrus_cpu(int cpu) {} -static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; } +static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; } #define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 40232f90db6e..3b7a0ff4642f 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -8,5 +8,13 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); + +#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || 
IS_ENABLED(CONFIG_CRASH_CORE) +extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX]; +void init_vmlinux_build_id(void); +#else +static inline void init_vmlinux_build_id(void) { } +#endif #endif diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ff832e698efb..0e9bdd42dafb 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -4,9 +4,10 @@ * * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> */ -#ifndef __LINUX_BVEC_ITER_H -#define __LINUX_BVEC_ITER_H +#ifndef __LINUX_BVEC_H +#define __LINUX_BVEC_H +#include <linux/highmem.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/limits.h> @@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec, } } -#endif /* __LINUX_BVEC_ITER_H */ +/** + * bvec_kmap_local - map a bvec into the kernel virtual address space + * @bvec: bvec to map + * + * Must be called on single-page bvecs only. Call kunmap_local on the returned + * address to unmap. + */ +static inline void *bvec_kmap_local(struct bio_vec *bvec) +{ + return kmap_local_page(bvec->bv_page) + bvec->bv_offset; +} + +/** + * memcpy_from_bvec - copy data from a bvec + * @bvec: bvec to copy from + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec) +{ + memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * memcpy_to_bvec - copy data to a bvec + * @bvec: bvec to copy to + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from) +{ + memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); +} + +/** + * memzero_bvec - zero all data in a bvec + * @bvec: bvec to zero + * + * Must be called on single-page bvecs only. + */ +static inline void memzero_bvec(struct bio_vec *bvec) +{ + memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * bvec_virt - return the virtual address for a bvec + * @bvec: bvec to return the virtual address for + * + * Note: the caller must ensure that @bvec->bv_page is not a highmem page. + */ +static inline void *bvec_virt(struct bio_vec *bvec) +{ + WARN_ON_ONCE(PageHighMem(bvec->bv_page)); + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +#endif /* __LINUX_BVEC_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 4f72b47973c3..2f909ed084c6 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -79,24 +79,6 @@ struct cpu_cacheinfo { bool cpu_map_populated; }; -/* - * Helpers to make sure "func" is executed on the cpu whose cache - * attributes are being detected - */ -#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ -static inline void _##func(void *ret) \ -{ \ - int cpu = smp_processor_id(); \ - *(int *)ret = __##func(cpu); \ -} \ - \ -int func(unsigned int cpu) \ -{ \ - int ret; \ - smp_call_function_single(cpu, _##func, &ret, true); \ - return ret; \ -} - struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index ae7a3411167c..9de6e9053e34 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -37,7 +37,7 @@ * quanta, from when the bit is sent on the TX pin to when it is * received on the RX pin of the transmitter. Possible options: * - * O: automatic mode. The controller dynamically measure @tdcv + * 0: automatic mode. 
The controller dynamically measures @tdcv * for each transmitted CAN FD frame. * * Other values: manual mode. Use the fixed provided value. @@ -45,7 +45,7 @@ * @tdco: Transmitter Delay Compensation Offset. Offset value, in time * quanta, defining the distance between the start of the bit * reception on the RX pin of the transceiver and the SSP - * position such as SSP = @tdcv + @tdco. + * position such that SSP = @tdcv + @tdco. * * If @tdco is zero, then TDC is disabled and both @tdcv and * @tdcf should be ignored. diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 27b275e463da..2413253e54c7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,12 @@ enum can_mode { CAN_MODE_SLEEP }; +enum can_termination_gpio { + CAN_TERMINATION_GPIO_DISABLED = 0, + CAN_TERMINATION_GPIO_ENABLED, + CAN_TERMINATION_GPIO_MAX, +}; + /* * CAN common private data */ @@ -55,6 +61,8 @@ struct can_priv { unsigned int termination_const_cnt; const u16 *termination_const; u16 termination; + struct gpio_desc *termination_gpio; + u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX]; enum can_state state; diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 000000000000..1b536fb999de --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CAN_PLATFORM_FLEXCAN_H +#define _CAN_PLATFORM_FLEXCAN_H + +struct flexcan_platform_data { + u32 clock_frequency; + u8 clk_src; +}; + +#endif /* _CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 40882df7105e..c11477620403 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -20,6 +20,7 @@ struct can_rx_offload { bool drop); struct sk_buff_head skb_queue; + struct sk_buff_head skb_irq_queue; u32 skb_queue_len_max; unsigned int mb_first; @@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int *frame_len_ptr); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb); +void can_rx_offload_irq_finish(struct can_rx_offload *offload); +void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); -static inline void can_rx_offload_schedule(struct can_rx_offload *offload) -{ - napi_schedule(&offload->napi); -} - static inline void can_rx_offload_disable(struct can_rx_offload *offload) { napi_disable(&offload->napi); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index f48d0a31deae..c4fef00abdf3 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -86,11 +86,13 @@ struct cdrom_device_ops { /* play stuff */ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); -/* driver specifications */ - const int capability; /* capability flags */ /* handle uniform packets for scsi type devices (scsi,atapi) */ int (*generic_packet) (struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nframes, u8 *last_sense); +/* driver specifications */ + const int capability; /* capability flags */ }; int cdrom_multisession(struct cdrom_device_info *cdi, diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 71b5d481c653..6b138fa97db8 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -50,7 +50,7 @@ struct ceph_auth_client_ops { * another request. */ int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); - int (*handle_reply)(struct ceph_auth_client *ac, int result, + int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id, void *buf, void *end, u8 *session_key, int *session_key_len, u8 *con_secret, int *con_secret_len); @@ -104,6 +104,8 @@ struct ceph_auth_client { struct mutex mutex; }; +void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id); + struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key, const int *con_modes); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index e41a811026f6..bc2699feddbe 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -299,6 +299,7 @@ enum { CEPH_SESSION_FLUSHMSG_ACK, CEPH_SESSION_FORCE_RO, CEPH_SESSION_REJECT, + CEPH_SESSION_REQUEST_FLUSH_MDLOG, }; extern const char *ceph_session_op_name(int op); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index fb8f6d2cd104..e1c705fdfa7c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -71,6 +71,9 @@ enum { /* Cgroup is frozen. */ CGRP_FROZEN, + + /* Control group has to be killed. 
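+	 * Set for the whole subtree while a cgroup.kill write is being
+	 * handled, so that tasks forked during the operation are
+	 * signalled as well.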
*/ + CGRP_KILL, }; /* cgroup_root->flags */ @@ -110,6 +113,7 @@ enum { CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ + CFTYPE_PRESSURE = (1 << 6), /* only if pressure feature is enabled */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 6bc9c76680b2..7bf60454a313 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -676,6 +676,8 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return &cgrp->psi; } +bool cgroup_psi_enabled(void); + static inline void cgroup_init_kthreadd(void) { /* @@ -696,6 +698,7 @@ static inline void cgroup_kthread_ready(void) } void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen); +struct cgroup *cgroup_get_from_id(u64 id); #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -735,6 +738,11 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return NULL; } +static inline bool cgroup_psi_enabled(void) +{ + return false; +} + static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { @@ -743,6 +751,11 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task, static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) {} + +static inline struct cgroup *cgroup_get_from_id(u64 id) +{ + return NULL; +} #endif /* !CONFIG_CGROUPS */ #ifdef CONFIG_CGROUPS @@ -906,20 +919,6 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze); void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, struct cgroup *dst); -static inline bool cgroup_task_freeze(struct task_struct *task) -{ - bool ret; - - if (task->flags & PF_KTHREAD) - return false; - - rcu_read_lock(); - ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags); - rcu_read_unlock(); - - return ret; -} - static inline bool cgroup_task_frozen(struct task_struct *task) { return task->frozen; @@ -929,10 +928,6 @@ static inline bool cgroup_task_frozen(struct task_struct *task) static inline void cgroup_enter_frozen(void) { } static inline void cgroup_leave_frozen(bool always_leave) { } -static inline bool cgroup_task_freeze(struct task_struct *task) -{ - return false; -} static inline bool cgroup_task_frozen(struct task_struct *task) { return false; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 162a2e5546a3..f59c875271a0 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -342,7 +342,7 @@ struct clk_fixed_rate { unsigned long flags; }; -#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0) +#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0) extern const struct clk_ops clk_fixed_rate_ops; struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev, @@ -629,6 +629,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, unsigned long rate, unsigned long *prate, const struct clk_div_table *table, u8 width, unsigned long flags, unsigned int val); +int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, + const struct clk_div_table *table, u8 width, + unsigned long flags); +int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, + const struct clk_div_table *table, u8 width, + unsigned long flags, unsigned int val); int divider_get_val(unsigned long 
rate, unsigned long parent_rate, const struct clk_div_table *table, u8 width, unsigned long flags); @@ -995,6 +1001,12 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are * used for the divider register. Setting this flag makes the register * accesses big endian. + * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might + * be saturated, leaving the caller quite far from a good enough + * approximation. Setting this flag allows the divider to shift the + * fraction left by a few bits when the requested rate is too small to + * satisfy the desired denominator range. It assumes that a power-of-two + * capable prescaler exists on the caller's side. */ struct clk_fractional_divider { struct clk_hw hw; @@ -1016,8 +1028,8 @@ struct clk_fractional_divider { #define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) #define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1) +#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2) -extern const struct clk_ops clk_fractional_divider_ops; struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, @@ -1063,9 +1075,9 @@ struct clk_multiplier { #define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw) -#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) -#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) +#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) extern const struct clk_ops clk_multiplier_ops; diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index a4f82e836a7c..ccb3f034bfa9 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -137,6 +137,32 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */ +#define AT91_PMC_MCR_V2_ID_MSK (0xF) +#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK) +#define AT91_PMC_MCR_V2_CMD (1 << 7) +#define AT91_PMC_MCR_V2_DIV (7 << 8) +#define AT91_PMC_MCR_V2_DIV1 (0 << 8) +#define AT91_PMC_MCR_V2_DIV2 (1 << 8) +#define AT91_PMC_MCR_V2_DIV4 (2 << 8) +#define AT91_PMC_MCR_V2_DIV8 (3 << 8) +#define AT91_PMC_MCR_V2_DIV16 (4 << 8) +#define AT91_PMC_MCR_V2_DIV32 (5 << 8) +#define AT91_PMC_MCR_V2_DIV64 (6 << 8) +#define AT91_PMC_MCR_V2_DIV3 (7 << 8) +#define AT91_PMC_MCR_V2_CSS (0x1F << 16) +#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16) +#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16) +#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16) +#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16) +#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16) +#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16) +#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16) +#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16) +#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16) +#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16) +#define AT91_PMC_MCR_V2_EN (1 << 28) + #define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index f7ff722a03dd..d128ad1570aa 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -123,20 +123,6 @@ static inline void tegra_cpu_clock_resume(void) } #endif -extern int tegra210_plle_hw_sequence_start(void); -extern bool 
tegra210_plle_hw_sequence_is_enabled(void); -extern void tegra210_xusb_pll_hw_control_enable(void); -extern void tegra210_xusb_pll_hw_sequence_start(void); -extern void tegra210_sata_pll_hw_control_enable(void); -extern void tegra210_sata_pll_hw_sequence_start(void); -extern void tegra210_set_sata_pll_seq_sw(bool state); -extern void tegra210_put_utmipll_in_iddq(void); -extern void tegra210_put_utmipll_out_iddq(void); -extern int tegra210_clk_handle_mbist_war(unsigned int id); -extern void tegra210_clk_emc_dll_enable(bool flag); -extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); -extern void tegra210_clk_emc_update_setting(u32 emc_src_value); - struct clk; struct tegra_emc; @@ -144,17 +130,10 @@ typedef long (tegra20_clk_emc_round_cb)(unsigned long rate, unsigned long min_rate, unsigned long max_rate, void *arg); - -void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, - void *cb_arg); -int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); - typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc, unsigned long rate); typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc, unsigned long rate); -void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, - tegra124_emc_complete_timing_change_cb *complete_cb); struct tegra210_clk_emc_config { unsigned long rate; @@ -176,8 +155,87 @@ struct tegra210_clk_emc_provider { const struct tegra210_clk_emc_config *config); }; +#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC) +void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, + void *cb_arg); +int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +#else +static inline void +tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, + void *cb_arg) +{ +} + +static inline int +tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same) +{ + return 0; +} +#endif + +#ifdef CONFIG_TEGRA124_CLK_EMC +void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, + tegra124_emc_complete_timing_change_cb *complete_cb); +#else +static inline void +tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb, + tegra124_emc_complete_timing_change_cb *complete_cb) +{ +} +#endif + +#ifdef CONFIG_ARCH_TEGRA_210_SOC +int tegra210_plle_hw_sequence_start(void); +bool tegra210_plle_hw_sequence_is_enabled(void); +void tegra210_xusb_pll_hw_control_enable(void); +void tegra210_xusb_pll_hw_sequence_start(void); +void tegra210_sata_pll_hw_control_enable(void); +void tegra210_sata_pll_hw_sequence_start(void); +void tegra210_set_sata_pll_seq_sw(bool state); +void tegra210_put_utmipll_in_iddq(void); +void tegra210_put_utmipll_out_iddq(void); +int tegra210_clk_handle_mbist_war(unsigned int id); +void tegra210_clk_emc_dll_enable(bool flag); +void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +void tegra210_clk_emc_update_setting(u32 emc_src_value); + int tegra210_clk_emc_attach(struct clk *clk, struct tegra210_clk_emc_provider *provider); void tegra210_clk_emc_detach(struct clk *clk); +#else +static inline int tegra210_plle_hw_sequence_start(void) +{ + return 0; +} + +static inline bool tegra210_plle_hw_sequence_is_enabled(void) +{ + return false; +} + +static inline int tegra210_clk_handle_mbist_war(unsigned int id) +{ + return 0; +} + +static inline int +tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider) +{ + return 0; +} + 
+static inline void tegra210_xusb_pll_hw_control_enable(void) {} +static inline void tegra210_xusb_pll_hw_sequence_start(void) {} +static inline void tegra210_sata_pll_hw_control_enable(void) {} +static inline void tegra210_sata_pll_hw_sequence_start(void) {} +static inline void tegra210_set_sata_pll_seq_sw(bool state) {} +static inline void tegra210_put_utmipll_in_iddq(void) {} +static inline void tegra210_put_utmipll_out_iddq(void) {} +static inline void tegra210_clk_emc_dll_enable(bool flag) {} +static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {} +static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {} +static inline void tegra210_clk_emc_detach(struct clk *clk) {} +#endif #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index c62f6fa6763d..3486f20a3753 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -63,6 +63,17 @@ struct clk_omap_reg { * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs + * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading + * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency + * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg + * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg + * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg + * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in + * @control_reg + * @ssc_modfreq: the DPLL SSC frequency modulation in kHz + * @ssc_deltam: the DPLL SSC frequency spreading in permille (tenths of a percent) + * @ssc_downspread: restrict the DPLL SSC spread to frequencies below the + * nominal rate only (downspread) when in SSC mode * @flags: DPLL type/features (see below) * * Possible values for @flags: @@ -110,6 +121,17 @@ struct dpll_data { u8 auto_recal_bit; u8 recal_en_bit; u8 recal_st_bit; + struct clk_omap_reg ssc_deltam_reg; + struct clk_omap_reg ssc_modfreq_reg; + u32 ssc_deltam_int_mask; + u32 ssc_deltam_frac_mask; + u32 ssc_modfreq_mant_mask; + u32 ssc_modfreq_exp_mask; + u32 ssc_enable_mask; + u32 ssc_downspread_mask; + u32 ssc_modfreq; + u32 ssc_deltam; + bool ssc_downspread; u8 flags; }; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index fd06b2780a22..8a8423eb8e9a 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -30,11 +30,6 @@ struct clk_lookup { .clk = c, \ } -struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, - const char *dev_fmt, ...) __printf(3, 4); -struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, - const char *dev_fmt, ...) __printf(3, 4); - void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index d6ab416ee2d2..1d42d4b17327 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -43,6 +43,8 @@ struct module; * @shift: Cycle to nanosecond divisor (power of two) * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) * @maxadj: Maximum adjustment value to mult (~11%) + * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second. + * Zero says to use default WATCHDOG_THRESHOLD. 
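For context, a hedged sketch of how a clocksource driver might opt in to the new @uncertainty_margin field; the demo_* names and the margin value are hypothetical, not taken from this patch:

static u64 demo_cs_read(struct clocksource *cs)
{
	/* a real driver would read its free-running hardware counter here */
	return 0;
}

static struct clocksource demo_cs = {
	.name			= "demo-timer",
	.rating			= 250,
	.read			= demo_cs_read,
	.mask			= CLOCKSOURCE_MASK(32),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
	/* hypothetical 500us margin; leaving this 0 selects WATCHDOG_THRESHOLD */
	.uncertainty_margin	= 500 * NSEC_PER_USEC,
};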
* @archdata: Optional arch-specific data * @max_cycles: Maximum safe cycle value which won't overflow on * multiplication @@ -98,6 +100,7 @@ struct clocksource { u32 shift; u64 max_idle_ns; u32 maxadj; + u32 uncertainty_margin; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif @@ -137,7 +140,7 @@ struct clocksource { #define CLOCK_SOURCE_UNSTABLE 0x40 #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 #define CLOCK_SOURCE_RESELECT 0x100 - +#define CLOCK_SOURCE_VERIFY_PERCPU 0x200 /* simplify initialization of mask field */ #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0) @@ -288,4 +291,7 @@ static inline void timer_probe(void) {} #define TIMER_ACPI_DECLARE(name, table_id, fn) \ ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) +extern ulong max_cswd_read_retries; +void clocksource_verify_percpu(struct clocksource *cs); + #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h deleted file mode 100644 index 68a541807bdf..000000000000 --- a/include/linux/cmdline-parser.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Parsing command line, get the partitions information. - * - * Written by Cai Zhiyong <caizhiyong@huawei.com> - * - */ -#ifndef CMDLINEPARSEH -#define CMDLINEPARSEH - -#include <linux/blkdev.h> -#include <linux/fs.h> -#include <linux/slab.h> - -/* partition flags */ -#define PF_RDONLY 0x01 /* Device is read only */ -#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ - -struct cmdline_subpart { - char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ - sector_t from; - sector_t size; - int flags; - struct cmdline_subpart *next_subpart; -}; - -struct cmdline_parts { - char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ - unsigned int nr_subparts; - struct cmdline_subpart *subpart; - struct cmdline_parts *next_parts; -}; - -void cmdline_parts_free(struct cmdline_parts **parts); - -int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); - -struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, - const char *bdev); - -int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, - int slot, - int (*add_part)(int, struct cmdline_subpart *, void *), - void *param); - -#endif /* CMDLINEPARSEH */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4221888bdcd6..34bce35c808d 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -35,12 +35,12 @@ enum compact_result { COMPACT_CONTINUE, /* - * The full zone was compacted scanned but wasn't successfull to compact + * The full zone was compacted scanned but wasn't successful to compact * suitable pages. */ COMPACT_COMPLETE, /* - * direct compaction has scanned part of the zone but wasn't successfull + * direct compaction has scanned part of the zone but wasn't successful * to compact suitable pages. 
*/ COMPACT_PARTIAL_SKIPPED, @@ -84,6 +84,8 @@ static inline unsigned long compact_gap(unsigned int order) extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); +extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table, + int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; diff --git a/include/linux/compat.h b/include/linux/compat.h index 8855b1b702b2..1c758b0e0359 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -20,11 +20,8 @@ #include <linux/unistd.h> #include <asm/compat.h> - -#ifdef CONFIG_COMPAT #include <asm/siginfo.h> #include <asm/signal.h> -#endif #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER /* @@ -95,8 +92,6 @@ struct compat_iovec { compat_size_t iov_len; }; -#ifdef CONFIG_COMPAT - #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() #endif @@ -131,9 +126,11 @@ struct compat_tms { #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) +#ifndef compat_sigset_t typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; +#endif int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); @@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params { __u32 __spare[8]; }; +struct compat_stat; struct compat_statfs; struct compat_statfs64; struct compat_old_linux_dirent; @@ -397,14 +395,6 @@ struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; -#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) - -#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) - -long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, - unsigned long bitmap_size); -long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, - unsigned long bitmap_size); void copy_siginfo_to_external32(struct compat_siginfo *to, const struct kernel_siginfo *from); int copy_siginfo_from_user32(kernel_siginfo_t *to, @@ -428,7 +418,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsigned int size) { /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ -#ifdef __BIG_ENDIAN +#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT) compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; @@ -521,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, struct epoll_event; /* fortunately, this one is fixed-layout */ -extern void __user *compat_alloc_user_space(unsigned long len); - int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); #define unsafe_compat_save_altstack(uss, sp, label) do { \ @@ -532,8 +520,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); &__uss->ss_sp, label); \ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ - if (t->sas_ss_flags & SS_AUTODISARM) \ - sas_ss_reset(t); \ } while (0); /* @@ -811,26 +797,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr /* mm/fadvise.c: No generic prototype for fadvise64_64 */ /* mm/, CONFIG_MMU only */ -asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, - compat_ulong_t mode, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, compat_ulong_t 
flags); -asmlinkage long compat_sys_get_mempolicy(int __user *policy, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, - compat_ulong_t addr, - compat_ulong_t flags); -asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, - compat_ulong_t maxnode); -asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, - compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, - const compat_ulong_t __user *new_nodes); -asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, - __u32 __user *pages, - const int __user *nodes, - int __user *status, - int flags); - asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); @@ -931,17 +897,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); #endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ - -/* - * For most but not all architectures, "am I in a compat syscall?" and - * "am I a compat task?" are the same question. For architectures on which - * they aren't the same question, arch code can override in_compat_syscall. - */ - -#ifndef in_compat_syscall -static inline bool in_compat_syscall(void) { return is_compat_task(); } -#endif - /** * ns_to_old_timeval32 - Compat version of ns_to_timeval * @nsec: the nanoseconds value to be converted @@ -971,6 +926,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf); +#ifdef CONFIG_COMPAT + +/* + * For most but not all architectures, "am I in a compat syscall?" and + * "am I a compat task?" are the same question. For architectures on which + * they aren't the same question, arch code can override in_compat_syscall. + */ +#ifndef in_compat_syscall +static inline bool in_compat_syscall(void) { return is_compat_task(); } +#endif + #else /* !CONFIG_COMPAT */ #define is_compat_task() (0) @@ -980,6 +946,15 @@ static inline bool in_compat_syscall(void) { return false; } #endif /* CONFIG_COMPAT */ +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); + /* * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit * types, and will need special compat treatment for that. Most architectures diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index adbe76b203e2..49b0ac8b6fd3 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -13,6 +13,12 @@ /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 +/* + * Note: Checking __has_feature(*_sanitizer) is only true if the feature is + * enabled. Therefore it is not required to additionally check defined(CONFIG_*) + * to avoid adding redundant attributes in other configurations. + */ + #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) /* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ @@ -46,6 +52,17 @@ #endif /* + * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together + * with no_sanitize("coverage"). Prior versions of Clang support coverage + * instrumentation, but cannot be queried for support by the preprocessor. 
+ */ +#if __has_feature(coverage_sanitizer) +#define __no_sanitize_coverage __attribute__((no_sanitize("coverage"))) +#else +#define __no_sanitize_coverage +#endif + +/* * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5d97ef738a57..cb9217fc60af 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -122,6 +122,12 @@ #define __no_sanitize_undefined #endif +#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__) +#define __no_sanitize_coverage __attribute__((no_sanitize_coverage)) +#else +#define __no_sanitize_coverage +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index df5b405e6305..b67261a1e3e9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * The __COUNTER__ based labels are a hack to make each instance of the macros * unique, to convince GCC not to merge duplicate inline asm statements. */ -#define annotate_reachable() ({ \ - asm volatile("%c0:\n\t" \ +#define __stringify_label(n) #n + +#define __annotate_reachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.reachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) -#define annotate_unreachable() ({ \ - asm volatile("%c0:\n\t" \ +#define annotate_reachable() __annotate_reachable(__COUNTER__) + +#define __annotate_unreachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.unreachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define annotate_unreachable() __annotate_unreachable(__COUNTER__) + #define ASM_UNREACHABLE \ "999:\n\t" \ ".pushsection .discard.unreachable\n\t" \ @@ -213,6 +219,16 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __v; \ }) +/* + * With CONFIG_CFI_CLANG, the compiler replaces function addresses in + * instrumented C code with jump table addresses. Architectures that + * support CFI can define this macro to return the actual function address + * when needed. 
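As a rough illustration of why function_nocfi() exists (the demo_* names below are hypothetical): with CONFIG_CFI_CLANG, taking the address of a function yields a jump table entry, which must not be handed to firmware or early boot code that jumps to a raw address.

static void demo_lowlevel_entry(void)
{
	/* entered directly by firmware, bypassing the CFI jump table */
}

static void demo_register_entry(void)
{
	/* pass the real function body, not the jump table address */
	unsigned long entry = (unsigned long)function_nocfi(demo_lowlevel_entry);

	(void)entry;	/* a real caller would hand this to the firmware interface */
}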
+ */ +#ifndef function_nocfi +#define function_nocfi(x) (x) +#endif + #endif /* __KERNEL__ */ /* diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index c043b8d2b17b..2487be0e7199 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -27,15 +27,16 @@ */ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x -# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___assume_aligned__ 1 # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 +# define __GCC4_has_attribute___no_profile_instrument_function__ 0 # define __GCC4_has_attribute___nonstring__ 0 -# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) -# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___no_sanitize_address__ 1 +# define __GCC4_has_attribute___no_sanitize_undefined__ 1 # define __GCC4_has_attribute___fallthrough__ 0 #endif @@ -199,6 +200,7 @@ * must end with any of these keywords: * break; * fallthrough; + * continue; * goto <label>; * return [expression]; * @@ -238,6 +240,18 @@ #endif /* + * Optional: only supported since GCC >= 7.1, clang >= 13.0. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function + */ +#if __has_attribute(__no_profile_instrument_function__) +# define __no_profile __attribute__((__no_profile_instrument_function__)) +#else +# define __no_profile +#endif + +/* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn * clang: https://clang.llvm.org/docs/AttributeReference.html#id1 diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index d29bda7f6ebd..e4ea86fc584d 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -210,7 +210,7 @@ struct ftrace_likely_data { /* Section for code which can't be instrumented at all */ #define noinstr \ noinline notrace __attribute((__section__(".noinstr.text"))) \ - __no_kcsan __no_sanitize_address + __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage #endif /* __KERNEL__ */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 85008a65e21f..93a2922b7653 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -220,6 +220,10 @@ struct coresight_sysfs_link { * @nr_links: number of sysfs links created to other components from this * device. These will appear in the "connections" group. * @has_conns_grp: Have added a "connections" group for sysfs links. + * @feature_csdev_list: List of complex feature programming added to the device. + * @config_csdev_list: List of system configurations added to the device. + * @cscfg_csdev_lock: Protect the lists of configurations and features. + * @active_cscfg_ctxt: Context information for current active system configuration. 
*/ struct coresight_device { struct coresight_platform_data *pdata; @@ -241,6 +245,11 @@ struct coresight_device { int nr_links; bool has_conns_grp; bool ect_enabled; /* true only if associated ect device is enabled */ + /* system configuration and feature lists */ + struct list_head feature_csdev_list; + struct list_head config_csdev_list; + spinlock_t cscfg_csdev_lock; + void *active_cscfg_ctxt; }; /* diff --git a/include/linux/counter.h b/include/linux/counter.h index 9dbd5df4cd34..d16ce2819b48 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -162,15 +162,15 @@ struct counter_count_ext { void *priv; }; -enum counter_count_function { - COUNTER_COUNT_FUNCTION_INCREASE = 0, - COUNTER_COUNT_FUNCTION_DECREASE, - COUNTER_COUNT_FUNCTION_PULSE_DIRECTION, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X4 +enum counter_function { + COUNTER_FUNCTION_INCREASE = 0, + COUNTER_FUNCTION_DECREASE, + COUNTER_FUNCTION_PULSE_DIRECTION, + COUNTER_FUNCTION_QUADRATURE_X1_A, + COUNTER_FUNCTION_QUADRATURE_X1_B, + COUNTER_FUNCTION_QUADRATURE_X2_A, + COUNTER_FUNCTION_QUADRATURE_X2_B, + COUNTER_FUNCTION_QUADRATURE_X4 }; /** @@ -192,7 +192,7 @@ struct counter_count { const char *name; size_t function; - const enum counter_count_function *functions_list; + const enum counter_function *functions_list; size_t num_functions; struct counter_synapse *synapses; @@ -290,16 +290,16 @@ struct counter_device_state { const struct attribute_group **groups; }; -enum counter_signal_value { - COUNTER_SIGNAL_LOW = 0, - COUNTER_SIGNAL_HIGH +enum counter_signal_level { + COUNTER_SIGNAL_LEVEL_LOW, + COUNTER_SIGNAL_LEVEL_HIGH, }; /** * struct counter_ops - Callbacks from driver * @signal_read: optional read callback for Signal attribute. The read - * value of the respective Signal should be passed back via - * the val parameter. + * level of the respective Signal should be passed back via + * the level parameter. * @count_read: optional read callback for Count attribute. The read * value of the respective Count should be passed back via * the val parameter. 
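A minimal sketch of a driver-side signal_read callback under the renamed level-based API (the demo_* names are hypothetical):

static int demo_signal_read(struct counter_device *counter,
			    struct counter_signal *signal,
			    enum counter_signal_level *level)
{
	/* a real driver would sample the input pin state here */
	*level = COUNTER_SIGNAL_LEVEL_HIGH;
	return 0;
}

static const struct counter_ops demo_counter_ops = {
	.signal_read = demo_signal_read,
};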
@@ -324,7 +324,7 @@ enum counter_signal_value { struct counter_ops { int (*signal_read)(struct counter_device *counter, struct counter_signal *signal, - enum counter_signal_value *val); + enum counter_signal_level *level); int (*count_read)(struct counter_device *counter, struct counter_count *count, unsigned long *val); int (*count_write)(struct counter_device *counter, diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 94a578a96202..9cf51e41e697 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -143,12 +143,6 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ -/* Wrappers which go away once all code is converted */ -static inline void cpu_hotplug_begin(void) { cpus_write_lock(); } -static inline void cpu_hotplug_done(void) { cpus_write_unlock(); } -static inline void get_online_cpus(void) { cpus_read_lock(); } -static inline void put_online_cpus(void) { cpus_read_unlock(); } - #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 353969c7acd3..ff88bb3e44fc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -9,10 +9,14 @@ #define _LINUX_CPUFREQ_H #include <linux/clk.h> +#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/completion.h> #include <linux/kobject.h> #include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/pm_qos.h> #include <linux/spinlock.h> #include <linux/sysfs.h> @@ -331,15 +335,6 @@ struct cpufreq_driver { unsigned long capacity); /* - * Caches and returns the lowest driver-supported frequency greater than - * or equal to the target frequency, subject to any driver limitations. - * Does not set the frequency. Only to be implemented for drivers with - * target(). - */ - unsigned int (*resolve_freq)(struct cpufreq_policy *policy, - unsigned int target_freq); - - /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. * @@ -371,18 +366,20 @@ struct cpufreq_driver { int (*online)(struct cpufreq_policy *policy); int (*offline)(struct cpufreq_policy *policy); int (*exit)(struct cpufreq_policy *policy); - void (*stop_cpu)(struct cpufreq_policy *policy); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); - /* Will be called after the driver is fully initialized */ - void (*ready)(struct cpufreq_policy *policy); - struct freq_attr **attr; /* platform specific boost support code */ bool boost_enabled; int (*set_boost)(struct cpufreq_policy *policy, int state); + + /* + * Set by drivers that want to register with the energy model after the + * policy is properly initialized, but before the governor is started. 
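A hedged sketch of how a driver might use the new ->register_em() hook together with the cpufreq_register_em_with_opp() helper added further down in this patch; the driver skeleton itself is hypothetical:

static struct cpufreq_driver demo_cpufreq_driver = {
	.name		= "demo-cpufreq",
	/* init/target/exit callbacks omitted for brevity */
	.register_em	= cpufreq_register_em_with_opp,
};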
+ */ + void (*register_em)(struct cpufreq_policy *policy); }; /* flags */ @@ -1005,6 +1002,55 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy return count; } + +static inline int parse_perf_domain(int cpu, const char *list_name, + const char *cell_name) +{ + struct device_node *cpu_np; + struct of_phandle_args args; + int ret; + + cpu_np = of_cpu_device_node_get(cpu); + if (!cpu_np) + return -ENODEV; + + ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0, + &args); + of_node_put(cpu_np); + if (ret < 0) + return ret; + + return args.args[0]; +} + +static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, + const char *cell_name, struct cpumask *cpumask) +{ + int target_idx; + int cpu, ret; + + ret = parse_perf_domain(pcpu, list_name, cell_name); + if (ret < 0) + return ret; + + target_idx = ret; + cpumask_set_cpu(pcpu, cpumask); + + for_each_possible_cpu(cpu) { + if (cpu == pcpu) + continue; + + ret = parse_perf_domain(cpu, list_name, cell_name); + if (ret < 0) + continue; + + if (target_idx == ret) + cpumask_set_cpu(cpu, cpumask); + } + + return target_idx; +} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -1024,6 +1070,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) { return false; } + +static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, + const char *cell_name, struct cpumask *cpumask) +{ + return -EOPNOTSUPP; +} #endif #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) @@ -1045,7 +1097,6 @@ void arch_set_freq_scale(const struct cpumask *cpus, { } #endif - /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; @@ -1056,4 +1107,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu); void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); + +static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy) +{ + dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), + policy->related_cpus); +} #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 4a62b3980642..832d8a74fa59 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -22,8 +22,42 @@ * AP_ACTIVE AP_ACTIVE */ +/* + * CPU hotplug states. The state machine invokes the installed state + * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE + * during a CPU online operation. During a CPU offline operation the + * installed teardown callbacks are invoked in the reverse order from + * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE. + * + * The state space has three sections: PREPARE, STARTING and ONLINE. + * + * PREPARE: The callbacks are invoked on a control CPU before the + * hotplugged CPU is started up or after the hotplugged CPU has died. + * + * STARTING: The callbacks are invoked on the hotplugged CPU from the low level + * hotplug startup/teardown code with interrupts disabled. + * + * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU + * hotplug thread with interrupts and preemption enabled. + * + * Adding explicit states to this enum is only necessary when: + * + * 1) The state is within the STARTING section + * + * 2) The state has ordering constraints vs. other states in the + * same section. 
+ * + * If neither #1 nor #2 apply, please use the dynamic state space when + * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN + * for the @state argument of the setup function. + * + * See Documentation/core-api/cpu_hotplug.rst for further information and + * examples. + */ enum cpuhp_state { CPUHP_INVALID = -1, + + /* PREPARE section invoked on a control CPU */ CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, @@ -46,15 +80,17 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, + CPUHP_BIO_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, CPUHP_FS_BUFF_DEAD, CPUHP_PRINTK_DEAD, CPUHP_MM_MEMCQ_DEAD, + CPUHP_XFS_DEAD, CPUHP_PERCPU_CNT_DEAD, CPUHP_RADIX_DEAD, - CPUHP_PAGE_ALLOC_DEAD, + CPUHP_PAGE_ALLOC, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_IOVA_DEAD, @@ -93,6 +129,11 @@ enum cpuhp_state { CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, + + /* + * STARTING section invoked on the hotplugged CPU in low level + * bringup and teardown code. + */ CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, @@ -153,6 +194,8 @@ enum cpuhp_state { CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, + + /* Online section invoked on the hotplugged CPU from the hotplug thread */ CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, @@ -171,7 +214,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, - CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, @@ -215,14 +257,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, int (*teardown)(unsigned int cpu), bool multi_instance); /** - * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks + * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup + * callback * @state: The state for which the calls are installed * @name: Name of the callback (will be used in debug output) - * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * - * Installs the callback functions and invokes the startup callback on - * the present cpus which have already reached the @state. + * Installs the callback functions and invokes the @startup callback on + * the online cpus which have already reached the @state. */ static inline int cpuhp_setup_state(enum cpuhp_state state, const char *name, @@ -232,6 +275,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state, return __cpuhp_setup_state(state, name, true, startup, teardown, false); } +/** + * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling + * @startup callback from a cpus_read_lock() + * held region + * @state: The state for which the calls are installed + * @name: Name of the callback (will be used in debug output) + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required + * + * Same as cpuhp_setup_state() except that it must be invoked from within a + * cpus_read_lock() held region. 
+ */ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu), @@ -243,14 +298,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, /** * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the - * callbacks + * @startup callback * @state: The state for which the calls are installed * @name: Name of the callback. - * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * - * Same as @cpuhp_setup_state except that no calls are executed are invoked - * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n. + * Same as cpuhp_setup_state() except that the @startup callback is not + * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n. */ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, const char *name, @@ -261,6 +316,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, false); } +/** + * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without + * invoking the @startup callback from + * a cpus_read_lock() held region + * @state: The state for which the calls are installed + * @name: Name of the callback. + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required + * + * Same as cpuhp_setup_state_nocalls() except that it must be invoked from + * within a cpus_read_lock() held region. + */ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu), @@ -274,13 +342,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, * cpuhp_setup_state_multi - Add callbacks for multi state * @state: The state for which the calls are installed * @name: Name of the callback. - * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * * Sets the internal multi_instance flag and prepares a state to work as a multi * instance callback. No callbacks are invoked at this point. The callbacks are * invoked once an instance for this state are registered via - * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls. + * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls(). */ static inline int cpuhp_setup_state_multi(enum cpuhp_state state, const char *name, @@ -305,9 +373,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state and invokes the startup callback on - * the present cpus which have already reached the @state. The @state must have - * been earlier marked as multi-instance by @cpuhp_setup_state_multi. + * Installs the instance for the @state and invokes the registered startup + * callback on the online cpus which have already reached the @state. The + * @state must have been earlier marked as multi-instance by + * cpuhp_setup_state_multi(). 
*/ static inline int cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node) @@ -321,8 +390,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state The @state must have been earlier - * marked as multi-instance by @cpuhp_setup_state_multi. + * Installs the instance for the @state. The @state must have been earlier + * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or + * HOTPLUG_CPU=n. */ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, struct hlist_node *node) @@ -330,6 +400,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, return __cpuhp_state_add_instance(state, node, false); } +/** + * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state + * without invoking the startup + * callback from a cpus_read_lock() + * held region. + * @state: The state for which the instance is installed + * @node: The node for this individual state. + * + * Same as cpuhp_state_add_instance_nocalls() except that it must be + * invoked from within a cpus_read_lock() held region. + */ static inline int cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state, struct hlist_node *node) @@ -345,7 +426,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke); * @state: The state for which the calls are removed * * Removes the callback functions and invokes the teardown callback on - * the present cpus which have already reached the @state. + * the online cpus which have already reached the @state. */ static inline void cpuhp_remove_state(enum cpuhp_state state) { @@ -354,7 +435,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state) /** * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking - * teardown + * the teardown callback * @state: The state for which the calls are removed */ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) @@ -362,6 +443,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) __cpuhp_remove_state(state, false); } +/** + * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking + * teardown from a cpus_read_lock() held region. + * @state: The state for which the calls are removed + * + * Same as cpuhp_remove_state_nocalls() except that it must be invoked + * from within a cpus_read_lock() held region. + */ static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state) { __cpuhp_remove_state_cpuslocked(state, false); @@ -389,8 +478,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, * @state: The state from which the instance is removed * @node: The node for this individual state. * - * Removes the instance and invokes the teardown callback on the present cpus - * which have already reached the @state. + * Removes the instance and invokes the teardown callback on the online cpus + * which have already reached @state. 
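To make the calling convention concrete, a hedged example of registering a dynamic online state (all demo_* names are hypothetical); cpuhp_setup_state() returns the allocated state number when CPUHP_AP_ONLINE_DYN is requested:

static int demo_cpu_online(unsigned int cpu)
{
	/* per-CPU setup goes here; runs on the hotplugged CPU */
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	/* per-CPU teardown goes here */
	return 0;
}

static enum cpuhp_state demo_hp_state;

static int demo_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;

	demo_hp_state = ret;	/* remember the dynamic state for removal */
	return 0;
}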
*/ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node) @@ -400,7 +489,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, /** * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state - * without invoking the reatdown callback + * without invoking the teardown callback * @state: The state from which the instance is removed * @node: The node for this individual state. * diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index bfc4690de4f4..5d4d07a9e1ed 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -259,7 +259,7 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool /** * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location * @cpu: the (optionally unsigned) integer iterator - * @mask: the cpumask poiter + * @mask: the cpumask pointer * @start: the start location * * The implementation does not assume any bit in @mask is set (including @start). @@ -983,6 +983,44 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) nr_cpu_ids); } +/** + * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as + * hex values of cpumask + * + * @buf: the buffer to copy into + * @mask: the cpumask to copy + * @off: offset in the printed string from which to start copying into @buf + * @count: the maximum number of bytes to print + * + * The function prints the cpumask into the buffer as hex values of + * cpumask. Typically used by a bin_attribute to export the cpumask bitmask + * ABI. + * + * Returns the number of bytes copied. + */ +static inline ssize_t +cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + +/** + * cpumap_print_list_to_buf - copies the cpumask into the buffer as + * comma-separated list of cpus + * + * Everything is the same as the above cpumap_print_bitmask_to_buf() + * except the print format. 
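A hedged sketch of the intended bin_attribute usage (the demo names are hypothetical); the helper does the offset/count bookkeeping so large masks can be read in chunks:

static ssize_t demo_cpus_read(struct file *file, struct kobject *kobj,
			      struct bin_attribute *attr, char *buf,
			      loff_t off, size_t count)
{
	return cpumap_print_bitmask_to_buf(buf, cpu_online_mask, off, count);
}

/* BIN_ATTR_RO wires .read to demo_cpus_read; size 0 as the length is dynamic */
static BIN_ATTR_RO(demo_cpus, 0);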
+ */ +static inline ssize_t +cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_list_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + #if NR_CPUS <= BITS_PER_LONG #define CPU_MASK_ALL \ (cpumask_t) { { \ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 04c20de66afc..d2b9c41c8edf 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -15,6 +15,7 @@ #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> +#include <linux/mmu_context.h> #include <linux/jump_label.h> #ifdef CONFIG_CPUSETS @@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); -extern void cpuset_cpus_allowed_fallback(struct task_struct *p); +extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { - cpumask_copy(mask, cpu_possible_mask); + cpumask_copy(mask, task_cpu_possible_mask(p)); } -static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) +static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) { + return false; } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 206bde8308b2..de62a722431e 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,8 +38,12 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) -#define VMCOREINFO_BUILD_ID(value) \ - vmcoreinfo_append_str("BUILD-ID=%s\n", value) +#define VMCOREINFO_BUILD_ID() \ + ({ \ + static_assert(sizeof(vmlinux_build_id) == 20); \ + vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \ + }) + #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -69,10 +73,6 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; -/* raw contents of kernel .notes section */ -extern const void __start_notes __weak; -extern const void __stop_notes __weak; - Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index a5192b718dbe..2618577a4d6d 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -10,13 +10,14 @@ #include <linux/pgtable.h> /* for pgprot_t */ -#ifdef CONFIG_CRASH_DUMP +/* For IS_ENABLED(CONFIG_CRASH_DUMP) */ #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) extern unsigned long long elfcorehdr_addr; extern unsigned long long elfcorehdr_size; +#ifdef CONFIG_CRASH_DUMP extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); extern void elfcorehdr_free(unsigned long long addr); extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); diff --git a/include/linux/cred.h b/include/linux/cred.h index 14971322e1a0..fcbc6885cc09 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -143,6 +143,7 @@ struct cred 
{ #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ + struct ucounts *ucounts; struct group_info *group_info; /* supplementary groups for euid/fsgid */ /* RCU deletion */ union { @@ -169,6 +170,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); +extern int set_cred_ucounts(struct cred *); /* * check for validity of credentials @@ -369,6 +371,7 @@ static inline void put_cred(const struct cred *_cred) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) +#define task_ucounts(task) (task_cred_xxx((task), ucounts)) #define current_cred_xxx(xxx) \ ({ \ @@ -385,6 +388,7 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) +#define current_ucounts() (current_cred_xxx(ucounts)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS diff --git a/include/linux/crypto.h b/include/linux/crypto.h index da5e0d74bb2f..855869e1fd32 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -643,32 +643,6 @@ struct crypto_comp { struct crypto_tfm base; }; -enum { - CRYPTOA_UNSPEC, - CRYPTOA_ALG, - CRYPTOA_TYPE, - CRYPTOA_U32, - __CRYPTOA_MAX, -}; - -#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) - -/* Maximum number of (rtattr) parameters for each template. */ -#define CRYPTO_MAX_ATTRS 32 - -struct crypto_attr_alg { - char name[CRYPTO_MAX_ALG_NAME]; -}; - -struct crypto_attr_type { - u32 type; - u32 mask; -}; - -struct crypto_attr_u32 { - u32 num; -}; - /* * Transform user interface. */ diff --git a/include/linux/damon.h b/include/linux/damon.h new file mode 100644 index 000000000000..d68b67b8d458 --- /dev/null +++ b/include/linux/damon.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DAMON api + * + * Author: SeongJae Park <sjpark@amazon.de> + */ + +#ifndef _DAMON_H_ +#define _DAMON_H_ + +#include <linux/mutex.h> +#include <linux/time64.h> +#include <linux/types.h> + +/* Minimal region size. Every damon_region is aligned by this. */ +#define DAMON_MIN_REGION PAGE_SIZE + +/** + * struct damon_addr_range - Represents an address region of [@start, @end). + * @start: Start address of the region (inclusive). + * @end: End address of the region (exclusive). + */ +struct damon_addr_range { + unsigned long start; + unsigned long end; +}; + +/** + * struct damon_region - Represents a monitoring target region. + * @ar: The address range of the region. + * @sampling_addr: Address of the sample for the next access check. + * @nr_accesses: Access frequency of this region. + * @list: List head for siblings. + */ +struct damon_region { + struct damon_addr_range ar; + unsigned long sampling_addr; + unsigned int nr_accesses; + struct list_head list; +}; + +/** + * struct damon_target - Represents a monitoring target. + * @id: Unique identifier for this target. + * @nr_regions: Number of monitoring target regions of this target. + * @regions_list: Head of the monitoring target regions of this target. + * @list: List head for siblings. + * + * Each monitoring context could have multiple targets. For example, a context + * for virtual memory address spaces could have multiple target processes. 
The + * @id of each target should be unique among the targets of the context. For + * example, in the virtual address monitoring context, it could be a pidfd or + * an address of an mm_struct. + */ +struct damon_target { + unsigned long id; + unsigned int nr_regions; + struct list_head regions_list; + struct list_head list; +}; + +struct damon_ctx; + +/** + * struct damon_primitive - Monitoring primitives for given use cases. + * + * @init: Initialize primitive-internal data structures. + * @update: Update primitive-internal data structures. + * @prepare_access_checks: Prepare next access check of target regions. + * @check_accesses: Check the accesses to target regions. + * @reset_aggregated: Reset aggregated accesses monitoring results. + * @target_valid: Determine if the target is valid. + * @cleanup: Clean up the context. + * + * DAMON can be extended for various address spaces and usages. For this, + * users should register the low level primitives for their target address + * space and usecase via the &damon_ctx.primitive. Then, the monitoring thread + * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting + * the monitoring, @update after each &damon_ctx.primitive_update_interval, and + * @check_accesses, @target_valid and @prepare_access_checks after each + * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each + * &damon_ctx.aggr_interval. + * + * @init should initialize primitive-internal data structures. For example, + * this could be used to construct proper monitoring target regions and link + * those to @damon_ctx.adaptive_targets. + * @update should update the primitive-internal data structures. For example, + * this could be used to update monitoring target regions for current status. + * @prepare_access_checks should manipulate the monitoring regions to be + * prepared for the next access check. + * @check_accesses should check the accesses to each region made after the + * last preparation and update the number of observed accesses of each region. + * It should also return the max number of observed accesses made as a result + * of its update. The value will be used as the regions adjustment threshold. + * @reset_aggregated should reset the access monitoring results aggregated + * by @check_accesses. + * @target_valid should check whether the target is still valid for the + * monitoring. + * @cleanup is called from @kdamond just before its termination. + */ +struct damon_primitive { + void (*init)(struct damon_ctx *context); + void (*update)(struct damon_ctx *context); + void (*prepare_access_checks)(struct damon_ctx *context); + unsigned int (*check_accesses)(struct damon_ctx *context); + void (*reset_aggregated)(struct damon_ctx *context); + bool (*target_valid)(void *target); + void (*cleanup)(struct damon_ctx *context); +}; + +/* + * struct damon_callback - Monitoring events notification callbacks. + * + * @before_start: Called before starting the monitoring. + * @after_sampling: Called after each sampling. + * @after_aggregation: Called after each aggregation. + * @before_terminate: Called before terminating the monitoring. + * @private: User private data. + * + * The monitoring thread (&damon_ctx.kdamond) calls @before_start and + * @before_terminate just before starting and finishing the monitoring, + * respectively. Therefore, those are good places for installing and cleaning + * @private. 
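To illustrate the callback contract described above, a hedged sketch of an @after_aggregation handler that walks the aggregated results using the list macros defined later in this header (the demo_* names are hypothetical):

static int demo_after_aggregation(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	/* safe to read the results here, per the comment above */
	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			pr_debug("region [%lu, %lu) nr_accesses=%u\n",
				 r->ar.start, r->ar.end, r->nr_accesses);
	}
	return 0;	/* a non-zero return would stop the monitoring */
}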
+ * + * The monitoring thread calls @after_sampling and @after_aggregation for each + * of the sampling intervals and aggregation intervals, respectively. + * Therefore, users can safely access the monitoring results without additional + * protection. For this reason, users are recommended to use these callbacks + * for accessing the results. + * + * If any callback returns non-zero, monitoring stops. + */ +struct damon_callback { + void *private; + + int (*before_start)(struct damon_ctx *context); + int (*after_sampling)(struct damon_ctx *context); + int (*after_aggregation)(struct damon_ctx *context); + int (*before_terminate)(struct damon_ctx *context); +}; + +/** + * struct damon_ctx - Represents a context for each monitoring. This is the + * main interface that allows users to set the attributes and get the results + * of the monitoring. + * + * @sample_interval: The time between access samplings. + * @aggr_interval: The time between monitor results aggregations. + * @primitive_update_interval: The time between monitoring primitive updates. + * + * For each @sample_interval, DAMON checks whether each region is accessed or + * not. It aggregates and keeps the access information (number of accesses to + * each region) for @aggr_interval time. DAMON also checks whether the target + * memory regions need update (e.g., by ``mmap()`` calls from the application, + * in case of virtual memory monitoring) and applies the changes for each + * @primitive_update_interval. All time intervals are in microseconds. + * Please refer to &struct damon_primitive and &struct damon_callback for more + * detail. + * + * @kdamond: Kernel thread that does the monitoring. + * @kdamond_stop: Notifies whether kdamond should stop. + * @kdamond_lock: Mutex for synchronization with @kdamond. + * + * For each monitoring context, one kernel thread for the monitoring is + * created. The pointer to the thread is stored in @kdamond. + * + * Once started, the monitoring thread runs until explicitly required to be + * terminated or every monitoring target is invalid. The validity of the + * targets is checked via the &damon_primitive.target_valid of @primitive. The + * termination can also be explicitly requested by writing non-zero to + * @kdamond_stop. The thread sets @kdamond to NULL when it terminates. + * Therefore, users can know whether the monitoring is ongoing or terminated by + * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from + * outside of the monitoring thread must be protected by @kdamond_lock. + * + * Note that the monitoring thread protects only @kdamond and @kdamond_stop via + * @kdamond_lock. Accesses to other fields must be protected by the users + * themselves. + * + * @primitive: Set of monitoring primitives for given use cases. + * @callback: Set of callbacks for monitoring events notifications. + * + * @min_nr_regions: The minimum number of adaptive monitoring regions. + * @max_nr_regions: The maximum number of adaptive monitoring regions. + * @adaptive_targets: Head of monitoring targets (&damon_target) list.
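As a rough usage sketch (not part of this patch): a kernel module might drive the interface declared in this header roughly as below, assuming CONFIG_DAMON and CONFIG_DAMON_VADDR; the interval values, region bounds and the use of a pid as target id are illustrative only, per the @id documentation above.

#include <linux/damon.h>

static int example_start_damon(unsigned long pid_as_id)
{
	struct damon_ctx *ctx = damon_new_ctx();
	int err;

	if (!ctx)
		return -ENOMEM;

	/* 5ms sampling, 100ms aggregation, 1s primitive update, 10-1000 regions */
	err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
	if (err)
		goto out;

	err = damon_set_targets(ctx, &pid_as_id, 1);
	if (err)
		goto out;

	/* register the virtual address space primitives declared further down */
	damon_va_set_primitives(ctx);

	err = damon_start(&ctx, 1);	/* spawns the kdamond thread */
out:
	if (err)
		damon_destroy_ctx(ctx);
	return err;
}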
+ */ +struct damon_ctx { + unsigned long sample_interval; + unsigned long aggr_interval; + unsigned long primitive_update_interval; + +/* private: internal use only */ + struct timespec64 last_aggregation; + struct timespec64 last_primitive_update; + +/* public: */ + struct task_struct *kdamond; + bool kdamond_stop; + struct mutex kdamond_lock; + + struct damon_primitive primitive; + struct damon_callback callback; + + unsigned long min_nr_regions; + unsigned long max_nr_regions; + struct list_head adaptive_targets; +}; + +#define damon_next_region(r) \ + (container_of(r->list.next, struct damon_region, list)) + +#define damon_prev_region(r) \ + (container_of(r->list.prev, struct damon_region, list)) + +#define damon_for_each_region(r, t) \ + list_for_each_entry(r, &t->regions_list, list) + +#define damon_for_each_region_safe(r, next, t) \ + list_for_each_entry_safe(r, next, &t->regions_list, list) + +#define damon_for_each_target(t, ctx) \ + list_for_each_entry(t, &(ctx)->adaptive_targets, list) + +#define damon_for_each_target_safe(t, next, ctx) \ + list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list) + +#ifdef CONFIG_DAMON + +struct damon_region *damon_new_region(unsigned long start, unsigned long end); +inline void damon_insert_region(struct damon_region *r, + struct damon_region *prev, struct damon_region *next, + struct damon_target *t); +void damon_add_region(struct damon_region *r, struct damon_target *t); +void damon_destroy_region(struct damon_region *r, struct damon_target *t); + +struct damon_target *damon_new_target(unsigned long id); +void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); +void damon_free_target(struct damon_target *t); +void damon_destroy_target(struct damon_target *t); +unsigned int damon_nr_regions(struct damon_target *t); + +struct damon_ctx *damon_new_ctx(void); +void damon_destroy_ctx(struct damon_ctx *ctx); +int damon_set_targets(struct damon_ctx *ctx, + unsigned long *ids, ssize_t nr_ids); +int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, + unsigned long aggr_int, unsigned long primitive_upd_int, + unsigned long min_nr_reg, unsigned long max_nr_reg); +int damon_nr_running_ctxs(void); + +int damon_start(struct damon_ctx **ctxs, int nr_ctxs); +int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); + +#endif /* CONFIG_DAMON */ + +#ifdef CONFIG_DAMON_VADDR + +/* Monitoring primitives for virtual memory address spaces */ +void damon_va_init(struct damon_ctx *ctx); +void damon_va_update(struct damon_ctx *ctx); +void damon_va_prepare_access_checks(struct damon_ctx *ctx); +unsigned int damon_va_check_accesses(struct damon_ctx *ctx); +bool damon_va_target_valid(void *t); +void damon_va_cleanup(struct damon_ctx *ctx); +void damon_va_set_primitives(struct damon_ctx *ctx); + +#endif /* CONFIG_DAMON_VADDR */ + +#endif /* _DAMON_H */ diff --git a/include/linux/dax.h b/include/linux/dax.h index b52f084aa643..2619d94c308d 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -41,7 +41,6 @@ struct dax_operations { extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) -struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops, unsigned long flags); void put_dax(struct dax_device *dax_dev); @@ -58,8 +57,6 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, 
sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. */ @@ -73,10 +70,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, return dax_synchronous(dax_dev); } #else -static inline struct dax_device *dax_get_by_host(const char *host) -{ - return NULL; -} static inline struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops, unsigned long flags) { @@ -106,12 +99,6 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } -static inline bool dax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t len) -{ - return false; -} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -122,22 +109,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct writeback_control; int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) -bool __bdev_dax_supported(struct block_device *bdev, int blocksize); -static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) -{ - return __bdev_dax_supported(bdev, blocksize); -} - -bool __generic_fsdax_supported(struct dax_device *dax_dev, +bool generic_fsdax_supported(struct dax_device *dax_dev, struct block_device *bdev, int blocksize, sector_t start, sector_t sectors); -static inline bool generic_fsdax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t sectors) -{ - return __generic_fsdax_supported(dax_dev, bdev, blocksize, start, - sectors); -} + +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); static inline void fs_put_dax(struct dax_device *dax_dev) { @@ -153,15 +130,11 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t st dax_entry_t dax_lock_page(struct page *page); void dax_unlock_page(struct page *page, dax_entry_t cookie); #else -static inline bool bdev_dax_supported(struct block_device *bdev, - int blocksize) -{ - return false; -} +#define generic_fsdax_supported NULL -static inline bool generic_fsdax_supported(struct dax_device *dax_dev, +static inline bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, int blocksize, sector_t start, - sector_t sectors) + sector_t len) { return false; } diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 2915f56ad421..3f49e65169c6 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -3,8 +3,7 @@ #define __LINUX_DEBUG_LOCKING_H #include <linux/atomic.h> -#include <linux/bug.h> -#include <linux/printk.h> +#include <linux/cache.h> struct task_struct; @@ -27,8 +26,10 @@ extern int debug_locks_off(void); int __ret = 0; \ \ if (!oops_in_progress && unlikely(c)) { \ + instrumentation_begin(); \ if (debug_locks_off() && !debug_locks_silent) \ WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \ + instrumentation_end(); \ __ret = 1; \ } \ __ret; \ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 1fdb4343af9c..c869f1e73d75 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -112,8 +112,8 @@ void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value); void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); -struct dentry 
*debugfs_create_ulong(const char *name, umode_t mode, - struct dentry *parent, unsigned long *value); +void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, + unsigned long *value); void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value); void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, @@ -126,8 +126,8 @@ void debugfs_create_size_t(const char *name, umode_t mode, struct dentry *parent, size_t *value); void debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value); -struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, bool *value); +void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, + bool *value); void debugfs_create_str(const char *name, umode_t mode, struct dentry *parent, char **value); @@ -266,13 +266,9 @@ static inline void debugfs_create_u32(const char *name, umode_t mode, static inline void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { } -static inline struct dentry *debugfs_create_ulong(const char *name, - umode_t mode, - struct dentry *parent, - unsigned long *value) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, + unsigned long *value) { } static inline void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { } @@ -295,12 +291,8 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode, atomic_t *value) { } -static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, - bool *value) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, bool *value) { } static inline void debugfs_create_str(const char *name, umode_t mode, struct dentry *parent, diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 8d2dde23e9fb..32444686b6ff 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -18,7 +18,7 @@ enum debug_obj_state { struct debug_obj_descr; /** - * struct debug_obj - representaion of an tracked object + * struct debug_obj - representation of a tracked object * @node: hlist node to link the object into the tracker list * @state: tracked object state * @astate: current active state diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 21651f946751..af7e6eb50283 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -58,16 +58,22 @@ struct task_delay_info { #include <linux/sched.h> #include <linux/slab.h> +#include <linux/jump_label.h> #ifdef CONFIG_TASK_DELAY_ACCT +DECLARE_STATIC_KEY_FALSE(delayacct_key); extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); + +extern int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); + extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(struct task_struct *); -extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern int delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void
__delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); @@ -114,6 +120,9 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) static inline void delayacct_blkio_start(void) { + if (!static_branch_unlikely(&delayacct_key)) + return; + delayacct_set_flag(current, DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); @@ -121,19 +130,14 @@ static inline void delayacct_blkio_start(void) static inline void delayacct_blkio_end(struct task_struct *p) { + if (!static_branch_unlikely(&delayacct_key)) + return; + if (p->delays) __delayacct_blkio_end(p); delayacct_clear_flag(p, DELAYACCT_PF_BLKIO); } -static inline int delayacct_add_tsk(struct taskstats *d, - struct task_struct *tsk) -{ - if (!delayacct_on || !tsk->delays) - return 0; - return __delayacct_add_tsk(d, tsk); -} - static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { if (tsk->delays) diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 6f009559ee54..8904063d4c9f 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -38,8 +38,8 @@ __printf(3, 4) __cold int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); __printf(3, 4) __cold -void dev_printk(const char *level, const struct device *dev, - const char *fmt, ...); +void _dev_printk(const char *level, const struct device *dev, + const char *fmt, ...); __printf(2, 3) __cold void _dev_emerg(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold @@ -69,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) {} static inline __printf(3, 4) -void dev_printk(const char *level, const struct device *dev, +void _dev_printk(const char *level, const struct device *dev, const char *fmt, ...) {} @@ -98,24 +98,56 @@ void _dev_info(const struct device *dev, const char *fmt, ...) #endif /* + * Need to take variadic arguments even though we don't use them, as dev_fmt() + * may only just have been expanded and may result in multiple arguments. + */ +#define dev_printk_index_emit(level, fmt, ...) \ + printk_index_subsys_emit("%s %s: ", level, fmt) + +#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \ + ({ \ + dev_printk_index_emit(level, fmt); \ + _p_func(dev, fmt, ##__VA_ARGS__); \ + }) + +/* + * Some callsites directly call dev_printk rather than going through the + * dev_<level> infrastructure, so we need to emit here as well as inside those + * level-specific macros. Only one index entry will be produced, either way, + * since dev_printk's `fmt` isn't known at compile time if going through the + * dev_<level> macros. + * + * dev_fmt() isn't called for dev_printk when used directly, as it's used by + * the dev_<level> macros internally which already have dev_fmt() processed. + * + * We also can't use dev_printk_index_wrap directly, because we have a separate + * level to process. + */ +#define dev_printk(level, dev, fmt, ...) \ + ({ \ + dev_printk_index_emit(level, fmt); \ + _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ + }) + +/* * #defines for all the dev_<level> macros to prefix with whatever * possible use of #define dev_fmt(fmt) ... */ -#define dev_emerg(dev, fmt, ...) \ - _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_crit(dev, fmt, ...) \ - _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_alert(dev, fmt, ...) \ - _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_err(dev, fmt, ...) 
\ - _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_warn(dev, fmt, ...) \ - _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_notice(dev, fmt, ...) \ - _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_info(dev, fmt, ...) \ - _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_emerg(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_crit(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_alert(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_err(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_warn(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_notice(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_info(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) @@ -236,7 +268,7 @@ do { \ * using WARN/WARN_ONCE to include file/line information and a backtrace. */ #define dev_WARN(dev, format, arg...) \ - WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); + WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg) #define dev_WARN_ONCE(dev, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: " format, \ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ff700fb6ce1d..114553b487ef 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -31,7 +31,7 @@ enum dm_queue_mode { DM_TYPE_DAX_BIO_BASED = 3, }; -typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; union map_info { void *ptr; @@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages); -#define PAGE_SECTORS (PAGE_SIZE / 512) void dm_error(const char *message); @@ -361,6 +360,12 @@ struct dm_target { * Set if we need to limit the number of in-flight bios when swapping. */ bool limit_swap_bios:1; + + /* + * Set if this target implements a zoned device and needs emulation of + * zone append operations using regular writes. + */ + bool emulate_zone_append:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -478,7 +483,8 @@ struct dm_report_zones_args { /* must be filled by ->report_zones before calling dm_report_zones_cb */ sector_t start; }; -int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data); +int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector, + struct dm_report_zones_args *args, unsigned int nr_zones); #endif /* CONFIG_BLK_DEV_ZONED */ /* @@ -596,6 +602,10 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) +#define DMEMIT_TARGET_NAME_VERSION(y) \ + DMEMIT("target_name=%s,target_version=%u.%u.%u", \ + (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) + /* * Definitions of return values from target end_io function.
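The new STATUSTYPE_IMA case and DMEMIT_TARGET_NAME_VERSION() combine naturally in a target's status callback; a hypothetical sketch (the target itself is made up, and DMEMIT() relies on 'sz', 'result' and 'maxlen' being in scope):

static void example_status(struct dm_target *ti, status_type_t type,
			   unsigned int status_flags, char *result,
			   unsigned int maxlen)
{
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("example info");
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("example table args");
		break;
	case STATUSTYPE_IMA:
		/* ti->type is the target's struct target_type */
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		break;
	}
}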
*/ diff --git a/include/linux/device.h b/include/linux/device.h index f1a00040fa53..e270cb740b9e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -165,21 +165,12 @@ void device_remove_bin_file(struct device *dev, typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); -#ifdef CONFIG_DEBUG_DEVRES void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) -#else -void *devres_alloc_node(dr_release_t release, size_t size, - gfp_t gfp, int nid) __malloc; -static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) -{ - return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); -} -#endif void devres_for_each_res(struct device *dev, dr_release_t release, dr_match_t match, void *match_data, @@ -351,6 +342,22 @@ enum dl_dev_state { }; /** + * enum device_removable - Whether the device is removable. The criteria for a + * device to be classified as removable is determined by its subsystem or bus. + * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this + * device (default). + * @DEVICE_REMOVABLE_UNKNOWN: Device location is Unknown. + * @DEVICE_FIXED: Device is not removable by the user. + * @DEVICE_REMOVABLE: Device is removable by the user. + */ +enum device_removable { + DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */ + DEVICE_REMOVABLE_UNKNOWN, + DEVICE_FIXED, + DEVICE_REMOVABLE, +}; + +/** * struct dev_links_info - Device data related to device links. * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. @@ -399,7 +406,8 @@ struct dev_links_info { * along with subsystem-level and driver-level callbacks. * @em_pd: device's energy model performance domain * @pins: For device pin management. - * See Documentation/driver-api/pinctl.rst for details. + * See Documentation/driver-api/pin-control.rst for details. + * @msi_lock: Lock to protect MSI mask cache and mask register * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. @@ -416,6 +424,7 @@ struct dev_links_info { * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations + * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. @@ -431,6 +440,9 @@ struct dev_links_info { * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu: Per device generic IOMMU runtime data + * @removable: Whether the device can be removed from the system. This + * should be set by the subsystem / bus driver that discovered + * the device. * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). 
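With the helpers added in the next hunk, a discovering bus can record removability once and consumers can query it later; a hedged sketch (the surrounding driver code and helper are hypothetical):

/* In a bus driver, while enumerating a hot-pluggable port: */
dev_set_removable(&newdev->dev, DEVICE_REMOVABLE);

/* In a consumer, e.g. when picking a power management policy: */
if (dev_removable_is_valid(dev) && dev_is_removable(dev))
	example_enable_aggressive_suspend(dev);	/* hypothetical helper */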
@@ -496,6 +508,7 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spinlock_t msi_lock; struct list_head msi_list; #endif #ifdef CONFIG_DMA_OPS @@ -522,6 +535,9 @@ struct device { struct cma *cma_area; /* contiguous memory area for dma allocations */ #endif +#ifdef CONFIG_SWIOTLB + struct io_tlb_mem *dma_io_tlb_mem; +#endif /* arch specific additions */ struct dev_archdata archdata; @@ -544,6 +560,8 @@ struct device { struct iommu_group *iommu_group; struct dev_iommu *iommu; + enum device_removable removable; + bool offline_disabled:1; bool offline:1; bool of_node_reused:1; @@ -780,6 +798,22 @@ static inline bool dev_has_sync_state(struct device *dev) return false; } +static inline void dev_set_removable(struct device *dev, + enum device_removable removable) +{ + dev->removable = removable; +} + +static inline bool dev_is_removable(struct device *dev) +{ + return dev->removable == DEVICE_REMOVABLE; +} + +static inline bool dev_removable_is_valid(struct device *dev) +{ + return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED; +} + /* * High level routines for use by the bus drivers */ @@ -817,6 +851,7 @@ int device_online(struct device *dev); void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); +void device_set_node(struct device *dev, struct fwnode_handle *fwnode); static inline int dev_num_vf(struct device *dev) { @@ -845,6 +880,8 @@ static inline void *dev_get_platdata(const struct device *dev) * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ +int __must_check device_driver_attach(struct device_driver *drv, + struct device *dev); int __must_check device_bind_driver(struct device *dev); void device_release_driver(struct device *dev); int __must_check device_attach(struct device *dev); diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index 1ea5e1d1545b..062777a45a74 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -91,7 +91,7 @@ struct bus_type { int (*uevent)(struct device *dev, struct kobj_uevent_env *env); int (*probe)(struct device *dev); void (*sync_state)(struct device *dev); - int (*remove)(struct device *dev); + void (*remove)(struct device *dev); void (*shutdown)(struct device *dev); int (*online)(struct device *dev); diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h index f40f77717a24..74891802200d 100644 --- a/include/linux/devm-helpers.h +++ b/include/linux/devm-helpers.h @@ -51,4 +51,29 @@ static inline int devm_delayed_work_autocancel(struct device *dev, return devm_add_action(dev, devm_delayed_work_drop, w); } +static inline void devm_work_drop(void *res) +{ + cancel_work_sync(res); +} + +/** + * devm_work_autocancel - Resource-managed work allocation + * @dev: Device whose lifetime the work is bound to + * @w: Work to be added (and automatically cancelled) + * @worker: Worker function + * + * Initialize work which is automatically cancelled when driver is detached. + * A few drivers need to queue work which must be cancelled before driver + * is detached to avoid accessing removed resources. + * devm_work_autocancel() can be used to omit the explicit + * cancellation when driver is detached.
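A usage sketch for the helper defined just below (driver names are hypothetical): embed the work in driver state allocated with devm_kzalloc() and let devres cancel it on detach.

struct example_priv {
	struct work_struct work;
};

static void example_worker(struct work_struct *work)
{
	/* safe: guaranteed to be cancelled before the driver detaches */
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* cancel_work_sync() runs automatically on driver detach */
	return devm_work_autocancel(&pdev->dev, &priv->work, example_worker);
}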
+ */ +static inline int devm_work_autocancel(struct device *dev, + struct work_struct *w, + work_func_t worker) +{ + INIT_WORK(w, worker); + return devm_add_action(dev, devm_work_drop, w); +} + #endif diff --git a/include/linux/dfl.h b/include/linux/dfl.h index 6cc10982351a..431636a0dc78 100644 --- a/include/linux/dfl.h +++ b/include/linux/dfl.h @@ -38,6 +38,7 @@ struct dfl_device { int id; u16 type; u16 feature_id; + u8 revision; struct resource mmio_res; int *irqs; unsigned int num_irqs; diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h index e42de7750c88..c1707ee5b540 100644 --- a/include/linux/dm-kcopyd.h +++ b/include/linux/dm-kcopyd.h @@ -51,6 +51,7 @@ MODULE_PARM_DESC(name, description) struct dm_kcopyd_client; struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle); void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc); +void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc); /* * Submit a copy job to kcopyd. This is built on top of the diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index efdc56b9d95f..8b32b4bdd590 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -54,7 +54,7 @@ struct dma_buf_ops { * device), and otherwise need to fail the attach operation. * * The exporter should also in general check whether the current - * allocation fullfills the DMA constraints of the new device. If this + * allocation fulfills the DMA constraints of the new device. If this * is not the case, and the allocation cannot be moved, it should also * fail the attach operation. * @@ -96,6 +96,12 @@ struct dma_buf_ops { * This is called automatically for non-dynamic importers from * dma_buf_attach(). * + * Note that, similar to non-dynamic exporters in their @map_dma_buf + * callback, the driver must guarantee that the memory is available for + * use and cleared of any old data by the time this function returns. + * Drivers which pipeline their buffer moves internally must wait for + * all moves and clears to complete. + * * Returns: * * 0 on success, negative error code on failure. @@ -144,9 +150,18 @@ struct dma_buf_ops { * This is always called with the dmabuf->resv object locked when * the dynamic_mapping flag is true. * + * Note that for non-dynamic exporters the driver must guarantee that + * the memory is available for use and cleared of any old data by + * the time this function returns. Drivers which pipeline their buffer + * moves internally must wait for all moves and clears to complete. + * Dynamic exporters do not need to follow this rule: For non-dynamic + * importers the buffer is already pinned through @pin, which has the + * same requirements. Dynamic importers, on the other hand, are + * required to obey the dma_resv fences. + * * Returns: * - * A &sg_table scatter list of or the backing storage of the DMA buffer, + * A &sg_table scatter list of the backing storage of the DMA buffer, * already mapped into the device address space of the &device attached * with the provided &dma_buf_attachment. The addresses and lengths in * the scatter list are PAGE_SIZE aligned. @@ -168,7 +183,7 @@ struct dma_buf_ops { * * This is called by dma_buf_unmap_attachment() and should unmap and * release the &sg_table allocated in @map_dma_buf, and it is mandatory. - * For static dma_buf handling this might also unpins the backing + * For static dma_buf handling this might also unpin the backing * storage if this is the last mapping of the DMA buffer.
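For reference, a sketch of the classic non-dynamic importer flow that exercises @map_dma_buf/@unmap_dma_buf through the long-standing dma-buf entry points (error handling trimmed; dmabuf and dev come from surrounding, hypothetical driver code):

struct dma_buf_attachment *att = dma_buf_attach(dmabuf, dev);
struct sg_table *sgt;

if (IS_ERR(att))
	return PTR_ERR(att);

sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);	/* calls @map_dma_buf */
if (IS_ERR(sgt)) {
	dma_buf_detach(dmabuf, att);
	return PTR_ERR(sgt);
}

/* ... program the device using the sgt entries ... */

dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);	/* calls @unmap_dma_buf */
dma_buf_detach(dmabuf, att);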
*/ void (*unmap_dma_buf)(struct dma_buf_attachment *, @@ -237,7 +252,7 @@ struct dma_buf_ops { * This callback is used by the dma_buf_mmap() function * * Note that the mapping needs to be incoherent, userspace is expected - * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. * * Because dma-buf buffers have invariant size over their lifetime, the * dma-buf core checks whether a vma is too large and rejects such @@ -274,27 +289,6 @@ struct dma_buf_ops { /** * struct dma_buf - shared buffer object - * @size: size of the buffer; invariant over the lifetime of the buffer. - * @file: file pointer used for sharing buffers across, and for refcounting. - * @attachments: list of dma_buf_attachment that denotes all devices attached, - * protected by dma_resv lock. - * @ops: dma_buf_ops associated with this buffer object. - * @lock: used internally to serialize list manipulation, attach/detach and - * vmap/unmap - * @vmapping_counter: used internally to refcnt the vmaps - * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 - * @exp_name: name of the exporter; useful for debugging. - * @name: userspace-provided name; useful for accounting and debugging, - * protected by @resv. - * @name_lock: spinlock to protect name access - * @owner: pointer to exporter module; used for refcounting when exporter is a - * kernel module. - * @list_node: node for dma_buf accounting and debugging. - * @priv: exporter specific private data for this buffer object. - * @resv: reservation object linked to this dma-buf - * @poll: for userspace poll support - * @cb_excl: for userspace poll support - * @cb_shared: for userspace poll support * * This represents a shared buffer, created by calling dma_buf_export(). The * userspace representation is a normal file descriptor, which can be created by @@ -306,30 +300,152 @@ struct dma_buf_ops { * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { + /** + * @size: + * + * Size of the buffer; invariant over the lifetime of the buffer. + */ size_t size; + + /** + * @file: + * + * File pointer used for sharing buffers across, and for refcounting. + * See dma_buf_get() and dma_buf_put(). + */ struct file *file; + + /** + * @attachments: + * + * List of dma_buf_attachment that denotes all devices attached, + * protected by &dma_resv lock @resv. + */ struct list_head attachments; + + /** @ops: dma_buf_ops associated with this buffer object. */ const struct dma_buf_ops *ops; + + /** + * @lock: + * + * Used internally to serialize list manipulation, attach/detach and + * vmap/unmap. Note that in many cases this is superseded by + * dma_resv_lock() on @resv. + */ struct mutex lock; + + /** + * @vmapping_counter: + * + * Used internally to refcnt the vmaps returned by dma_buf_vmap(). + * Protected by @lock. + */ unsigned vmapping_counter; + + /** + * @vmap_ptr: + * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. + */ struct dma_buf_map vmap_ptr; + + /** + * @exp_name: + * + * Name of the exporter; useful for debugging. See the + * DMA_BUF_SET_NAME IOCTL. + */ const char *exp_name; + + /** + * @name: + * + * Userspace-provided name; useful for accounting and debugging, + * protected by dma_resv_lock() on @resv and @name_lock for read access. + */ const char *name; + + /** @name_lock: Spinlock to protect name access for read access.
*/ spinlock_t name_lock; + + /** + * @owner: + * + * Pointer to exporter module; used for refcounting when exporter is a + * kernel module. + */ struct module *owner; + + /** @list_node: node for dma_buf accounting and debugging. */ struct list_head list_node; + + /** @priv: exporter specific private data for this buffer object. */ void *priv; + + /** + * @resv: + * + * Reservation object linked to this dma-buf. + * + * IMPLICIT SYNCHRONIZATION RULES: + * + * Drivers which support implicit synchronization of buffer access as + * e.g. exposed in `Implicit Fence Poll Support`_ must follow the + * below rules. + * + * - Drivers must add a shared fence through dma_resv_add_shared_fence() + * for anything the userspace API considers a read access. This highly + * depends upon the API and window system. + * + * - Similarly drivers must set the exclusive fence through + * dma_resv_add_excl_fence() for anything the userspace API considers + * write access. + * + * - Drivers may just always set the exclusive fence, since that only + * causes unnecessary synchronization, but no correctness issues. + * + * - Some drivers only expose a synchronous userspace API with no + * pipelining across drivers. These do not set any fences for their + * access. An example here is v4l. + * + * DYNAMIC IMPORTER RULES: + * + * Dynamic importers, see dma_buf_attachment_is_dynamic(), have + * additional constraints on how they set up fences: + * + * - Dynamic importers must obey the exclusive fence and wait for it to + * signal before allowing access to the buffer's underlying storage + * through the device. + * + * - Dynamic importers should set fences for any access that they can't + * disable immediately from their &dma_buf_attach_ops.move_notify + * callback. + */ struct dma_resv *resv; - /* poll support */ + /** @poll: for userspace poll support */ wait_queue_head_t poll; + /** @cb_excl: for userspace poll support */ + /** @cb_shared: for userspace poll support */ struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; } cb_excl, cb_shared; +#ifdef CONFIG_DMABUF_SYSFS_STATS + /** + * @sysfs_entry: + * + * For exposing information about this buffer in sysfs. See also + * `DMA-BUF statistics`_ for the uapi this enables.
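A condensed sketch of the implicit synchronization rules above, using the dma_resv helpers touched later in this series (is_write and fence come from surrounding, hypothetical driver code):

err = dma_resv_lock(resv, NULL);
if (err)
	return err;

err = dma_resv_reserve_shared(resv, 1);
if (!err) {
	if (is_write)
		dma_resv_add_excl_fence(resv, fence);	/* writes: exclusive */
	else
		dma_resv_add_shared_fence(resv, fence);	/* reads: shared */
}
dma_resv_unlock(resv);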
+ */ + struct dma_buf_sysfs_entry { + struct kobject kobj; + struct dma_buf *dmabuf; + } *sysfs_entry; +#endif }; /** @@ -464,7 +580,7 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) /** * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic - * mappinsg + * mappings * @attach: the DMA-buf attachment to check * * Returns true if a DMA-buf importer wants to call the map/unmap functions with diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 10462a029da2..54fe3443fd2c 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -12,25 +12,41 @@ #include <linux/dma-fence.h> #include <linux/irq_work.h> +#include <linux/slab.h> /** * struct dma_fence_chain - fence to represent an node of a fence chain * @base: fence base class - * @lock: spinlock for fence handling * @prev: previous fence of the chain * @prev_seqno: original previous seqno before garbage collection * @fence: encapsulated fence - * @cb: callback structure for signaling - * @work: irq work item for signaling + * @lock: spinlock for fence handling */ struct dma_fence_chain { struct dma_fence base; - spinlock_t lock; struct dma_fence __rcu *prev; u64 prev_seqno; struct dma_fence *fence; - struct dma_fence_cb cb; - struct irq_work work; + union { + /** + * @cb: callback for signaling + * + * This is used to add the callback for signaling the + * completion of the fence chain. Never used at the same time + * as the irq work. + */ + struct dma_fence_cb cb; + + /** + * @work: irq work item for signaling + * + * Irq work structure to allow us to add the callback without + * running into lock inversion. Never used at the same time as + * the callback. + */ + struct irq_work work; + }; + spinlock_t lock; }; extern const struct dma_fence_ops dma_fence_chain_ops; @@ -52,6 +68,30 @@ to_dma_fence_chain(struct dma_fence *fence) } /** + * dma_fence_chain_alloc + * + * Returns a new struct dma_fence_chain object or NULL on failure. + */ +static inline struct dma_fence_chain *dma_fence_chain_alloc(void) +{ + return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); +}; + +/** + * dma_fence_chain_free + * @chain: chain node to free + * + * Frees up an allocated but not used struct dma_fence_chain object. This + * doesn't need an RCU grace period since the fence was never initialized nor + * published. After dma_fence_chain_init() has been called the fence must be + * released by calling dma_fence_put(), and not through this function. + */ +static inline void dma_fence_chain_free(struct dma_fence_chain *chain) +{ + kfree(chain); +}; + +/** * dma_fence_chain_for_each - iterate over all fences in chain * @iter: current fence * @head: starting point diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 6e75a2d689b4..24607dc3c2ac 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -19,7 +19,8 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); +void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); +int iommu_dma_init_fq(struct iommu_domain *domain); /* The DMA API isn't _quite_ the whole story, though...
*/ /* @@ -50,10 +51,15 @@ struct msi_msg; struct device; static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size) + u64 dma_limit) { } +static inline int iommu_dma_init_fq(struct iommu_domain *domain) +{ + return -EINVAL; +} + static inline int iommu_get_dma_cookie(struct iommu_domain *domain) { return -ENODEV; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0d53a96a3d64..0d5b06b3a4a6 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -41,8 +41,9 @@ struct dma_map_ops { size_t size, enum dma_data_direction dir, unsigned long attrs); /* - * map_sg returns 0 on error and a value > 0 on success. - * It should never return a value < 0. + * map_sg should return a negative error code on error. See + * dma_map_sgtable() for a list of appropriate error codes + * and their meanings. */ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); @@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); - -void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle); -int dma_release_from_global_coherent(int order, void *vaddr); -int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, - size_t size, int *ret); - #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) @@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev, #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) +#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#ifdef CONFIG_DMA_GLOBAL_POOL +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle); +int dma_release_from_global_coherent(int order, void *vaddr); +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, + size_t size, int *ret); +int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); +#else static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle) { @@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, { return 0; } -#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#endif /* CONFIG_DMA_GLOBAL_POOL */ /* * This is the actual return value from the ->alloc_noncontiguous method. 
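With the dma-mapping.h change below, dma_map_sgtable() becomes a real function that returns a negative errno straight from the backend, rather than an inline wrapper guessing -EINVAL from a zero nents; callers keep the same shape. A sketch:

err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
if (err)
	return err;	/* e.g. -EINVAL or -ENOMEM from the mapping backend */

/* ... perform the DMA ... */

dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);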
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 183e7103a66d..dca2b1355bb1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, unsigned long attrs); void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); -int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs); +unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); +int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, @@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) +static inline unsigned int dma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } @@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev, unsigned long attrs) { } +static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + return -EOPNOTSUPP; +} static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -344,34 +352,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev, } /** - * dma_map_sgtable - Map the given buffer for DMA - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * @attrs: Optional DMA attributes for the map operation - * - * Maps a buffer described by a scatterlist stored in the given sg_table - * object for the @dir DMA operation by the @dev device. After success the - * ownership for the buffer is transferred to the DMA domain. One has to - * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the - * ownership of the buffer back to the CPU domain before touching the - * buffer by the CPU. - * - * Returns 0 on success or -EINVAL on error during mapping the buffer. 
- */ -static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - int nents; - - nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); - if (nents <= 0) - return -EINVAL; - sgt->nents = nents; - return 0; -} - -/** * dma_unmap_sgtable - Unmap the given buffer for DMA * @dev: The device for which to perform the DMA operation * @sgt: The sg_table object describing the buffer diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index d44a77e8a7e3..e1ca2080a1ff 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -78,19 +78,11 @@ struct dma_resv { #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) -/** - * dma_resv_get_list - get the reservation object's - * shared fence list, with update-side lock held - * @obj: the reservation object - * - * Returns the shared fence list. Does NOT take references to - * the fence. The obj->lock must be held. - */ -static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) -{ - return rcu_dereference_protected(obj->fence, - dma_resv_held(obj)); -} +#ifdef CONFIG_DEBUG_MUTEXES +void dma_resv_reset_shared_max(struct dma_resv *obj); +#else +static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} +#endif /** * dma_resv_lock - lock the reservation object @@ -215,38 +207,29 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) */ static inline void dma_resv_unlock(struct dma_resv *obj) { -#ifdef CONFIG_DEBUG_MUTEXES - /* Test shared fence slot reservation */ - if (rcu_access_pointer(obj->fence)) { - struct dma_resv_list *fence = dma_resv_get_list(obj); - - fence->shared_max = fence->shared_count; - } -#endif + dma_resv_reset_shared_max(obj); ww_mutex_unlock(&obj->lock); } /** - * dma_resv_get_excl - get the reservation object's - * exclusive fence, with update-side lock held + * dma_resv_excl_fence - return the object's exclusive fence * @obj: the reservation object * - * Returns the exclusive fence (if any). Does NOT take a - * reference. Writers must hold obj->lock, readers may only - * hold a RCU read side lock. + * Returns the exclusive fence (if any). Caller must either hold the objects + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each * * RETURNS * The exclusive fence or NULL */ static inline struct dma_fence * -dma_resv_get_excl(struct dma_resv *obj) +dma_resv_excl_fence(struct dma_resv *obj) { - return rcu_dereference_protected(obj->fence_excl, - dma_resv_held(obj)); + return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); } /** - * dma_resv_get_excl_rcu - get the reservation object's + * dma_resv_get_excl_unlocked - get the reservation object's * exclusive fence, without lock held. * @obj: the reservation object * @@ -257,7 +240,7 @@ dma_resv_get_excl(struct dma_resv *obj) * The exclusive fence or NULL if none */ static inline struct dma_fence * -dma_resv_get_excl_rcu(struct dma_resv *obj) +dma_resv_get_excl_unlocked(struct dma_resv *obj) { struct dma_fence *fence; @@ -271,23 +254,29 @@ dma_resv_get_excl_rcu(struct dma_resv *obj) return fence; } +/** + * dma_resv_shared_list - get the reservation object's shared fence list + * @obj: the reservation object + * + * Returns the shared fence list. 
Caller must either hold the objects + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each + */ +static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj) +{ + return rcu_dereference_check(obj->fence, dma_resv_held(obj)); +} + void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); - void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); - -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); - +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, + unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); - -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); - -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 004736b6a9c8..e5c2c9e71bf1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -230,12 +230,6 @@ enum sum_check_flags { typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** - * struct dma_chan_percpu - the per-CPU part of struct dma_chan - * @memcpy_count: transaction counter - * @bytes_transferred: byte counter - */ - -/** * enum dma_desc_metadata_mode - per descriptor metadata mode types supported * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the * client driver and it is attached (via the dmaengine_desc_attach_metadata() @@ -291,6 +285,11 @@ enum dma_desc_metadata_mode { DESC_METADATA_ENGINE = BIT(1), }; +/** + * struct dma_chan_percpu - the per-CPU part of struct dma_chan + * @memcpy_count: transaction counter + * @bytes_transferred: byte counter + */ struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; @@ -381,6 +380,7 @@ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; /** @@ -399,7 +399,7 @@ enum dma_slave_buswidth { * @src_addr_width: this is the width in bytes of the source (RX) * register where DMA data shall be read. If the source * is memory this may be ignored depending on architecture. - * Legal values: 1, 2, 3, 4, 8, 16, 32, 64. + * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128. * @dst_addr_width: same as src_addr_width but for destination * target (TX) mutatis mutandis. 
* @src_maxburst: the maximum number of words (note: words, as in diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index b12b05f1c8b4..c7fa4a3498fe 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -11,137 +11,52 @@ struct dsa_switch; struct sk_buff; struct net_device; -struct packet_type; -struct dsa_8021q_context; -struct dsa_8021q_crosschip_link { +struct dsa_tag_8021q_vlan { struct list_head list; int port; - struct dsa_8021q_context *other_ctx; - int other_port; + u16 vid; refcount_t refcount; }; -struct dsa_8021q_ops { - int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags); - int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid); -}; - struct dsa_8021q_context { - const struct dsa_8021q_ops *ops; struct dsa_switch *ds; - struct list_head crosschip_links; + struct list_head vlans; /* EtherType of RX VID, used for filtering on master interface */ __be16 proto; }; -#define DSA_8021Q_N_SUBVLAN 8 +int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); -#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) +void dsa_tag_8021q_unregister(struct dsa_switch *ds); + +struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, + u16 tpid, u16 tci); -int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); -int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); +int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); -int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); +void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); -struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, - u16 tpid, u16 tci); +u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num); u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); -u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan); - int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -u16 dsa_8021q_rx_subvlan(u16 vid); - bool vid_is_dsa_8021q_rxvlan(u16 vid); bool vid_is_dsa_8021q_txvlan(u16 vid); bool vid_is_dsa_8021q(u16 vid); -#else - -int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled) -{ - return 0; -} - -int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port) -{ - return 0; -} - -int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port) -{ - return 0; -} - -struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, - u16 tpid, u16 tci) -{ - return NULL; -} - -u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port) -{ - return 0; -} - -u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port) -{ - return 0; -} - -u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan) -{ - return 0; -} - -int dsa_8021q_rx_switch_id(u16 vid) -{ - return 0; -} - -int dsa_8021q_rx_source_port(u16 vid) -{ - return 0; -} - -u16 dsa_8021q_rx_subvlan(u16 vid) -{ - return 0; -} - -bool vid_is_dsa_8021q_rxvlan(u16 vid) -{ - return false; -} - -bool vid_is_dsa_8021q_txvlan(u16 vid) -{ - return false; -} - -bool 
vid_is_dsa_8021q(u16 vid) -{ - return false; -} - -#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ - #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index 1eb84562b311..171106202fe5 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -14,6 +14,9 @@ #define ETH_P_SJA1105 ETH_P_DSA_8021Q #define ETH_P_SJA1105_META 0x0008 +#define ETH_P_SJA1110 0xdadc + +#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull @@ -44,25 +47,64 @@ struct sja1105_tagger_data { */ spinlock_t meta_lock; unsigned long state; + u8 ts_id; }; struct sja1105_skb_cb { struct sk_buff *clone; - u32 meta_tstamp; + u64 tstamp; + /* Only valid for packets cloned for 2-step TX timestamping */ + u8 ts_id; }; #define SJA1105_SKB_CB(skb) \ ((struct sja1105_skb_cb *)((skb)->cb)) struct sja1105_port { - u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; struct kthread_worker *xmit_worker; struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; struct dsa_port *dp; bool hwts_tx_en; - u16 xmit_tpid; }; +enum sja1110_meta_tstamp { + SJA1110_META_TSTAMP_TX = 0, + SJA1110_META_TSTAMP_RX = 1, +}; + +#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) + +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp); + +#else + +static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, + u8 ts_id, enum sja1110_meta_tstamp dir, + u64 tstamp) +{ +} + +#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ + +#if IS_ENABLED(CONFIG_NET_DSA_SJA1105) + +extern const struct dsa_switch_ops sja1105_switch_ops; + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return dp->ds->ops == &sja1105_switch_ops; +} + +#else + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return false; +} + +#endif + #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 76d3562d3006..4207d06996a4 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR5: Unbuffered DDR5 RAM * @MEM_NVDIMM: Non-volatile RAM * @MEM_WIO2: Wide I/O 2. + * @MEM_HBM2: High bandwidth Memory Gen 2. */ enum mem_type { MEM_EMPTY = 0, @@ -212,6 +213,7 @@ enum mem_type { MEM_DDR5, MEM_NVDIMM, MEM_WIO2, + MEM_HBM2, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -239,6 +241,7 @@ enum mem_type { #define MEM_FLAG_DDR5 BIT(MEM_DDR5) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) #define MEM_FLAG_WIO2 BIT(MEM_WIO2) +#define MEM_FLAG_HBM2 BIT(MEM_HBM2) /** * enum edac_type - Error Detection and Correction capabilities and mode diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index 99580c22f91a..34c2175e6a1e 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -10,6 +10,9 @@ struct eeprom_93xx46_platform_data { #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ #define EE_READONLY 0x08 /* forbid writing */ +#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */ +#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */ +#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */ unsigned int quirks; /* Single word read transfers only; no sequential read. 
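Assuming the new EE_SIZE* bits live alongside the existing EE_ADDR*/EE_READONLY bits in the platform data's flags field (as their placement in the struct suggests), board code might select the chip variant like this sketch:

static struct eeprom_93xx46_platform_data example_eeprom_pdata = {
	.flags = EE_ADDR16 | EE_SIZE1K,	/* a 93xx46 part with 16-bit addressing */
};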
*/ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index dcb2f9022c1d..ef9ceead3db1 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -117,9 +117,11 @@ extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); extern void elv_merged_request(struct request_queue *, struct request *, enum elv_merge); -extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); +extern bool elv_attempt_insert_merge(struct request_queue *, struct request *, + struct list_head *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); +void elevator_init_mq(struct request_queue *q); /* * io scheduler registration diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 757fc60658fa..39dcadd492b5 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -11,7 +11,7 @@ #include <linux/types.h> /** - * em_perf_state - Performance state of a performance domain + * struct em_perf_state - Performance state of a performance domain * @frequency: The frequency in KHz, for consistency with CPUFreq * @power: The power consumed at this level (by 1 CPU or by a registered * device). It can be a total power: static and dynamic. @@ -25,7 +25,7 @@ struct em_perf_state { }; /** - * em_perf_domain - Performance domain + * struct em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states * @milliwatts: Flag indicating the power values are in milli-Watts @@ -53,6 +53,22 @@ struct em_perf_domain { #ifdef CONFIG_ENERGY_MODEL #define EM_MAX_POWER 0xFFFF +/* + * Increase resolution of energy estimation calculations for 64-bit + * architectures. The extra resolution improves decision made by EAS for the + * task placement when two Performance Domains might provide similar energy + * estimation values (w/o better resolution the values could be equal). + * + * We increase resolution only if we have enough bits to allow this increased + * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit + * are pretty high and the returns do not justify the increased costs. + */ +#ifdef CONFIG_64BIT +#define em_scale_power(p) ((p) * 1000) +#else +#define em_scale_power(p) (p) +#endif + struct em_data_callback { /** * active_power() - Provide power at the next performance state of @@ -87,10 +103,12 @@ void em_dev_unregister_perf_domain(struct device *dev); /** * em_cpu_energy() - Estimates the energy consumed by the CPUs of a - performance domain + * performance domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain + * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which + * might reflect reduced frequency (due to thermal) * * This function must be used only for CPU devices. There is no validation, * i.e. if the EM is a CPU type and has cpumask allocated. It is called from @@ -100,7 +118,8 @@ void em_dev_unregister_perf_domain(struct device *dev); * a capacity state satisfying the max utilization of the domain. 
*/ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, - unsigned long max_util, unsigned long sum_util) + unsigned long max_util, unsigned long sum_util, + unsigned long allowed_cpu_cap) { unsigned long freq, scale_cpu; struct em_perf_state *ps; @@ -112,11 +131,17 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, /* * In order to predict the performance state, map the utilization of * the most utilized CPU of the performance domain to a requested - * frequency, like schedutil. + * frequency, like schedutil. Take also into account that the real + * frequency might be set lower (due to thermal capping). Thus, clamp + * max utilization to the allowed CPU capacity before calculating + * effective frequency. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); ps = &pd->table[pd->nr_perf_states - 1]; + + max_util = map_util_perf(max_util); + max_util = min(max_util, allowed_cpu_cap); freq = map_util_freq(max_util, ps->frequency, scale_cpu); /* @@ -209,7 +234,8 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev) return NULL; } static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, - unsigned long max_util, unsigned long sum_util) + unsigned long max_util, unsigned long sum_util, + unsigned long allowed_cpu_cap) { return 0; } diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 8b2b1d68b954..0d7865a0731c 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -2,7 +2,12 @@ #ifndef __LINUX_ENTRYKVM_H #define __LINUX_ENTRYKVM_H -#include <linux/entry-common.h> +#include <linux/static_call_types.h> +#include <linux/tracehook.h> +#include <linux/syscalls.h> +#include <linux/seccomp.h> +#include <linux/sched.h> +#include <linux/tick.h> /* Transfer to guest mode work */ #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK @@ -57,7 +62,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu); static inline void xfer_to_guest_mode_prepare(void) { lockdep_assert_irqs_disabled(); - rcu_nocb_flush_deferred_wakeup(); + tick_nohz_user_enter_prepare(); } /** diff --git a/include/linux/errno.h b/include/linux/errno.h index d73f597a2484..8b0c754bab02 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -31,5 +31,6 @@ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ #define ERECALLCONFLICT 530 /* conflict with recalled state */ +#define ENOGRACE 531 /* NFS file lock reclaim refused */ #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 330345b1be54..928c411bd509 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -300,6 +300,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src) } /** + * eth_hw_addr_set - Assign Ethernet address to a net_device + * @dev: pointer to net_device structure + * @addr: address to assign + * + * Assign given address to the net_device, addr_assign_type is not changed. 
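A minimal caller-side sketch of the extended interface, assuming the scheduler has already computed the utilization values and a thermally allowed capacity (the function and variable names here are illustrative):

/* linux/energy_model.h provides em_cpu_energy() */
static unsigned long my_estimate_energy(struct em_perf_domain *pd,
					unsigned long max_util,
					unsigned long sum_util,
					unsigned long thermal_cap)
{
	/*
	 * With the new argument, utilization above the thermally allowed
	 * capacity can no longer select an unreachable performance state.
	 */
	return em_cpu_energy(pd, max_util, sum_util, thermal_cap);
}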
+ */ +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} + +/** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to * @src: pointer to net_device to copy dev_addr from diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e030f7510cd3..849524b55d89 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -15,10 +15,9 @@ #include <linux/bitmap.h> #include <linux/compat.h> +#include <linux/netlink.h> #include <uapi/linux/ethtool.h> -#ifdef CONFIG_COMPAT - struct compat_ethtool_rx_flow_spec { u32 flow_type; union ethtool_flow_union h_u; @@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc { u32 rule_locs[]; }; -#endif /* CONFIG_COMPAT */ - #include <linux/rculist.h> /** @@ -176,6 +173,11 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; +}; + /** * ethtool_intersect_link_masks - Given two link masks, AND them together * @dst: first mask and where result is stored @@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) #define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) #define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0) +#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22) +#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) @@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) +#define ETHTOOL_COALESCE_USE_CQE \ + (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX) #define ETHTOOL_STAT_NOT_SET (~0ULL) @@ -401,12 +407,12 @@ struct ethtool_rmon_stats { * required information to the driver. 
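How a driver might advertise the new CQE knobs is sketched below, assuming the four-argument coalesce ops shown in the ethtool_ops hunk that follows; the my_* names are hypothetical:

static int my_get_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *ec,
			   struct kernel_ethtool_coalesce *kec,
			   struct netlink_ext_ack *extack)
{
	ec->rx_usecs = 64;		/* legacy parameter, still reported */
	kec->use_cqe_mode_rx = 1;	/* new kernel-side CQE knob */
	return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_CQE,
	.get_coalesce = my_get_coalesce,
};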
*/ struct ethtool_module_eeprom { - __u32 offset; - __u32 length; - __u8 page; - __u8 bank; - __u8 i2c_address; - __u8 *data; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; + u8 *data; }; /** @@ -606,8 +612,14 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); - int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*get_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, @@ -758,6 +770,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); /** + * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller + * is responsible for freeing the memory of vclock_index + * @dev: pointer to net_device structure + * @vclock_index: pointer to pointer of vclock index + * + * Return: number of phc vclocks + */ +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); + +/** * ethtool_sprintf - Write formatted string to ethtool string data * @data: Pointer to start of string to update * @fmt: Format of string to write diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index fa0a524baed0..305d5f19093b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> +#include <linux/sched.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); -DECLARE_PER_CPU(int, eventfd_wake_count); - -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return this_cpu_read(eventfd_wake_count); + return !current->in_eventfd_signal; } #else /* CONFIG_EVENTFD */ @@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, return -ENOSYS; } -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return false; + return true; } static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 593322c946e6..3337745d81bd 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {} #endif +#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT) +/* ARM OABI has an incompatible struct layout and needs a special handler */ +extern struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent); +#else +static inline struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent) +{ + if (__put_user(revents, &uevent->events) || + __put_user(data, &uevent->data)) + return NULL; + + return uevent+1; +} +#endif + #endif /* #ifndef _LINUX_EVENTPOLL_H */ diff --git a/include/linux/evm.h b/include/linux/evm.h index 8302bc29bb35..4c374be70247 100644 ---
a/include/linux/evm.h +++ b/include/linux/evm.h @@ -23,18 +23,25 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, struct integrity_iint_cache *iint); extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); -extern int evm_inode_setxattr(struct dentry *dentry, const char *name, +extern int evm_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, size_t size); extern void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); -extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); +extern int evm_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *xattr_name); extern void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name); extern int evm_inode_init_security(struct inode *inode, const struct xattr *xattr_array, struct xattr *evm); +extern bool evm_revalidate_status(const char *xattr_name); +extern int evm_protected_xattr_if_enabled(const char *req_xattr_name); +extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, + int buffer_size, char type, + bool canonical_fmt); #ifdef CONFIG_FS_POSIX_ACL extern int posix_xattr_acl(const char *xattrname); #else @@ -71,7 +78,8 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) { return; } -static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, +static inline int evm_inode_setxattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *name, const void *value, size_t size) { return 0; @@ -85,7 +93,8 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry, return; } -static inline int evm_inode_removexattr(struct dentry *dentry, +static inline int evm_inode_removexattr(struct user_namespace *mnt_userns, + struct dentry *dentry, const char *xattr_name) { return 0; @@ -104,5 +113,22 @@ static inline int evm_inode_init_security(struct inode *inode, return 0; } +static inline bool evm_revalidate_status(const char *xattr_name) +{ + return false; +} + +static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name) +{ + return false; +} + +static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, + int buffer_size, char type, + bool canonical_fmt) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_EVM */ #endif /* LINUX_EVM_H */ diff --git a/include/linux/export.h b/include/linux/export.h index 6271a5d9c988..27d848712b90 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -140,7 +140,12 @@ struct kernel_symbol { #define ___cond_export_sym(sym, sec, ns, enabled) \ __cond_export_sym_##enabled(sym, sec, ns) #define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns) + +#ifdef __GENKSYMS__ +#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym) +#else #define __cond_export_sym_0(sym, sec, ns) /* nothing */ +#endif #else diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index fe848901fcc3..3260fe714846 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -221,6 +221,8 @@ struct export_operations { #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply atomic attribute updates */ +#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do + asynchronous blocking locks */ unsigned long flags; }; diff --git a/include/linux/f2fs_fs.h
b/include/linux/f2fs_fs.h index 5487a80617a3..d445150c5350 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -34,6 +34,7 @@ #define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num) #define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num) #define F2FS_META_INO(sbi) ((sbi)->meta_ino_num) +#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid) #define F2FS_MAX_QUOTAS 3 @@ -229,6 +230,7 @@ struct f2fs_extent { #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ +#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ struct f2fs_inode { __le16 i_mode; /* file mode */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index a16dbeced152..eec3b7c40811 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ #define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) +#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD) + /* * fanotify_init() flags that require CAP_SYS_ADMIN. * We do not allow unprivileged groups to request permission events. @@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ */ #define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ FAN_REPORT_TID | \ + FAN_REPORT_PIDFD | \ FAN_UNLIMITED_QUEUE | \ FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index ecfbcc0553a5..5950f8f5dc74 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -2,6 +2,7 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H +#include <linux/refcount.h> #include <linux/kgdb.h> #include <uapi/linux/fb.h> @@ -435,7 +436,7 @@ struct fb_tile_ops { struct fb_info { - atomic_t count; + refcount_t count; int node; int flags; /* diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 766fcd973beb..a332e79b3207 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -12,10 +12,6 @@ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) -/* List of all valid flags for the how->upgrade_mask argument: */ -#define VALID_UPGRADE_FLAGS \ - (UPGRADE_NOWRITE | UPGRADE_NOREAD) - /* List of all valid flags for the how->resolve argument: */ #define VALID_RESOLVE_FLAGS \ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 4e624c466583..c50882f19235 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); -int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, u64 len, - get_block_t *get_block); - #endif /* _LINUX_FIEMAP_H 1 */ diff --git a/include/linux/file.h b/include/linux/file.h index 2de2e4613d7b..51e830b4fe3a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -94,6 +94,9 @@ extern void fd_install(unsigned int fd, struct file *file); extern int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); + +extern int receive_fd(struct file *file, unsigned int o_flags); + static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { @@ -101,10 +104,6 @@ static inline int receive_fd_user(struct file *file, int __user *ufd, return -EFAULT; return 
__receive_fd(file, ufd, o_flags); } -static inline int receive_fd(struct file *file, unsigned int o_flags) -{ - return __receive_fd(file, NULL, o_flags); -} int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); diff --git a/include/linux/filter.h b/include/linux/filter.h index 9a09547bc7ba..4a93c12543ee 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -5,8 +5,6 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ -#include <stdarg.h> - #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/compat.h> @@ -73,6 +71,11 @@ struct ctl_table_header; /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -390,6 +393,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ @@ -559,7 +572,8 @@ struct bpf_prog { kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? */ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1; /* Do we call get_func_ip() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -584,25 +598,38 @@ struct sk_filter { DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); -#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ - u32 __ret; \ - cant_migrate(); \ - if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ - struct bpf_prog_stats *__stats; \ - u64 __start = sched_clock(); \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - __stats = this_cpu_ptr(prog->stats); \ - u64_stats_update_begin(&__stats->syncp); \ - __stats->cnt++; \ - __stats->nsecs += sched_clock() - __start; \ - u64_stats_update_end(&__stats->syncp); \ - } else { \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - } \ - __ret; }) - -#define BPF_PROG_RUN(prog, ctx) \ - __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func) +typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx, + const struct bpf_insn *insnsi, + unsigned int (*bpf_func)(const void *, + const struct bpf_insn *)); + +static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, + const void *ctx, + bpf_dispatcher_fn dfunc) +{ + u32 ret; + + cant_migrate(); + if (static_branch_unlikely(&bpf_stats_enabled_key)) { + struct bpf_prog_stats *stats; + u64 start = sched_clock(); + + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + stats = this_cpu_ptr(prog->stats); + u64_stats_update_begin(&stats->syncp); + stats->cnt++; + stats->nsecs += sched_clock() - start; + u64_stats_update_end(&stats->syncp); + } else { + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + } + return ret; +} + +static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx) +{ + return __bpf_prog_run(prog, 
ctx, bpf_dispatcher_nop_func); +} /* * Use in preemptible and therefore migratable context to make sure that @@ -621,7 +648,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, u32 ret; migrate_disable(); - ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func); + ret = bpf_prog_run(prog, ctx); migrate_enable(); return ret; } @@ -646,6 +673,7 @@ struct bpf_redirect_info { u32 flags; u32 tgt_index; void *tgt_value; + struct bpf_map *map; u32 map_id; enum bpf_map_type map_type; u32 kern_flags; @@ -693,7 +721,7 @@ static inline void bpf_restore_data_end( cb->data_end = saved_data_end; } -static inline u8 *bpf_skb_cb(struct sk_buff *skb) +static inline u8 *bpf_skb_cb(const struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. Since this also needs to work with @@ -714,8 +742,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) /* Must be invoked with migration disabled */ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, - struct sk_buff *skb) + const void *ctx) { + const struct sk_buff *skb = ctx; u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; @@ -725,7 +754,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, memset(cb_data, 0, sizeof(cb_saved)); } - res = BPF_PROG_RUN(prog, skb); + res = bpf_prog_run(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); @@ -759,16 +788,25 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, DECLARE_BPF_DISPATCHER(xdp) +DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); + +u32 xdp_master_redirect(struct xdp_buff *xdp); + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { - /* Caller needs to hold rcu_read_lock() (!), otherwise program - * can be released while still running, or map elements could be - * freed early while still having concurrent users. XDP fastpath - * already takes rcu_read_lock() when fetching the program, so - * it's not necessary here anymore. + /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus + * under local_bh_disable(), which provides the needed RCU protection + * for accessing map entries. 
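For callers converting from the removed BPF_PROG_RUN() macro, a minimal sketch (assuming an already-loaded prog and an skb context; the wrapper name is illustrative):

static u32 my_run_filter(const struct bpf_prog *prog, struct sk_buff *skb)
{
	u32 act;

	migrate_disable();		/* bpf_prog_run() asserts cant_migrate() */
	act = bpf_prog_run(prog, skb);	/* was: BPF_PROG_RUN(prog, skb) */
	migrate_enable();
	return act;
}

In fully preemptible contexts this is exactly the pattern bpf_prog_run_pin_on_cpu() wraps, as the hunk above shows.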
*/ - return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) + act = xdp_master_redirect(xdp); + } + + return act; } void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); @@ -995,11 +1033,13 @@ void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, + struct sock *migrating_sk, u32 hash); #else static inline struct sock * bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, + struct sock *migrating_sk, u32 hash) { return NULL; @@ -1412,7 +1452,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; @@ -1450,7 +1490,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; @@ -1464,17 +1504,19 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, } #endif /* IS_ENABLED(CONFIG_IPV6) */ -static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags, +static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, + u64 flags, const u64 flag_mask, void *lookup_elem(struct bpf_map *map, u32 key)) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; /* Lower bits of the flags are used as return code on lookup failure */ - if (unlikely(flags > XDP_TX)) + if (unlikely(flags & ~(action_mask | flag_mask))) return XDP_ABORTED; ri->tgt_value = lookup_elem(map, ifindex); - if (unlikely(!ri->tgt_value)) { + if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) { /* If the lookup fails we want to clear out the state in the * redirect_info struct completely, so that if an eBPF program * performs multiple lookups, the last one always takes @@ -1482,13 +1524,21 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind */ ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */ ri->map_type = BPF_MAP_TYPE_UNSPEC; - return flags; + return flags & action_mask; } ri->tgt_index = ifindex; ri->map_id = map->id; ri->map_type = map->map_type; + if (flags & BPF_F_BROADCAST) { + WRITE_ONCE(ri->map, map); + ri->flags = flags; + } else { + WRITE_ONCE(ri->map, NULL); + ri->flags = 0; + } + return XDP_REDIRECT; } diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 84e346ae766e..25109192cebe 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -6,8 +6,8 @@ #include <linux/compiler.h> #include <linux/gfp.h> -#define FW_ACTION_NOHOTPLUG 0 -#define FW_ACTION_HOTPLUG 1 +#define FW_ACTION_NOUEVENT 0 +#define FW_ACTION_UEVENT 1 struct firmware { size_t size; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 9d1a5c175065..56b426fe020c 100644 --- 
a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -52,6 +52,10 @@ #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U +/* Loader commands */ +#define PM_LOAD_PDI 0x701 +#define PDI_SRC_DDR 0xF + /* * Firmware FPGA Manager flags * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration @@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value); +int zynqmp_pm_load_pdi(const u32 src, const u64 address); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, { return -ENODEV; } + +static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h index 0b08ac20ab16..a6b4c07858cc 100644 --- a/include/linux/fpga/altera-pr-ip-core.h +++ b/include/linux/fpga/altera-pr-ip-core.h @@ -13,6 +13,5 @@ #include <linux/io.h> int alt_pr_register(struct device *dev, void __iomem *reg_base); -void alt_pr_unregister(struct device *dev); #endif /* _ALT_PR_IP_CORE_H */ diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index 817600a32c93..6c3c28806ff1 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -11,7 +11,7 @@ struct fpga_bridge; /** * struct fpga_bridge_ops - ops for low level FPGA bridge drivers * @enable_show: returns the FPGA bridge's status - * @enable_set: set a FPGA bridge as enabled or disabled + * @enable_set: set an FPGA bridge as enabled or disabled * @fpga_bridge_remove: set FPGA into a specific state during driver remove * @groups: optional attribute groups. 
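A hypothetical caller of the new loader command, assuming the PDI image has already been placed in DDR at a DMA-able address obtained elsewhere:

static int my_load_pdi(u64 pdi_dma_addr)
{
	/* issues PM_LOAD_PDI with a DDR source via the new wrapper */
	return zynqmp_pm_load_pdi(PDI_SRC_DDR, pdi_dma_addr);
}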
*/ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 2bc3030a69e5..474c1f506307 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -75,7 +75,7 @@ enum fpga_mgr_states { #define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) /** - * struct fpga_image_info - information specific to a FPGA image + * struct fpga_image_info - information specific to an FPGA image * @flags: boolean flags as defined above * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) @@ -110,7 +110,7 @@ struct fpga_image_info { * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code - * @write_init: prepare the FPGA to receive confuration data + * @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done diff --git a/include/linux/fs.h b/include/linux/fs.h index c3c88fdb9b2a..e7a633353fd2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -319,6 +319,8 @@ enum rw_hint { /* iocb->ki_waitq is valid */ #define IOCB_WAITQ (1 << 19) #define IOCB_NOIO (1 << 20) +/* can use bio alloc cache */ +#define IOCB_ALLOC_CACHE (1 << 21) struct kiocb { struct file *ki_filp; @@ -436,6 +438,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * struct address_space - Contents of a cacheable, mappable object. * @host: Owner, either the inode or the block_device. * @i_pages: Cached pages. + * @invalidate_lock: Guards coherency between page cache contents and + * file offset->disk block mappings in the filesystem during invalidates. + * It is also used to block modification of page cache contents through + * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). @@ -453,6 +459,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping, struct address_space { struct inode *host; struct xarray i_pages; + struct rw_semaphore invalidate_lock; gfp_t gfp_mask; atomic_t i_mmap_writable; #ifdef CONFIG_READ_ONLY_THP_FOR_FS @@ -581,6 +588,11 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +/* + * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to + * cache the ACL. This also means that ->get_acl() can be called in RCU mode + * with the LOOKUP_RCU flag. 
+ */ #define ACL_DONT_CACHE ((void *)(-3)) static inline struct posix_acl * @@ -814,9 +826,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla down_read_nested(&inode->i_rwsem, subclass); } +static inline void filemap_invalidate_lock(struct address_space *mapping) +{ + down_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock(struct address_space *mapping) +{ + up_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_lock_shared(struct address_space *mapping) +{ + down_read(&mapping->invalidate_lock); +} + +static inline int filemap_invalidate_trylock_shared( + struct address_space *mapping) +{ + return down_read_trylock(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock_shared( + struct address_space *mapping) +{ + up_read(&mapping->invalidate_lock); +} + void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); +void filemap_invalidate_lock_two(struct address_space *mapping1, + struct address_space *mapping2); +void filemap_invalidate_unlock_two(struct address_space *mapping1, + struct address_space *mapping2); + + /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic @@ -997,6 +1042,7 @@ static inline struct file *get_file(struct file *f) #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ +#define FL_RECLAIM 4096 /* reclaiming from a reboot server */ #define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) @@ -1507,8 +1553,11 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* Pending fsnotify inode refs */ - atomic_long_t s_fsnotify_inode_refs; + /* + * Number of inode/mount/sb objects that are being watched, note that + * inodes objects are currently double-accounted. 
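A sketch of the serialization pattern the new invalidate_lock enables, e.g. in a filesystem hole-punching path (the function is illustrative; truncate_pagecache_range() is the existing mm helper):

static void my_punch_hole(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	filemap_invalidate_lock(mapping);
	truncate_pagecache_range(inode, start, end);
	/* ... the filesystem frees the underlying blocks here ... */
	filemap_invalidate_unlock(mapping);
}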
+ */ + atomic_long_t s_fsnotify_connectors; /* Being remounted read-only */ int s_readonly_remount; @@ -2065,7 +2114,7 @@ struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); int (*permission) (struct user_namespace *, struct inode *, int); - struct posix_acl * (*get_acl)(struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int, bool); int (*readlink) (struct dentry *, char __user *,int); @@ -2171,7 +2220,6 @@ struct super_operations { ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); struct dquot **(*get_dquots)(struct inode *); #endif - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, struct shrink_control *); long (*free_cached_objects)(struct super_block *, @@ -2458,7 +2506,6 @@ static inline void file_accessed(struct file *file) extern int file_modified(struct file *file); -int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); struct file_system_type { @@ -2488,6 +2535,7 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; @@ -2571,90 +2619,6 @@ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_MASK) -#ifdef CONFIG_MANDATORY_FILE_LOCKING -extern int locks_mandatory_locked(struct file *); -extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); - -/* - * Candidates for mandatory locking have the setgid bit set - * but no group execute bit - an otherwise meaningless combination. - */ - -static inline int __mandatory_lock(struct inode *ino) -{ - return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; -} - -/* - * ... 
and these candidates should be on SB_MANDLOCK mounted fs, - * otherwise these will be advisory locks - */ - -static inline int mandatory_lock(struct inode *ino) -{ - return IS_MANDLOCK(ino) && __mandatory_lock(ino); -} - -static inline int locks_verify_locked(struct file *file) -{ - if (mandatory_lock(locks_inode(file))) - return locks_mandatory_locked(file); - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, - struct file *f, - loff_t size) -{ - if (!inode->i_flctx || !mandatory_lock(inode)) - return 0; - - if (size < inode->i_size) { - return locks_mandatory_area(inode, f, size, inode->i_size - 1, - F_WRLCK); - } else { - return locks_mandatory_area(inode, f, inode->i_size, size - 1, - F_WRLCK); - } -} - -#else /* !CONFIG_MANDATORY_FILE_LOCKING */ - -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(struct inode *inode, struct file *filp, - loff_t start, loff_t end, unsigned char type) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - -#endif /* CONFIG_MANDATORY_FILE_LOCKING */ - - #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { @@ -2769,8 +2733,14 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode); extern struct file *file_open_name(struct filename *, int, umode_t); extern struct file *filp_open(const char *, int, umode_t); -extern struct file *file_open_root(struct dentry *, struct vfsmount *, +extern struct file *file_open_root(const struct path *, const char *, int, umode_t); +static inline struct file *file_open_root_mnt(struct vfsmount *mnt, + const char *name, int flags, umode_t mode) +{ + return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root}, + name, flags, mode); +} extern struct file * dentry_open(const struct path *, int, const struct cred *); extern struct file * open_with_fake_path(const struct path *, int, struct inode*, const struct cred *); @@ -2781,6 +2751,7 @@ static inline struct file *file_clone_open(struct file *file) extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); +extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); extern void putname(struct filename *name); @@ -2886,6 +2857,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); extern int filemap_check_errors(struct address_space *mapping); extern void __filemap_set_wb_err(struct address_space *mapping, int err); +int filemap_fdatawrite_wbc(struct address_space *mapping, + struct writeback_control *wbc); static inline int filemap_write_and_wait(struct address_space *mapping) { @@ -3050,15 +3023,20 @@ static inline void file_end_write(struct file *file) } /* + * This is used for regular files where some users -- especially the + * currently executed binary in a process, previously handled via + * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap + * read-write shared) accesses. + * * get_write_access() gets write permission for a file. 
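A sketch of the pattern this rewritten comment describes, as used around executing a binary (everything except the two fs.h helpers is illustrative):

static int my_exec_open(struct file *file)
{
	int err = deny_write_access(file);	/* drives i_writecount negative */

	if (err)
		return err;			/* an active writer exists */
	/* ... execute the binary ... */
	allow_write_access(file);
	return 0;
}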
* put_write_access() releases this write permission. - * This is used for regular files. - * We cannot support write (and maybe mmap read-write shared) accesses and - * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode - * can have the following values: - * 0: no writers, no VM_DENYWRITE mappings - * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist - * > 0: (i_writecount) users are writing to the file. + * deny_write_access() denies write access to a file. + * allow_write_access() re-enables write access to a file. + * + * The i_writecount field of an inode can have the following values: + * 0: no write access, no denied write access + * < 0: (-i_writecount) users that denied write access to the file. + * > 0: (i_writecount) users that have write access to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to @@ -3241,13 +3219,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, struct iov_iter *iter); -/* fs/block_dev.c */ -extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); -extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); -extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, - int datasync); -extern void block_sync_page(struct page *page); - /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); @@ -3353,6 +3324,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); +void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); @@ -3417,18 +3389,14 @@ extern int simple_rename(struct user_namespace *, struct inode *, extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); -extern int noop_set_page_dirty(struct page *page); extern void noop_invalidatepage(struct page *page, unsigned int offset, unsigned int length); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); -extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); -extern int simple_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); +extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); @@ -3471,6 +3439,8 @@ extern int buffer_migrate_page_norefs(struct address_space *, #define buffer_migrate_page_norefs NULL #endif +int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, + unsigned int ia_valid); int 
setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); void setattr_copy(struct user_namespace *, struct inode *inode, @@ -3624,7 +3594,7 @@ int proc_nr_dentry(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); -int __init get_filesystem_list(char *buf); +int __init list_bdev_fs_names(char *buf, size_t size); #define __FMODE_EXEC ((__force int) FMODE_EXEC) #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 37e1e8f7f08d..6b54982fc5f3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -139,6 +139,9 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); +extern int vfs_parse_fs_param_source(struct fs_context *fc, + struct fs_parameter *param); +extern void fc_drop_locked(struct fs_context *fc); /* * sget() wrappers to be called from the ->get_tree() op. diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 3235ddbdcc09..8d39491c5f9f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -147,7 +147,6 @@ struct fscache_retrieval { fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ - unsigned long start_time; /* time at which retrieval started */ atomic_t n_pages; /* number of pages to be retrieved */ }; @@ -385,9 +384,6 @@ struct fscache_object { struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ -#ifdef CONFIG_FSCACHE_OBJECT_LIST - struct rb_node objlist_link; /* link in global object list */ -#endif pgoff_t store_limit; /* current storage limit */ loff_t store_limit_l; /* current storage limit */ }; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index abc1c4737fb8..a4dab5998613 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -123,15 +123,17 @@ struct fscache_netfs { * - indices are created on disk just-in-time */ struct fscache_cookie { - atomic_t usage; /* number of users of this cookie */ + refcount_t ref; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ atomic_t n_active; /* number of active users of netfs ptrs */ + unsigned int debug_id; spinlock_t lock; spinlock_t stores_lock; /* lock on page store tree */ struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ struct hlist_bl_node hash_link; /* Link in hash table */ + struct list_head proc_link; /* Link in proc list */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 2ea1387bb497..e912ed9141d9 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -47,27 
+47,128 @@ struct fscrypt_name { #define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION + /* - * fscrypt superblock flags + * If set, the fscrypt bounce page pool won't be allocated (unless another + * filesystem needs it). Set this if the filesystem always uses its own bounce + * pages for writes and therefore won't need the fscrypt bounce page pool. */ #define FS_CFLG_OWN_PAGES (1U << 1) -/* - * crypto operations for filesystems - */ +/* Crypto operations for filesystems */ struct fscrypt_operations { + + /* Set of optional flags; see above for allowed flags */ unsigned int flags; + + /* + * If set, this is a filesystem-specific key description prefix that + * will be accepted for "logon" keys for v1 fscrypt policies, in + * addition to the generic prefix "fscrypt:". This functionality is + * deprecated, so new filesystems shouldn't set this field. + */ const char *key_prefix; + + /* + * Get the fscrypt context of the given inode. + * + * @inode: the inode whose context to get + * @ctx: the buffer into which to get the context + * @len: length of the @ctx buffer in bytes + * + * Return: On success, returns the length of the context in bytes; this + * may be less than @len. On failure, returns -ENODATA if the + * inode doesn't have a context, -ERANGE if the context is + * longer than @len, or another -errno code. + */ int (*get_context)(struct inode *inode, void *ctx, size_t len); + + /* + * Set an fscrypt context on the given inode. + * + * @inode: the inode whose context to set. The inode won't already have + * an fscrypt context. + * @ctx: the context to set + * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE) + * @fs_data: If called from fscrypt_set_context(), this will be the + * value the filesystem passed to fscrypt_set_context(). + * Otherwise (i.e. when called from + * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL. + * + * i_rwsem will be held for write. + * + * Return: 0 on success, -errno on failure. + */ int (*set_context)(struct inode *inode, const void *ctx, size_t len, void *fs_data); + + /* + * Get the dummy fscrypt policy in use on the filesystem (if any). + * + * Filesystems only need to implement this function if they support the + * test_dummy_encryption mount option. + * + * Return: A pointer to the dummy fscrypt policy, if the filesystem is + * mounted with test_dummy_encryption; otherwise NULL. + */ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb); + + /* + * Check whether a directory is empty. i_rwsem will be held for write. + */ bool (*empty_dir)(struct inode *inode); + + /* The filesystem's maximum ciphertext filename length, in bytes */ unsigned int max_namelen; + + /* + * Check whether the filesystem's inode numbers and UUID are stable, + * meaning that they will never be changed even by offline operations + * such as filesystem shrinking and therefore can be used in the + * encryption without the possibility of files becoming unreadable. + * + * Filesystems only need to implement this function if they want to + * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These + * flags are designed to work around the limitations of UFS and eMMC + * inline crypto hardware, and they shouldn't be used in scenarios where + * such hardware isn't being used. + * + * Leaving this NULL is equivalent to always returning false. + */ bool (*has_stable_inodes)(struct super_block *sb); + + /* + * Get the number of bits that the filesystem uses to represent inode + * numbers and file logical block numbers. 
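Taken together, a hypothetical filesystem hookup matching the contract documented above might look like this (the myfs_* callbacks are assumed to be defined elsewhere in the filesystem):

static int myfs_get_context(struct inode *inode, void *ctx, size_t len);
static int myfs_set_context(struct inode *inode, const void *ctx, size_t len,
			    void *fs_data);
static bool myfs_empty_dir(struct inode *inode);

static const struct fscrypt_operations myfs_cryptops = {
	.get_context	= myfs_get_context,
	.set_context	= myfs_set_context,
	.empty_dir	= myfs_empty_dir,
	.max_namelen	= 255,
};

/* registered at mount time with fscrypt_set_ops(sb, &myfs_cryptops) */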
+ * + * By default, both of these are assumed to be 64-bit. This function + * can be implemented to declare that either or both of these numbers is + * shorter, which may allow the use of the + * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of + * inline crypto hardware whose maximum DUN length is less than 64 bits + * (e.g., eMMC v5.2 spec compliant hardware). This function only needs + * to be implemented if support for one of these features is needed. + */ void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + + /* + * Return the number of block devices to which the filesystem may write + * encrypted file contents. + * + * If the filesystem can use multiple block devices (other than block + * devices that aren't used for encrypted file contents, such as + * external journal devices), and wants to support inline encryption, + * then it must implement this function. Otherwise it's not needed. + */ int (*get_num_devices)(struct super_block *sb); + + /* + * If ->get_num_devices() returns a value greater than 1, then this + * function is called to get the array of request_queues that the + * filesystem is using -- one per block device. (There may be duplicate + * entries in this array, as block devices can share a request_queue.) + */ void (*get_devices)(struct super_block *sb, struct request_queue **devs); }; @@ -253,6 +354,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { @@ -583,6 +685,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, return ERR_PTR(-EOPNOTSUPP); } +static inline int fscrypt_symlink_getattr(const struct path *path, + struct kstat *stat) +{ + return -EOPNOTSUPP; +} + static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 63b56aba925a..30ece3ae6df7 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev); +struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, + u16 if_id); extern struct bus_type fsl_mc_bus_type; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f8acddcf54fb..12d3a7d308ab 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { + if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0) + return; + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } @@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, static inline void fsnotify_inode(struct inode *inode, __u32 mask) { + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) + return; + if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; @@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, { struct inode *inode = d_inode(dentry); + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 
0) + return 0; + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a69f363b61bf..832e65f06754 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); +/** + * ftrace_need_init_nop - return whether nop call sites should be initialized + * + * Normally the compiler's -mnop-mcount generates suitable nops, so we don't + * need to call ftrace_init_nop() if the code is built with that flag. + * Architectures where this is not always the case may define their own + * condition. + * + * Return must be: + * 0 if ftrace_init_nop() should be called + * Nonzero if ftrace_init_nop() should not be called + */ + +#ifndef ftrace_need_init_nop +#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT)) +#endif /** * ftrace_init_nop - initialize a nop call site diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 0abd9a1d2852..f6faa31289ba 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -7,12 +7,21 @@ extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); #endif +#ifdef CONFIG_OSNOISE_TRACER +extern bool trace_osnoise_callback_enabled; +extern void trace_osnoise_callback(bool enter); +#endif + static inline void ftrace_nmi_enter(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif +#ifdef CONFIG_OSNOISE_TRACER + if (trace_osnoise_callback_enabled) + trace_osnoise_callback(true); +#endif } static inline void ftrace_nmi_exit(void) @@ -21,6 +30,10 @@ static inline void ftrace_nmi_exit(void) if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); #endif +#ifdef CONFIG_OSNOISE_TRACER + if (trace_osnoise_callback_enabled) + trace_osnoise_callback(false); +#endif } #endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h new file mode 100644 index 000000000000..faf603c48c86 --- /dev/null +++ b/include/linux/fwnode_mdio.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * FWNODE helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_FWNODE_MDIO_H +#define __LINUX_FWNODE_MDIO_H + +#include <linux/phy.h> + +#if IS_ENABLED(CONFIG_FWNODE_MDIO) +int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, struct fwnode_handle *child, u32 addr); + +int fwnode_mdiobus_register_phy(struct mii_bus *bus, struct fwnode_handle *child, u32 addr); + +#else /* CONFIG_FWNODE_MDIO */ +static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, struct fwnode_handle *child, u32 addr) +{ + return -EINVAL; +} + +static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus, struct fwnode_handle *child, u32 addr) +{ + return -EINVAL; +} +#endif + +#endif /* __LINUX_FWNODE_MDIO_H */ diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index bc738504ab4a..c285968e437a 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -8,34 +8,11 @@ /* All generic netlink requests are serialized by a global lock.
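An illustrative MDIO-bus driver fragment using the new helper to register every PHY child described by firmware ('bus' and 'fwnode' come from the driver's probe path; the function name is hypothetical):

static int my_mdio_register_children(struct mii_bus *bus,
				     struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;
	u32 addr;
	int ret;

	fwnode_for_each_child_node(fwnode, child) {
		if (fwnode_property_read_u32(child, "reg", &addr))
			continue;	/* skip children without an address */
		ret = fwnode_mdiobus_register_phy(bus, child, addr);
		if (ret) {
			fwnode_handle_put(child);	/* drop the loop's ref */
			return ret;
		}
	}
	return 0;
}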
*/ extern void genl_lock(void); extern void genl_unlock(void); -#ifdef CONFIG_LOCKDEP -extern bool lockdep_genl_is_held(void); -#endif /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; -/** - * rcu_dereference_genl - rcu_dereference with debug checking - * @p: The pointer to read, prior to dereferencing - * - * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() - * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() - */ -#define rcu_dereference_genl(p) \ - rcu_dereference_check(p, lockdep_genl_is_held()) - -/** - * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex - * @p: The pointer to read, prior to dereferencing - * - * Return the value of the specified RCU-protected pointer, but omit - * the READ_ONCE(), because caller holds genl mutex. - */ -#define genl_dereference(p) \ - rcu_dereference_protected(p, lockdep_genl_is_held()) - #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 6fc26f7bdf71..c68d83c87f83 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -60,9 +60,6 @@ struct partition_meta_info { * device. * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl. * - * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up", - * with a similar meaning to network interfaces. - * * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include * partition information in ``/proc/partitions`` or in the output of * printk_all_partitions(). @@ -97,7 +94,6 @@ struct partition_meta_info { /* 2 is unused (used to be GENHD_FL_DRIVERFS) */ /* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */ #define GENHD_FL_CD 0x0008 -#define GENHD_FL_UP 0x0010 #define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020 #define GENHD_FL_EXT_DEVT 0x0040 #define GENHD_FL_NATIVE_CAPACITY 0x0080 @@ -153,8 +149,15 @@ struct gendisk { unsigned long state; #define GD_NEED_PART_SCAN 0 #define GD_READ_ONLY 1 - struct kobject *slave_dir; + struct mutex open_mutex; /* open/close mutex */ + unsigned open_partitions; /* number of open partitions */ + + struct backing_dev_info *bdi; + struct kobject *slave_dir; +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED + struct list_head slave_bdevs; +#endif struct timer_rand_state *random; atomic_t sync_io; /* RAID */ struct disk_events *ev; @@ -167,8 +170,14 @@ struct gendisk { int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; + u64 diskseq; }; +static inline bool disk_live(struct gendisk *disk) +{ + return !inode_unhashed(disk->part0->bd_inode); +} + /* * The gendisk is refcounted by the part0 block_device, and the bd_device * therein is also used for device model presentation in sysfs. 
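Example (an illustrative sketch, not part of the patch; the mydrv_* name is hypothetical): the new disk_live() helper above lets a driver ask whether the gendisk is still registered, since part0's inode is unhashed exactly when del_gendisk() runs:

    #include <linux/genhd.h>

    /* Refuse to start new work once the disk has been deleted. */
    static int mydrv_start_io(struct gendisk *disk)
    {
            if (!disk_live(disk))
                    return -ENODEV;
            return 0;
    }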
@@ -205,20 +214,13 @@ static inline dev_t disk_devt(struct gendisk *disk) void disk_uevent(struct gendisk *disk, enum kobject_action action); /* block/genhd.c */ -extern void device_add_disk(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups); -static inline void add_disk(struct gendisk *disk) +int device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline int add_disk(struct gendisk *disk) { - device_add_disk(NULL, disk, NULL); + return device_add_disk(NULL, disk, NULL); } -extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); -static inline void add_disk_no_queue_reg(struct gendisk *disk) -{ - device_add_disk_no_queue_reg(NULL, disk); -} - extern void del_gendisk(struct gendisk *gp); -extern struct block_device *bdget_disk(struct gendisk *disk, int partno); void set_disk_ro(struct gendisk *disk, bool read_only); @@ -232,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); bool set_capacity_and_notify(struct gendisk *disk, sector_t size); +bool disk_force_media_change(struct gendisk *disk, unsigned int events); /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; @@ -252,30 +255,30 @@ static inline sector_t get_capacity(struct gendisk *disk) return bdev_nr_sectors(disk->part0); } -int bdev_disk_changed(struct block_device *bdev, bool invalidate); -int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); +int bdev_disk_changed(struct gendisk *disk, bool invalidate); void blk_drop_partitions(struct gendisk *disk); -extern struct gendisk *__alloc_disk_node(int minors, int node_id); +struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, + struct lock_class_key *lkclass); extern void put_disk(struct gendisk *disk); +struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); -#define alloc_disk_node(minors, node_id) \ +/** + * blk_alloc_disk - allocate a gendisk structure + * @node_id: numa node to allocate on + * + * Allocate and pre-initialize a gendisk structure for use with BIO based + * drivers. 
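+ * The returned gendisk already has a request_queue attached; the pair is
+ * torn down together again with blk_cleanup_disk(), declared below.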
+ * + * Context: can sleep + */ +#define blk_alloc_disk(node_id) \ ({ \ static struct lock_class_key __key; \ - const char *__name; \ - struct gendisk *__disk; \ - \ - __name = "(gendisk_completion)"#minors"("#node_id")"; \ - \ - __disk = __alloc_disk_node(minors, node_id); \ \ - if (__disk) \ - lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ - \ - __disk; \ + __blk_alloc_disk(node_id, &__key); \ }) - -#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +void blk_cleanup_disk(struct gendisk *disk); int __register_blkdev(unsigned int major, const char *name, void (*probe)(dev_t devt)); @@ -291,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size); int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -#ifdef CONFIG_SYSFS +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +int bd_register_pending_holders(struct gendisk *disk); #else static inline int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) @@ -304,8 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { } -#endif /* CONFIG_SYSFS */ +static inline int bd_register_pending_holders(struct gendisk *disk) +{ + return 0; +} +#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ +dev_t part_devt(struct gendisk *disk, u8 partno); +void inc_diskseq(struct gendisk *disk); dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 11da8af06704..55b2ec1f965a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -53,8 +53,10 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x100000u #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u +#define ___GFP_ZEROTAGS 0x800000u +#define ___GFP_SKIP_KASAN_POISON 0x1000000u #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x800000u +#define ___GFP_NOLOCKDEP 0x2000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -229,16 +231,25 @@ struct vm_area_struct; * %__GFP_COMP address compound page metadata. * * %__GFP_ZERO returns a zeroed page on success. + * + * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if + * __GFP_ZERO is set. + * + * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned + * on deallocation. Typically used for userspace pages. Currently only has an + * effect in HW tags mode. 
*/ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) +#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** @@ -319,7 +330,8 @@ struct vm_area_struct; #define GFP_DMA __GFP_DMA #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ + __GFP_SKIP_KASAN_POISON) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) @@ -494,8 +506,8 @@ static inline int gfp_zonelist(gfp_t flags) * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * - * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets - * optimized to &contig_page_data at compile-time. + * For the case of non-NUMA systems the NODE_DATA() gets optimized to + * &contig_page_data at compile-time. */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { @@ -536,6 +548,15 @@ alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_arr return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); } +static inline unsigned long +alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) +{ + if (nid == NUMA_NO_NODE) + nid = numa_mem_id(); + + return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); +} + /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). 
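Example (an illustrative sketch, not from the patch; mydrv_fill_pages() is hypothetical): the new alloc_pages_bulk_array_node() wrapper above takes an explicit preferred node, maps NUMA_NO_NODE to the local node, and returns how many entries of the array are populated on return, which may be fewer than requested under memory pressure:

    #include <linux/gfp.h>

    static unsigned long mydrv_fill_pages(int nid, unsigned long nr,
                                          struct page **pages)
    {
            /* NULL slots in pages[] are filled; non-NULL slots are kept. */
            return alloc_pages_bulk_array_node(GFP_KERNEL, nid, nr, pages);
    }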
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index c73b25bc9213..97a28ad3393b 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -609,7 +609,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) struct device_node; -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, +struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); @@ -619,7 +619,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, struct device_node; static inline -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, +struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) @@ -633,7 +633,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, struct device_node; struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, + const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); @@ -644,7 +644,7 @@ struct device_node; static inline struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, + const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) @@ -680,10 +680,10 @@ struct acpi_gpio_mapping { unsigned int quirks; }; -#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) - struct acpi_device; +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) + int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios); void acpi_dev_remove_driver_gpios(struct acpi_device *adev); @@ -692,9 +692,9 @@ int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); void devm_acpi_dev_remove_driver_gpios(struct device *dev); -#else /* CONFIG_GPIOLIB && CONFIG_ACPI */ +struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label); -struct acpi_device; +#else /* CONFIG_GPIOLIB && CONFIG_ACPI */ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 3a268781fcec..a0f9901dcae6 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -312,6 +312,9 @@ struct gpio_irq_chip { * get rid of the static GPIO number space in the long run. * @ngpio: the number of GPIOs handled by this controller; the last GPIO * handled is (base + ngpio - 1). + * @offset: when multiple gpio chips belong to the same device this + * can be used as offset within the device so friendly names can + * be properly assigned. * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. Any entry in the array * may be NULL if there is no alias for the GPIO, however the @@ -398,6 +401,7 @@ struct gpio_chip { int base; u16 ngpio; + u16 offset; const char *const *names; bool can_sleep; diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index 334dd928042b..a9f7b7faf57b 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -37,6 +37,9 @@ struct regmap; * offset to a register/bitmask pair. If not * given the default gpio_regmap_simple_xlate() * is used. 
+ * @drvdata: (Optional) Pointer to driver specific data which is + * not used by gpio-regmap but is provided "as is" to the + * driver callback(s). * * The ->reg_mask_xlate translates a given base address and GPIO offset to * register and mask pair. The base address is one of the given register @@ -78,13 +81,14 @@ struct gpio_regmap_config { int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, unsigned int *mask); + + void *drvdata; }; struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config); void gpio_regmap_unregister(struct gpio_regmap *gpio); struct gpio_regmap *devm_gpio_regmap_register(struct device *dev, const struct gpio_regmap_config *config); -void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data); void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio); #endif /* _LINUX_GPIO_REGMAP_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 69bc86ea382c..76878b357ffa 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -116,7 +116,6 @@ extern void rcu_nmi_exit(void); do { \ lockdep_off(); \ arch_nmi_enter(); \ - printk_nmi_enter(); \ BUG_ON(in_nmi() == NMI_MASK); \ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ } while (0) @@ -135,7 +134,6 @@ extern void rcu_nmi_exit(void); do { \ BUG_ON(!in_nmi()); \ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - printk_nmi_exit(); \ arch_nmi_exit(); \ lockdep_on(); \ } while (0) diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index cacc4dd27794..630a388035f1 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -22,7 +22,7 @@ struct hdlc_proto { void (*start)(struct net_device *dev); /* if open & DCD */ void (*stop)(struct net_device *dev); /* if open & !DCD */ void (*detach)(struct net_device *dev); - int (*ioctl)(struct net_device *dev, struct ifreq *ifr); + int (*ioctl)(struct net_device *dev, struct if_settings *ifs); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); @@ -54,7 +54,7 @@ typedef struct hdlc_device { /* Exported from hdlc module */ /* Called by hardware driver when a user requests HDLC service */ -int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs); /* Must be used by hardware driver on module startup/exit */ #define register_hdlc_device(dev) register_netdev(dev) diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index d4d633a49d36..5d70c3f98f5b 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -79,7 +79,7 @@ struct hdlcdrv_ops { */ int (*open)(struct net_device *); int (*close)(struct net_device *); - int (*ioctl)(struct net_device *, struct ifreq *, + int (*ioctl)(struct net_device *, void __user *, struct hdlcdrv_ioctl *, int); }; diff --git a/include/linux/hid.h b/include/linux/hid.h index 10e922cee4eb..9e067f937dbc 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -102,6 +102,7 @@ struct hid_item { #define HID_COLLECTION_PHYSICAL 0 #define HID_COLLECTION_APPLICATION 1 #define HID_COLLECTION_LOGICAL 2 +#define HID_COLLECTION_NAMED_ARRAY 4 /* * HID report descriptor global item tags @@ -800,6 +801,7 @@ struct hid_driver { * @raw_request: send raw report request to device (e.g. 
feature report) * @output_report: send output report to device * @idle: send idle request to device + * @may_wakeup: return whether the device may act as a wakeup source during system suspend */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); @@ -824,6 +826,7 @@ struct hid_ll_driver { int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); + bool (*may_wakeup)(struct hid_device *hdev); }; extern struct hid_ll_driver i2c_hid_ll_driver; @@ -1150,6 +1153,22 @@ static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle, } /** + * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system suspend + * + * @hdev: hid device + */ +static inline bool hid_hw_may_wakeup(struct hid_device *hdev) +{ + if (hdev->ll_driver->may_wakeup) + return hdev->ll_driver->may_wakeup(hdev); + + if (hdev->dev.parent) + return device_may_wakeup(hdev->dev.parent); + + return false; +} + +/** * hid_hw_wait - wait for buffered io to complete * * @hdev: hid device diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 7902c7d8b55f..4aa1031d3e4c 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr) static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); + pagefault_disable(); return __kmap_local_page_prot(page, prot); } @@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page) static inline void *kmap_atomic_pfn(unsigned long pfn) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); + pagefault_disable(); return __kmap_local_pfn_prot(pfn, kmap_prot); } @@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr) { kunmap_local_indexed(addr); pagefault_enable(); - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_enable(); + else + preempt_enable(); } unsigned int __nr_free_highpages(void); @@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr) static inline void *kmap_atomic(struct page *page) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); pagefault_disable(); return page_address(page); } @@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr) kunmap_flush_on_unmap(addr); #endif pagefault_enable(); - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_enable(); + else + preempt_enable(); } static inline unsigned int nr_free_highpages(void) { return 0; } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 832b49b50c7b..b4c49f9cc379 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -130,10 +130,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page) } #endif -#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} +#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE static inline void flush_kernel_vmap_range(void *vaddr, int size) { } @@ -152,28 +149,24 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) } #endif -#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE /** - * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with 
caller-specified movable GFP flags - * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * - * This function will allocate a page for a VMA but the caller is expected - * to specify via movableflags whether the page will be movable in the - * future or not + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or be reclaimed * * An architecture may override this function by defining - * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own + * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own * implementation. */ static inline struct page * -__alloc_zeroed_user_highpage(gfp_t movableflags, - struct vm_area_struct *vma, - unsigned long vaddr) +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) { - struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, - vma, vaddr); + struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (page) clear_user_highpage(page, vaddr); @@ -182,21 +175,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags, } #endif -/** - * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move - * @vma: The VMA the page is to be allocated for - * @vaddr: The virtual address the page will be inserted into - * - * This function will allocate a page for a VMA that the caller knows will - * be able to migrate in the future using move_pages() or reclaimed - */ -static inline struct page * -alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, - unsigned long vaddr) -{ - return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); -} - static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); @@ -204,6 +182,14 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } +#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE + +static inline void tag_clear_highpage(struct page *page) +{ +} + +#endif + /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. @@ -329,14 +315,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset, VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); + flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { - char *addr = kmap_atomic(page); + char *addr = kmap_local_page(page); memset(addr + offset, 0, len); - kunmap_atomic(addr); + flush_dcache_page(page); + kunmap_local(addr); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 866a0fa104c4..2fd2e91d5107 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -113,7 +113,7 @@ int hmm_range_fault(struct hmm_range *range); * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range * * When waiting for mmu notifiers we need some kind of time out otherwise we - * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to + * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to * wait already. 
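 *
 * As a sketch of typical use (following the usual mmu_interval_notifier
 * pattern for hmm_range_fault(); this loop is illustrative, not code added
 * by this patch):
 *
 *	unsigned long timeout =
 *		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 *
 *	do {
 *		if (time_after(jiffies, timeout))
 *			return -EBUSY;
 *		range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				continue;
 *			return ret;
 *		}
 *	} while (mmu_interval_read_retry(range.notifier, range.notifier_seq));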
*/ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 9b0487c88571..7bccf589aba7 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, u32 syncpt_id); +struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold); + /* * host1x channel */ @@ -216,8 +218,8 @@ struct host1x_job { struct host1x_client *client; /* Gathers and their memory */ - struct host1x_job_gather *gathers; - unsigned int num_gathers; + struct host1x_job_cmd *cmds; + unsigned int num_cmds; /* Array of handles to be pinned & unpinned */ struct host1x_reloc *relocs; @@ -234,9 +236,15 @@ struct host1x_job { u32 syncpt_incrs; u32 syncpt_end; + /* Completion waiter ref */ + void *waiter; + /* Maximum time to wait for this job */ unsigned int timeout; + /* Job has timed out and should be released */ + bool cancelled; + /* Index and number of slots used in the push buffer */ unsigned int first_get; unsigned int num_slots; @@ -257,12 +265,25 @@ struct host1x_job { /* Add a channel wait for previous ops to complete */ bool serialize; + + /* Fast-forward syncpoint increments on job timeout */ + bool syncpt_recovery; + + /* Callback called when job is freed */ + void (*release)(struct host1x_job *job); + void *user_data; + + /* Whether host1x-side firewall should be run for this job or not */ + bool enable_firewall; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, - u32 num_cmdbufs, u32 num_relocs); + u32 num_cmdbufs, u32 num_relocs, + bool skip_firewall); void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset); +void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, + bool relative, u32 next_class); struct host1x_job *host1x_job_get(struct host1x_job *job); void host1x_job_put(struct host1x_job *job); int host1x_job_pin(struct host1x_job *job, struct device *dev); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bb5e7b0a4274..0ee140176f10 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -318,16 +318,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -extern void clock_was_set_delayed(void); - extern unsigned int hrtimer_resolution; #else #define hrtimer_resolution (unsigned int)LOW_RES_NSEC -static inline void clock_was_set_delayed(void) { } - #endif static inline ktime_t @@ -351,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) timer->base->get_time()); } -extern void clock_was_set(void); #ifdef CONFIG_TIMERFD extern void timerfd_clock_was_set(void); +extern void timerfd_resume(void); #else static inline void timerfd_clock_was_set(void) { } +static inline void timerfd_resume(void) { } #endif -extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9626fda5efce..f123e15d966e 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -10,8 +10,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, - struct vm_area_struct *vma); -void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); + struct vm_area_struct *dst_vma, struct vm_area_struct 
*src_vma); +void huge_pmd_set_accessed(struct vm_fault *vmf); int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma); @@ -24,7 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) } #endif -vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); +vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); @@ -115,9 +115,34 @@ extern struct kobj_attribute shmem_enabled_attr; extern unsigned long transparent_hugepage_flags; +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + /* Don't have to check pgoff for anonymous vma */ + if (!vma_is_anonymous(vma)) { + if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, + HPAGE_PMD_NR)) + return false; + } + + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + return false; + return true; +} + +static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + /* Explicitly disabled through madvise. */ + if ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + return false; + return true; +} + /* * to be used on vmas which are known to support THP. - * Use transparent_hugepage_enabled otherwise + * Use transparent_hugepage_active otherwise */ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { @@ -128,15 +153,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) return false; - if (vma->vm_flags & VM_NOHUGEPAGE) + if (!transhuge_vma_enabled(vma, vma->vm_flags)) return false; if (vma_is_temporary_stack(vma)) return false; - if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) - return false; - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; @@ -150,24 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -bool transparent_hugepage_enabled(struct vm_area_struct *vma); - -#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) - -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long haddr) -{ - /* Don't have to check pgoff for anonymous vma */ - if (!vma_is_anonymous(vma)) { - if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != - (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) - return false; - } - - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) - return false; - return true; -} +bool transparent_hugepage_active(struct vm_area_struct *vma); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -283,9 +288,10 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap); -vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); +vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); extern struct page *huge_zero_page; +extern unsigned long huge_zero_pfn; static inline bool is_huge_zero_page(struct page *page) { @@ -294,7 +300,7 @@ static inline bool is_huge_zero_page(struct page *page) static inline bool is_huge_zero_pmd(pmd_t pmd) { - return is_huge_zero_page(pmd_page(pmd)); + return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) 
&& pmd_present(pmd); } static inline bool is_huge_zero_pud(pud_t pud) @@ -353,7 +359,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) +static inline bool transparent_hugepage_active(struct vm_area_struct *vma) { return false; } @@ -364,6 +370,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, return false; } +static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return false; +} + static inline void prep_transhuge_page(struct page *page) {} static inline bool is_transparent_hugepage(struct page *page) @@ -429,8 +441,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, return NULL; } -static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, - pmd_t orig_pmd) +static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) { return 0; } @@ -440,6 +451,11 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline bool is_huge_zero_pmd(pmd_t pmd) +{ + return false; +} + static inline bool is_huge_zero_pud(pud_t pud) { return false; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index b92f25ccef58..1faebe1cd0ed 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -29,12 +29,29 @@ typedef struct { unsigned long pd; } hugepd_t; #include <linux/shm.h> #include <asm/tlbflush.h> +/* + * For a HugeTLB page, there is more metadata to save in the struct page. But + * the head struct page cannot meet our needs, so we have to abuse other tail + * struct pages to store the metadata. In order to avoid conflicts caused by + * subsequent use of more tail struct pages, we gather these discrete indexes + * of tail struct page here. + */ +enum { + SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */ +#ifdef CONFIG_CGROUP_HUGETLB + SUBPAGE_INDEX_CGROUP, /* reuse page->private */ + SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */ + __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD, +#endif + __NR_USED_SUBPAGE, +}; + struct hugepage_subpool { spinlock_t lock; long count; long max_hpages; /* Maximum huge pages or -1 if no maximum. */ long used_hpages; /* Used count against maximum, includes */ - /* both alloced and reserved pages. */ + /* both allocated and reserved pages. */ struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ @@ -68,7 +85,7 @@ struct resv_map { * by a resv_map's lock. The set of regions within the resv_map represent * reservations for huge pages, or huge pages that have already been * instantiated within the map. The from and to elements are huge page - * indicies into the associated mapping. from indicates the starting index + * indices into the associated mapping. from indicates the starting index * of the region. to represents the first index past the end of the region. 
* * For example, a file region structure with from == 0 and to == 4 represents @@ -149,6 +166,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool isolate_huge_page(struct page *page, struct list_head *list); +int get_hwpoison_huge_page(struct page *page, bool *hugetlb); void putback_active_hugepage(struct page *page); void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); @@ -339,6 +357,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list) return false; } +static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb) +{ + return 0; +} + static inline void putback_active_hugepage(struct page *page) { } @@ -445,7 +468,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, - struct user_struct **user, int creat_flags, + struct ucounts **ucounts, int creat_flags, int page_size_log); static inline bool is_file_hugepages(struct file *file) @@ -465,7 +488,7 @@ static inline struct hstate *hstate_inode(struct inode *i) #define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, - struct user_struct **user, int creat_flags, + struct ucounts **ucounts, int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); @@ -509,12 +532,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * modifications require hugetlb_lock. * HPG_freed - Set when page is on the free lists. * Synchronization: hugetlb_lock held for examination and modification. + * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. 
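+ *
+ * (Each flag is wired up through the HPAGEFLAG() wrapper below; as a sketch
+ * of the generated interface, HPG_vmemmap_optimized yields the
+ * HPageVmemmapOptimized(), SetHPageVmemmapOptimized() and
+ * ClearHPageVmemmapOptimized() accessors.)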
*/ enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable, HPG_temporary, HPG_freed, + HPG_vmemmap_optimized, __NR_HPAGEFLAGS, }; @@ -560,6 +585,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve) HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) +HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) #ifdef CONFIG_HUGETLB_PAGE @@ -582,6 +608,9 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP + unsigned int nr_free_vmemmap_pages; +#endif #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ struct cftype cgroup_files_dfl[7]; @@ -604,6 +633,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); +void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, + unsigned long address, struct page *page); /* arch callback */ int __init __alloc_bootmem_huge_page(struct hstate *h); @@ -627,13 +658,13 @@ extern unsigned int default_hstate_idx; */ static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) { - return (struct hugepage_subpool *)(hpage+1)->private; + return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL); } static inline void hugetlb_set_page_subpool(struct page *hpage, struct hugepage_subpool *subpool) { - set_page_private(hpage+1, (unsigned long)subpool); + set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool); } static inline struct hstate *hstate_file(struct file *f) @@ -710,8 +741,8 @@ static inline void arch_clear_hugepage_flags(struct page *page) { } #endif #ifndef arch_make_huge_pte -static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, - struct page *page, int writable) +static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags) { return entry; } @@ -733,17 +764,6 @@ static inline int hstate_index(struct hstate *h) return h - hstates; } -pgoff_t __basepage_index(struct page *page); - -/* Return page->index in PAGE_SIZE units */ -static inline pgoff_t basepage_index(struct page *page) -{ - if (!PageCompound(page)) - return page->index; - - return __basepage_index(page); -} - extern int dissolve_free_huge_page(struct page *page); extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); @@ -777,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h) * It determines whether or not a huge page should be placed on * movable zone or not. Movability of any huge page should be * required only if huge page size is supported for migration. - * There wont be any reason for the huge page to be movable if + * There won't be any reason for the huge page to be movable if * it is not migratable to start with. Also the size of the huge * page should be large enough to be placed under a movable zone * and still feasible enough to be migratable. 
Just the presence @@ -838,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); +static inline void hugetlb_count_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->hugetlb_usage, 0); +} + static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); @@ -878,6 +903,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { @@ -980,11 +1010,6 @@ static inline int hstate_index(struct hstate *h) return 0; } -static inline pgoff_t basepage_index(struct page *page) -{ - return page->index; -} - static inline int dissolve_free_huge_page(struct page *page) { return 0; @@ -1022,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, return &mm->page_table_lock; } +static inline void hugetlb_count_init(struct mm_struct *mm) +{ +} + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } @@ -1036,6 +1065,12 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr } #endif /* CONFIG_HUGETLB_PAGE */ +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP +extern bool hugetlb_free_vmemmap_enabled; +#else +#define hugetlb_free_vmemmap_enabled false +#endif + static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0bff345c4bc6..c137396129db 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -21,15 +21,16 @@ struct hugetlb_cgroup; struct resv_map; struct file_region; +#ifdef CONFIG_CGROUP_HUGETLB /* * Minimum page order trackable by hugetlb cgroup. * At least 4 pages are necessary for all the tracking information. - * The second tail page (hpage[2]) is the fault usage cgroup. - * The third tail page (hpage[3]) is the reservation usage cgroup. + * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault + * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD]) + * is the reservation usage cgroup. 
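+ *
+ * Worked example, using the SUBPAGE_INDEX_* values from hugetlb.h: with
+ * SUBPAGE_INDEX_SUBPOOL == 1, SUBPAGE_INDEX_CGROUP == 2 and
+ * SUBPAGE_INDEX_CGROUP_RSVD == 3, __MAX_CGROUP_SUBPAGE_INDEX + 1 is 4 and
+ * order_base_2(4) == 2, so the minimum order below still evaluates to 2
+ * (at least four pages), matching the old hard-coded value.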
*/ -#define HUGETLB_CGROUP_MIN_ORDER 2 +#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1) -#ifdef CONFIG_CGROUP_HUGETLB enum hugetlb_memory_event { HUGETLB_MAX, HUGETLB_NR_MEMORY_EVENTS, @@ -66,9 +67,9 @@ __hugetlb_cgroup_from_page(struct page *page, bool rsvd) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; if (rsvd) - return (struct hugetlb_cgroup *)page[3].private; + return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); else - return (struct hugetlb_cgroup *)page[2].private; + return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); } static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) @@ -90,9 +91,11 @@ static inline int __set_hugetlb_cgroup(struct page *page, if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; if (rsvd) - page[3].private = (unsigned long)h_cg; + set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD, + (unsigned long)h_cg); else - page[2].private = (unsigned long)h_cg; + set_page_private(page + SUBPAGE_INDEX_CGROUP, + (unsigned long)h_cg); return 0; } @@ -118,6 +121,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) css_put(&h_cg->css); } +static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ + if (resv_map->css) + css_get(resv_map->css); +} + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, @@ -196,6 +206,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) { } +static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ +} + static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index d1e59dbef1dd..ddc8713ce57b 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -181,6 +181,10 @@ struct hv_ring_buffer_info { * being freed while the ring buffer is being accessed. 
*/ struct mutex ring_buffer_mutex; + + /* Buffer that holds a copy of an incoming host packet */ + void *pkt_buffer; + u32 pkt_buffer_size; }; @@ -534,12 +538,6 @@ struct vmbus_channel_rescind_offer { u32 child_relid; } __packed; -static inline u32 -hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi) -{ - return rbi->ring_buffer->pending_send_sz; -} - /* * Request Offer -- no parameters, SynIC message contains the partition ID * Set Snoop -- no parameters, SynIC message contains the partition ID @@ -790,7 +788,11 @@ struct vmbus_requestor { #define VMBUS_NO_RQSTOR U64_MAX #define VMBUS_RQST_ERROR (U64_MAX - 1) +/* NetVSC-specific */ #define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2) +/* StorVSC-specific */ +#define VMBUS_RQST_INIT (U64_MAX - 2) +#define VMBUS_RQST_RESET (U64_MAX - 3) struct vmbus_device { u16 dev_type; @@ -799,6 +801,8 @@ struct vmbus_device { bool allowed_in_isolated; }; +#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096 + struct vmbus_channel { struct list_head listentry; @@ -1018,13 +1022,21 @@ struct vmbus_channel { u32 fuzz_testing_interrupt_delay; u32 fuzz_testing_message_delay; + /* callback to generate a request ID from a request address */ + u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr); + /* callback to retrieve a request address from a request ID */ + u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id); + /* request/transaction ids for VMBus */ struct vmbus_requestor requestor; u32 rqstor_size; + + /* The max size of a packet on this channel */ + u32 max_pkt_size; }; -u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr); -u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id); +u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr); +u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id); static inline bool is_hvsock_channel(const struct vmbus_channel *c) { @@ -1074,16 +1086,6 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c, c->outbound.ring_buffer->pending_send_sz = size; } -static inline void set_low_latency_mode(struct vmbus_channel *c) -{ - c->low_latency = true; -} - -static inline void clear_low_latency_mode(struct vmbus_channel *c) -{ - c->low_latency = false; -} - void vmbus_onmessage(struct vmbus_channel_message_header *hdr); int vmbus_request_offers(void); @@ -1663,31 +1665,54 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) struct vmpacket_descriptor * +hv_pkt_iter_first_raw(struct vmbus_channel *channel); + +struct vmpacket_descriptor * hv_pkt_iter_first(struct vmbus_channel *channel); struct vmpacket_descriptor * __hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt); + const struct vmpacket_descriptor *pkt, + bool copy); void hv_pkt_iter_close(struct vmbus_channel *channel); -/* - * Get next packet descriptor from iterator - * If at end of list, return NULL and update host. - */ static inline struct vmpacket_descriptor * -hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *pkt) +hv_pkt_iter_next_pkt(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt, + bool copy) { struct vmpacket_descriptor *nxt; - nxt = __hv_pkt_iter_next(channel, pkt); + nxt = __hv_pkt_iter_next(channel, pkt, copy); if (!nxt) hv_pkt_iter_close(channel); return nxt; } +/* + * Get next packet descriptor without copying it out of the ring buffer + * If at end of list, return NULL and update host. 
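+ *
+ * Unlike the copying hv_pkt_iter_next() below, the descriptor returned
+ * here points into the live ring buffer rather than into the channel's
+ * private pkt_buffer copy, so the (possibly malicious) host can still
+ * change it while it is being parsed; callers must treat it accordingly.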
+ */ +static inline struct vmpacket_descriptor * +hv_pkt_iter_next_raw(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt) +{ + return hv_pkt_iter_next_pkt(channel, pkt, false); +} + +/* + * Get next packet descriptor from iterator + * If at end of list, return NULL and update host. + */ +static inline struct vmpacket_descriptor * +hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt) +{ + return hv_pkt_iter_next_pkt(channel, pkt, true); +} + #define foreach_vmbus_pkt(pkt, channel) \ for (pkt = hv_pkt_iter_first(channel); pkt; \ pkt = hv_pkt_iter_next(channel, pkt)) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e8f2ac8c9c3d..2ce3efbe9198 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -15,6 +15,7 @@ #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +#include <linux/regulator/consumer.h> #include <linux/rtmutex.h> #include <linux/irqdomain.h> /* for Host Notify IRQ */ #include <linux/of.h> /* for struct device_node */ @@ -147,6 +148,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, /* Now follow the 'nice' access routines. These also document the calling conventions of i2c_smbus_xfer. */ +u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count); s32 i2c_smbus_read_byte(const struct i2c_client *client); s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command); @@ -343,7 +345,6 @@ struct i2c_client { }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) -struct i2c_client *i2c_verify_client(struct device *dev); struct i2c_adapter *i2c_verify_adapter(struct device *dev); const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client); @@ -477,6 +478,13 @@ i2c_new_ancillary_device(struct i2c_client *client, u16 default_addr); void i2c_unregister_device(struct i2c_client *client); + +struct i2c_client *i2c_verify_client(struct device *dev); +#else +static inline struct i2c_client *i2c_verify_client(struct device *dev) +{ + return NULL; +} #endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. 
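Example (an illustrative sketch, not part of the patch; mydrv_dev_is_i2c_client() is hypothetical): with i2c_verify_client() now stubbed out for CONFIG_I2C=n builds, shared code can test whether an arbitrary struct device is an I2C client without any #ifdef guard:

    #include <linux/device.h>
    #include <linux/i2c.h>

    static bool mydrv_dev_is_i2c_client(struct device *dev)
    {
            /* The stub returns NULL when I2C support is compiled out. */
            return i2c_verify_client(dev) != NULL;
    }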
@@ -729,6 +737,7 @@ struct i2c_adapter { const struct i2c_adapter_quirks *quirks; struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) @@ -1001,6 +1010,7 @@ struct acpi_resource_i2c_serialbus; #if IS_ENABLED(CONFIG_ACPI) bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c); +int i2c_acpi_client_count(struct acpi_device *adev); u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info); @@ -1011,6 +1021,10 @@ static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, { return false; } +static inline int i2c_acpi_client_count(struct acpi_device *adev) +{ + return 0; +} static inline u32 i2c_acpi_find_bus_speed(struct device *dev) { return 0; diff --git a/include/linux/ide.h b/include/linux/ide.h deleted file mode 100644 index 2c300689a51a..000000000000 --- a/include/linux/ide.h +++ /dev/null @@ -1,1623 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _IDE_H -#define _IDE_H -/* - * linux/include/linux/ide.h - * - * Copyright (C) 1994-2002 Linus Torvalds & authors - */ - -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/ata.h> -#include <linux/blk-mq.h> -#include <linux/proc_fs.h> -#include <linux/interrupt.h> -#include <linux/bitops.h> -#include <linux/bio.h> -#include <linux/pci.h> -#include <linux/completion.h> -#include <linux/pm.h> -#include <linux/mutex.h> -/* for request_sense */ -#include <linux/cdrom.h> -#include <scsi/scsi_cmnd.h> -#include <asm/byteorder.h> -#include <asm/io.h> - -/* - * Probably not wise to fiddle with these - */ -#define SUPPORT_VLB_SYNC 1 -#define IDE_DEFAULT_MAX_FAILURES 1 -#define ERROR_MAX 8 /* Max read/write errors per sector */ -#define ERROR_RESET 3 /* Reset controller every 4th retry */ -#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ - -struct device; - -/* values for ide_request.type */ -enum ata_priv_type { - ATA_PRIV_MISC, - ATA_PRIV_TASKFILE, - ATA_PRIV_PC, - ATA_PRIV_SENSE, /* sense request */ - ATA_PRIV_PM_SUSPEND, /* suspend request */ - ATA_PRIV_PM_RESUME, /* resume request */ -}; - -struct ide_request { - struct scsi_request sreq; - u8 sense[SCSI_SENSE_BUFFERSIZE]; - u8 type; - void *special; -}; - -static inline struct ide_request *ide_req(struct request *rq) -{ - return blk_mq_rq_to_pdu(rq); -} - -static inline bool ata_misc_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC; -} - -static inline bool ata_taskfile_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE; -} - -static inline bool ata_pc_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC; -} - -static inline bool ata_sense_request(struct request *rq) -{ - return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE; -} - -static inline bool ata_pm_request(struct request *rq) -{ - return blk_rq_is_private(rq) && - (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND || - ide_req(rq)->type == ATA_PRIV_PM_RESUME); -} - -/* Error codes returned in result to the higher part of the driver. 
*/ -enum { - IDE_DRV_ERROR_GENERAL = 101, - IDE_DRV_ERROR_FILEMARK = 102, - IDE_DRV_ERROR_EOD = 103, -}; - -/* - * Definitions for accessing IDE controller registers - */ -#define IDE_NR_PORTS (10) - -struct ide_io_ports { - unsigned long data_addr; - - union { - unsigned long error_addr; /* read: error */ - unsigned long feature_addr; /* write: feature */ - }; - - unsigned long nsect_addr; - unsigned long lbal_addr; - unsigned long lbam_addr; - unsigned long lbah_addr; - - unsigned long device_addr; - - union { - unsigned long status_addr; /* read: status */ - unsigned long command_addr; /* write: command */ - }; - - unsigned long ctl_addr; - - unsigned long irq_addr; -}; - -#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) - -#define BAD_R_STAT (ATA_BUSY | ATA_ERR) -#define BAD_W_STAT (BAD_R_STAT | ATA_DF) -#define BAD_STAT (BAD_R_STAT | ATA_DRQ) -#define DRIVE_READY (ATA_DRDY | ATA_DSC) - -#define BAD_CRC (ATA_ABORTED | ATA_ICRC) - -#define SATA_NR_PORTS (3) /* 16 possible ?? */ - -#define SATA_STATUS_OFFSET (0) -#define SATA_ERROR_OFFSET (1) -#define SATA_CONTROL_OFFSET (2) - -/* - * Our Physical Region Descriptor (PRD) table should be large enough - * to handle the biggest I/O request we are likely to see. Since requests - * can have no more than 256 sectors, and since the typical blocksize is - * two or more sectors, we could get by with a limit of 128 entries here for - * the usual worst case. Most requests seem to include some contiguous blocks, - * further reducing the number of table entries required. - * - * The driver reverts to PIO mode for individual requests that exceed - * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling - * 100% of all crazy scenarios here is not necessary. - * - * As it turns out though, we must allocate a full 4KB page for this, - * so the two PRD tables (ide0 & ide1) will each get half of that, - * allowing each to have about 256 entries (8 bytes each) from this. - */ -#define PRD_BYTES 8 -#define PRD_ENTRIES 256 - -/* - * Some more useful definitions - */ -#define PARTN_BITS 6 /* number of minor dev bits for partitions */ -#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ - -/* - * Timeouts for various operations: - */ -enum { - /* spec allows up to 20ms, but CF cards and SSD drives need more */ - WAIT_DRQ = 1 * HZ, /* 1s */ - /* some laptops are very slow */ - WAIT_READY = 5 * HZ, /* 5s */ - /* should be less than 3ms (?), if all ATAPI CD is closed at boot */ - WAIT_PIDENTIFY = 10 * HZ, /* 10s */ - /* worst case when spinning up */ - WAIT_WORSTCASE = 30 * HZ, /* 30s */ - /* maximum wait for an IRQ to happen */ - WAIT_CMD = 10 * HZ, /* 10s */ - /* Some drives require a longer IRQ timeout. */ - WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */ - /* - * Some drives (for example, Seagate STT3401A Travan) require a very - * long timeout, because they don't return an interrupt or clear their - * BSY bit until after the command completes (even retension commands). - */ - WAIT_TAPE_CMD = 900 * HZ, /* 900s */ - /* minimum sleep time */ - WAIT_MIN_SLEEP = HZ / 50, /* 20ms */ -}; - -/* - * Op codes for special requests to be handled by ide_special_rq(). - * Values should be in the range of 0x20 to 0x3f. - */ -#define REQ_DRIVE_RESET 0x20 -#define REQ_DEVSET_EXEC 0x21 -#define REQ_PARK_HEADS 0x22 -#define REQ_UNPARK_HEADS 0x23 - -/* - * hwif_chipset_t is used to keep track of the specific hardware - * chipset used by each IDE interface, if known. 
- */ -enum { ide_unknown, ide_generic, ide_pci, - ide_cmd640, ide_dtc2278, ide_ali14xx, - ide_qd65xx, ide_umc8672, ide_ht6560b, - ide_4drives, ide_pmac, ide_acorn, - ide_au1xxx, ide_palm3710 -}; - -typedef u8 hwif_chipset_t; - -/* - * Structure to hold all information about the location of this port - */ -struct ide_hw { - union { - struct ide_io_ports io_ports; - unsigned long io_ports_array[IDE_NR_PORTS]; - }; - - int irq; /* our irq number */ - struct device *dev, *parent; - unsigned long config; -}; - -static inline void ide_std_init_ports(struct ide_hw *hw, - unsigned long io_addr, - unsigned long ctl_addr) -{ - unsigned int i; - - for (i = 0; i <= 7; i++) - hw->io_ports_array[i] = io_addr++; - - hw->io_ports.ctl_addr = ctl_addr; -} - -#define MAX_HWIFS 10 - -/* - * Now for the data we need to maintain per-drive: ide_drive_t - */ - -#define ide_scsi 0x21 -#define ide_disk 0x20 -#define ide_optical 0x7 -#define ide_cdrom 0x5 -#define ide_tape 0x1 -#define ide_floppy 0x0 - -/* - * Special Driver Flags - */ -enum { - IDE_SFLAG_SET_GEOMETRY = BIT(0), - IDE_SFLAG_RECALIBRATE = BIT(1), - IDE_SFLAG_SET_MULTMODE = BIT(2), -}; - -/* - * Status returned from various ide_ functions - */ -typedef enum { - ide_stopped, /* no drive operation was started */ - ide_started, /* a drive operation was started, handler was set */ -} ide_startstop_t; - -enum { - IDE_VALID_ERROR = BIT(1), - IDE_VALID_FEATURE = IDE_VALID_ERROR, - IDE_VALID_NSECT = BIT(2), - IDE_VALID_LBAL = BIT(3), - IDE_VALID_LBAM = BIT(4), - IDE_VALID_LBAH = BIT(5), - IDE_VALID_DEVICE = BIT(6), - IDE_VALID_LBA = IDE_VALID_LBAL | - IDE_VALID_LBAM | - IDE_VALID_LBAH, - IDE_VALID_OUT_TF = IDE_VALID_FEATURE | - IDE_VALID_NSECT | - IDE_VALID_LBA, - IDE_VALID_IN_TF = IDE_VALID_NSECT | - IDE_VALID_LBA, - IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF, - IDE_VALID_IN_HOB = IDE_VALID_ERROR | - IDE_VALID_NSECT | - IDE_VALID_LBA, -}; - -enum { - IDE_TFLAG_LBA48 = BIT(0), - IDE_TFLAG_WRITE = BIT(1), - IDE_TFLAG_CUSTOM_HANDLER = BIT(2), - IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3), - /* force 16-bit I/O operations */ - IDE_TFLAG_IO_16BIT = BIT(4), - /* struct ide_cmd was allocated using kmalloc() */ - IDE_TFLAG_DYN = BIT(5), - IDE_TFLAG_FS = BIT(6), - IDE_TFLAG_MULTI_PIO = BIT(7), - IDE_TFLAG_SET_XFER = BIT(8), -}; - -enum { - IDE_FTFLAG_FLAGGED = BIT(0), - IDE_FTFLAG_SET_IN_FLAGS = BIT(1), - IDE_FTFLAG_OUT_DATA = BIT(2), - IDE_FTFLAG_IN_DATA = BIT(3), -}; - -struct ide_taskfile { - u8 data; /* 0: data byte (for TASKFILE ioctl) */ - union { /* 1: */ - u8 error; /* read: error */ - u8 feature; /* write: feature */ - }; - u8 nsect; /* 2: number of sectors */ - u8 lbal; /* 3: LBA low */ - u8 lbam; /* 4: LBA mid */ - u8 lbah; /* 5: LBA high */ - u8 device; /* 6: device select */ - union { /* 7: */ - u8 status; /* read: status */ - u8 command; /* write: command */ - }; -}; - -struct ide_cmd { - struct ide_taskfile tf; - struct ide_taskfile hob; - struct { - struct { - u8 tf; - u8 hob; - } out, in; - } valid; - - u16 tf_flags; - u8 ftf_flags; /* for TASKFILE ioctl */ - int protocol; - - int sg_nents; /* number of sg entries */ - int orig_sg_nents; - int sg_dma_direction; /* DMA transfer direction */ - - unsigned int nbytes; - unsigned int nleft; - unsigned int last_xfer_len; - - struct scatterlist *cursg; - unsigned int cursg_ofs; - - struct request *rq; /* copy of request */ -}; - -/* ATAPI packet command flags */ -enum { - /* set when an error is considered normal - no retry (ide-tape) */ - PC_FLAG_ABORT = BIT(0), - PC_FLAG_SUPPRESS_ERROR = BIT(1), - PC_FLAG_WAIT_FOR_DSC = 
BIT(2), - PC_FLAG_DMA_OK = BIT(3), - PC_FLAG_DMA_IN_PROGRESS = BIT(4), - PC_FLAG_DMA_ERROR = BIT(5), - PC_FLAG_WRITING = BIT(6), -}; - -#define ATAPI_WAIT_PC (60 * HZ) - -struct ide_atapi_pc { - /* actual packet bytes */ - u8 c[12]; - /* incremented on each retry */ - int retries; - int error; - - /* bytes to transfer */ - int req_xfer; - - /* the corresponding request */ - struct request *rq; - - unsigned long flags; - - /* - * those are more or less driver-specific and some of them are subject - * to change/removal later. - */ - unsigned long timeout; -}; - -struct ide_devset; -struct ide_driver; - -#ifdef CONFIG_BLK_DEV_IDEACPI -struct ide_acpi_drive_link; -struct ide_acpi_hwif_link; -#endif - -struct ide_drive_s; - -struct ide_disk_ops { - int (*check)(struct ide_drive_s *, const char *); - int (*get_capacity)(struct ide_drive_s *); - void (*unlock_native_capacity)(struct ide_drive_s *); - void (*setup)(struct ide_drive_s *); - void (*flush)(struct ide_drive_s *); - int (*init_media)(struct ide_drive_s *, struct gendisk *); - int (*set_doorlock)(struct ide_drive_s *, struct gendisk *, - int); - ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, - sector_t); - int (*ioctl)(struct ide_drive_s *, struct block_device *, - fmode_t, unsigned int, unsigned long); - int (*compat_ioctl)(struct ide_drive_s *, struct block_device *, - fmode_t, unsigned int, unsigned long); -}; - -/* ATAPI device flags */ -enum { - IDE_AFLAG_DRQ_INTERRUPT = BIT(0), - - /* ide-cd */ - /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = BIT(1), - /* Drive is a pre ATAPI 1.2 drive. */ - IDE_AFLAG_PRE_ATAPI12 = BIT(2), - /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = BIT(3), - /* TOC track numbers are in BCD. */ - IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4), - /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = BIT(6), - /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = BIT(7), - /* SET_CD_SPEED command is unsupported. 
*/ - IDE_AFLAG_NO_SPEED_SELECT = BIT(8), - IDE_AFLAG_VERTOS_300_SSD = BIT(9), - IDE_AFLAG_VERTOS_600_ESD = BIT(10), - IDE_AFLAG_SANYO_3CD = BIT(11), - IDE_AFLAG_FULL_CAPS_PAGE = BIT(12), - IDE_AFLAG_PLAY_AUDIO_OK = BIT(13), - IDE_AFLAG_LE_SPEED_FIELDS = BIT(14), - - /* ide-floppy */ - /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = BIT(15), - /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = BIT(16), - /* Supports format progress report */ - IDE_AFLAG_SRFP = BIT(17), - - /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = BIT(18), - /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = BIT(19), - /* Device already opened */ - IDE_AFLAG_BUSY = BIT(20), - /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = BIT(21), - /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = BIT(22), - /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = BIT(23), - - IDE_AFLAG_NO_AUTOCLOSE = BIT(24), -}; - -/* device flags */ -enum { - /* restore settings after device reset */ - IDE_DFLAG_KEEP_SETTINGS = BIT(0), - /* device is using DMA for read/write */ - IDE_DFLAG_USING_DMA = BIT(1), - /* okay to unmask other IRQs */ - IDE_DFLAG_UNMASK = BIT(2), - /* don't attempt flushes */ - IDE_DFLAG_NOFLUSH = BIT(3), - /* DSC overlap */ - IDE_DFLAG_DSC_OVERLAP = BIT(4), - /* give potential excess bandwidth */ - IDE_DFLAG_NICE1 = BIT(5), - /* device is physically present */ - IDE_DFLAG_PRESENT = BIT(6), - /* disable Host Protected Area */ - IDE_DFLAG_NOHPA = BIT(7), - /* id read from device (synthetic if not set) */ - IDE_DFLAG_ID_READ = BIT(8), - IDE_DFLAG_NOPROBE = BIT(9), - /* need to do check_media_change() */ - IDE_DFLAG_REMOVABLE = BIT(10), - IDE_DFLAG_FORCED_GEOM = BIT(12), - /* disallow setting unmask bit */ - IDE_DFLAG_NO_UNMASK = BIT(13), - /* disallow enabling 32-bit I/O */ - IDE_DFLAG_NO_IO_32BIT = BIT(14), - /* for removable only: door lock/unlock works */ - IDE_DFLAG_DOORLOCKING = BIT(15), - /* disallow DMA */ - IDE_DFLAG_NODMA = BIT(16), - /* powermanagement told us not to do anything, so sleep nicely */ - IDE_DFLAG_BLOCKED = BIT(17), - /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = BIT(18), - IDE_DFLAG_POST_RESET = BIT(19), - IDE_DFLAG_UDMA33_WARNED = BIT(20), - IDE_DFLAG_LBA48 = BIT(21), - /* status of write cache */ - IDE_DFLAG_WCACHE = BIT(22), - /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = BIT(23), - /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = BIT(24), - IDE_DFLAG_LBA = BIT(25), - /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = BIT(26), - /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = BIT(27), - IDE_DFLAG_MEDIA_CHANGED = BIT(28), - /* write protect */ - IDE_DFLAG_WP = BIT(29), - IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30), - IDE_DFLAG_NIEN_QUIRK = BIT(31), -}; - -struct ide_drive_s { - char name[4]; /* drive name, such as "hda" */ - char driver_req[10]; /* requests specific driver */ - - struct request_queue *queue; /* request queue */ - - bool (*prep_rq)(struct ide_drive_s *, struct request *); - - struct blk_mq_tag_set tag_set; - - struct request *rq; /* current request */ - void *driver_data; /* extra driver data */ - u16 *id; /* identification info */ -#ifdef CONFIG_IDE_PROC_FS - struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ - const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */ -#endif - struct hwif_s *hwif; /* actually (ide_hwif_t *) */ - - const struct ide_disk_ops *disk_ops; - - unsigned long dev_flags; - - 
unsigned long sleep; /* sleep until this time */ - unsigned long timeout; /* max time to wait for irq */ - - u8 special_flags; /* special action flags */ - - u8 select; /* basic drive/head select reg value */ - u8 retry_pio; /* retrying dma capable host in pio */ - u8 waiting_for_dma; /* dma currently in progress */ - u8 dma; /* atapi dma flag */ - - u8 init_speed; /* transfer rate set at boot */ - u8 current_speed; /* current transfer rate set */ - u8 desired_speed; /* desired transfer rate set */ - u8 pio_mode; /* for ->set_pio_mode _only_ */ - u8 dma_mode; /* for ->set_dma_mode _only_ */ - u8 dn; /* now wide spread use */ - u8 acoustic; /* acoustic management */ - u8 media; /* disk, cdrom, tape, floppy, ... */ - u8 ready_stat; /* min status value for drive ready */ - u8 mult_count; /* current multiple sector setting */ - u8 mult_req; /* requested multiple sector setting */ - u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ - u8 bad_wstat; /* used for ignoring ATA_DF */ - u8 head; /* "real" number of heads */ - u8 sect; /* "real" sectors per track */ - u8 bios_head; /* BIOS/fdisk/LILO number of heads */ - u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */ - - /* delay this long before sending packet command */ - u8 pc_delay; - - unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ - unsigned int cyl; /* "real" number of cyls */ - void *drive_data; /* used by set_pio_mode/dev_select() */ - unsigned int failures; /* current failure count */ - unsigned int max_failures; /* maximum allowed failure count */ - u64 probed_capacity;/* initial/native media capacity */ - u64 capacity64; /* total number of sectors */ - - int lun; /* logical unit */ - int crc_count; /* crc counter to reduce drive speed */ - - unsigned long debug_mask; /* debugging levels switch */ - -#ifdef CONFIG_BLK_DEV_IDEACPI - struct ide_acpi_drive_link *acpidata; -#endif - struct list_head list; - struct device gendev; - struct completion gendev_rel_comp; /* to deal with device release() */ - - /* current packet command */ - struct ide_atapi_pc *pc; - - /* last failed packet command */ - struct ide_atapi_pc *failed_pc; - - /* callback for packet commands */ - int (*pc_callback)(struct ide_drive_s *, int); - - ide_startstop_t (*irq_handler)(struct ide_drive_s *); - - unsigned long atapi_flags; - - struct ide_atapi_pc request_sense_pc; - - /* current sense rq and buffer */ - bool sense_rq_armed; - bool sense_rq_active; - struct request *sense_rq; - struct request_sense sense_data; - - /* async sense insertion */ - struct work_struct rq_work; - struct list_head rq_list; -}; - -typedef struct ide_drive_s ide_drive_t; - -#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev) - -#define to_ide_drv(obj, cont_type) \ - container_of(obj, struct cont_type, dev) - -#define ide_drv_g(disk, cont_type) \ - container_of((disk)->private_data, struct cont_type, driver) - -struct ide_port_info; - -struct ide_tp_ops { - void (*exec_command)(struct hwif_s *, u8); - u8 (*read_status)(struct hwif_s *); - u8 (*read_altstatus)(struct hwif_s *); - void (*write_devctl)(struct hwif_s *, u8); - - void (*dev_select)(ide_drive_t *); - void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8); - void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8); - - void (*input_data)(ide_drive_t *, struct ide_cmd *, - void *, unsigned int); - void (*output_data)(ide_drive_t *, struct ide_cmd *, - void *, unsigned int); -}; - -extern const struct ide_tp_ops default_tp_ops; - -/** - * struct ide_port_ops - IDE port operations - * - * 
@init_dev: host specific initialization of a device - * @set_pio_mode: routine to program host for PIO mode - * @set_dma_mode: routine to program host for DMA mode - * @reset_poll: chipset polling based on hba specifics - * @pre_reset: chipset specific changes to default for device-hba resets - * @resetproc: routine to reset controller after a disk reset - * @maskproc: special host masking for drive selection - * @quirkproc: check host's drive quirk list - * @clear_irq: clear IRQ - * - * @mdma_filter: filter MDMA modes - * @udma_filter: filter UDMA modes - * - * @cable_detect: detect cable type - */ -struct ide_port_ops { - void (*init_dev)(ide_drive_t *); - void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); - void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); - blk_status_t (*reset_poll)(ide_drive_t *); - void (*pre_reset)(ide_drive_t *); - void (*resetproc)(ide_drive_t *); - void (*maskproc)(ide_drive_t *, int); - void (*quirkproc)(ide_drive_t *); - void (*clear_irq)(ide_drive_t *); - int (*test_irq)(struct hwif_s *); - - u8 (*mdma_filter)(ide_drive_t *); - u8 (*udma_filter)(ide_drive_t *); - - u8 (*cable_detect)(struct hwif_s *); -}; - -struct ide_dma_ops { - void (*dma_host_set)(struct ide_drive_s *, int); - int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *); - void (*dma_start)(struct ide_drive_s *); - int (*dma_end)(struct ide_drive_s *); - int (*dma_test_irq)(struct ide_drive_s *); - void (*dma_lost_irq)(struct ide_drive_s *); - /* below ones are optional */ - int (*dma_check)(struct ide_drive_s *, struct ide_cmd *); - int (*dma_timer_expiry)(struct ide_drive_s *); - void (*dma_clear)(struct ide_drive_s *); - /* - * The following method is optional and only required to be - * implemented for the SFF-8038i compatible controllers. - */ - u8 (*dma_sff_read_status)(struct hwif_s *); -}; - -enum { - IDE_PFLAG_PROBING = BIT(0), -}; - -struct ide_host; - -typedef struct hwif_s { - struct hwif_s *mate; /* other hwif from same PCI chip */ - struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ - - struct ide_host *host; - - char name[6]; /* name of interface, eg. "ide0" */ - - struct ide_io_ports io_ports; - - unsigned long sata_scr[SATA_NR_PORTS]; - - ide_drive_t *devices[MAX_DRIVES + 1]; - - unsigned long port_flags; - - u8 major; /* our major number */ - u8 index; /* 0 for ide0; 1 for ide1; ... */ - u8 channel; /* for dual-port chips: 0=primary, 1=secondary */ - - u32 host_flags; - - u8 pio_mask; - - u8 ultra_mask; - u8 mwdma_mask; - u8 swdma_mask; - - u8 cbl; /* cable type */ - - hwif_chipset_t chipset; /* sub-module for tuning.. 
*/ - - struct device *dev; - - void (*rw_disk)(ide_drive_t *, struct request *); - - const struct ide_tp_ops *tp_ops; - const struct ide_port_ops *port_ops; - const struct ide_dma_ops *dma_ops; - - /* dma physical region descriptor table (cpu view) */ - unsigned int *dmatable_cpu; - /* dma physical region descriptor table (dma view) */ - dma_addr_t dmatable_dma; - - /* maximum number of PRD table entries */ - int prd_max_nents; - /* PRD entry size in bytes */ - int prd_ent_size; - - /* Scatter-gather list used to build the above */ - struct scatterlist *sg_table; - int sg_max_nents; /* Maximum number of entries in it */ - - struct ide_cmd cmd; /* current command */ - - int rqsize; /* max sectors per request */ - int irq; /* our irq number */ - - unsigned long dma_base; /* base addr for dma ports */ - - unsigned long config_data; /* for use by chipset-specific code */ - unsigned long select_data; /* for use by chipset-specific code */ - - unsigned long extra_base; /* extra addr for dma ports */ - unsigned extra_ports; /* number of extra dma ports */ - - unsigned present : 1; /* this interface exists */ - unsigned busy : 1; /* serializes devices on a port */ - - struct device gendev; - struct device *portdev; - - struct completion gendev_rel_comp; /* To deal with device release() */ - - void *hwif_data; /* extra hwif data */ - -#ifdef CONFIG_BLK_DEV_IDEACPI - struct ide_acpi_hwif_link *acpidata; -#endif - - /* IRQ handler, if active */ - ide_startstop_t (*handler)(ide_drive_t *); - - /* BOOL: polling active & poll_timeout field valid */ - unsigned int polling : 1; - - /* current drive */ - ide_drive_t *cur_dev; - - /* current request */ - struct request *rq; - - /* failsafe timer */ - struct timer_list timer; - /* timeout value during long polls */ - unsigned long poll_timeout; - /* queried upon timeouts */ - int (*expiry)(ide_drive_t *); - - int req_gen; - int req_gen_timer; - - spinlock_t lock; -} ____cacheline_internodealigned_in_smp ide_hwif_t; - -#define MAX_HOST_PORTS 4 - -struct ide_host { - ide_hwif_t *ports[MAX_HOST_PORTS + 1]; - unsigned int n_ports; - struct device *dev[2]; - - int (*init_chipset)(struct pci_dev *); - - void (*get_lock)(irq_handler_t, void *); - void (*release_lock)(void); - - irq_handler_t irq_handler; - - unsigned long host_flags; - - int irq_flags; - - void *host_priv; - ide_hwif_t *cur_port; /* for hosts requiring serialization */ - - /* used for hosts requiring serialization */ - volatile unsigned long host_busy; -}; - -#define IDE_HOST_BUSY 0 - -/* - * internal ide interrupt handler type - */ -typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); -typedef int (ide_expiry_t)(ide_drive_t *); - -/* used by ide-cd, ide-floppy, etc. 
*/ -typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned); - -extern struct mutex ide_setting_mtx; - -/* - * configurable drive settings - */ - -#define DS_SYNC BIT(0) - -struct ide_devset { - int (*get)(ide_drive_t *); - int (*set)(ide_drive_t *, int); - unsigned int flags; -}; - -#define __DEVSET(_flags, _get, _set) { \ - .flags = _flags, \ - .get = _get, \ - .set = _set, \ -} - -#define ide_devset_get(name, field) \ -static int get_##name(ide_drive_t *drive) \ -{ \ - return drive->field; \ -} - -#define ide_devset_set(name, field) \ -static int set_##name(ide_drive_t *drive, int arg) \ -{ \ - drive->field = arg; \ - return 0; \ -} - -#define ide_devset_get_flag(name, flag) \ -static int get_##name(ide_drive_t *drive) \ -{ \ - return !!(drive->dev_flags & flag); \ -} - -#define ide_devset_set_flag(name, flag) \ -static int set_##name(ide_drive_t *drive, int arg) \ -{ \ - if (arg) \ - drive->dev_flags |= flag; \ - else \ - drive->dev_flags &= ~flag; \ - return 0; \ -} - -#define __IDE_DEVSET(_name, _flags, _get, _set) \ -const struct ide_devset ide_devset_##_name = \ - __DEVSET(_flags, _get, _set) - -#define IDE_DEVSET(_name, _flags, _get, _set) \ -static __IDE_DEVSET(_name, _flags, _get, _set) - -#define ide_devset_rw(_name, _func) \ -IDE_DEVSET(_name, 0, get_##_func, set_##_func) - -#define ide_devset_w(_name, _func) \ -IDE_DEVSET(_name, 0, NULL, set_##_func) - -#define ide_ext_devset_rw(_name, _func) \ -__IDE_DEVSET(_name, 0, get_##_func, set_##_func) - -#define ide_ext_devset_rw_sync(_name, _func) \ -__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func) - -#define ide_decl_devset(_name) \ -extern const struct ide_devset ide_devset_##_name - -ide_decl_devset(io_32bit); -ide_decl_devset(keepsettings); -ide_decl_devset(pio_mode); -ide_decl_devset(unmaskirq); -ide_decl_devset(using_dma); - -#ifdef CONFIG_IDE_PROC_FS -/* - * /proc/ide interface - */ - -#define ide_devset_rw_field(_name, _field) \ -ide_devset_get(_name, _field); \ -ide_devset_set(_name, _field); \ -IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) - -#define ide_devset_ro_field(_name, _field) \ -ide_devset_get(_name, _field); \ -IDE_DEVSET(_name, 0, get_##_name, NULL) - -#define ide_devset_rw_flag(_name, _field) \ -ide_devset_get_flag(_name, _field); \ -ide_devset_set_flag(_name, _field); \ -IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) - -struct ide_proc_devset { - const char *name; - const struct ide_devset *setting; - int min, max; - int (*mulf)(ide_drive_t *); - int (*divf)(ide_drive_t *); -}; - -#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \ - .name = __stringify(_name), \ - .setting = &ide_devset_##_name, \ - .min = _min, \ - .max = _max, \ - .mulf = _mulf, \ - .divf = _divf, \ -} - -#define IDE_PROC_DEVSET(_name, _min, _max) \ -__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) - -typedef struct { - const char *name; - umode_t mode; - int (*show)(struct seq_file *, void *); -} ide_proc_entry_t; - -void proc_ide_create(void); -void proc_ide_destroy(void); -void ide_proc_register_port(ide_hwif_t *); -void ide_proc_port_register_devices(ide_hwif_t *); -void ide_proc_unregister_device(ide_drive_t *); -void ide_proc_unregister_port(ide_hwif_t *); -void ide_proc_register_driver(ide_drive_t *, struct ide_driver *); -void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *); - -int ide_capacity_proc_show(struct seq_file *m, void *v); -int ide_geometry_proc_show(struct seq_file *m, void *v); -#else -static inline void proc_ide_create(void) { ; } -static inline void 
proc_ide_destroy(void) { ; } -static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } -static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } -static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } -static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } -static inline void ide_proc_register_driver(ide_drive_t *drive, - struct ide_driver *driver) { ; } -static inline void ide_proc_unregister_driver(ide_drive_t *drive, - struct ide_driver *driver) { ; } -#endif - -enum { - /* enter/exit functions */ - IDE_DBG_FUNC = BIT(0), - /* sense key/asc handling */ - IDE_DBG_SENSE = BIT(1), - /* packet commands handling */ - IDE_DBG_PC = BIT(2), - /* request handling */ - IDE_DBG_RQ = BIT(3), - /* driver probing/setup */ - IDE_DBG_PROBE = BIT(4), -}; - -/* DRV_NAME has to be defined in the driver before using the macro below */ -#define __ide_debug_log(lvl, fmt, args...) \ -{ \ - if (unlikely(drive->debug_mask & lvl)) \ - printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \ - __func__, ## args); \ -} - -/* - * Power Management state machine (rq->pm->pm_step). - * - * For each step, the core calls ide_start_power_step() first. - * This can return: - * - ide_stopped : In this case, the core calls us back again unless - * the step has been set to IDE_PM_COMPLETED. - * - ide_started : In this case, the channel is left busy until an - * async event (interrupt) occurs. - * Typically, ide_start_power_step() will issue a taskfile request with - * do_rw_taskfile(). - * - * Upon reception of the interrupt, the core will call ide_complete_power_step() - * with the error code, if any. This routine should update the step value - * and return. It should not start a new request. The core will call - * ide_start_power_step() for the new step value, unless the step has been - * set to IDE_PM_COMPLETED. - */ -enum { - IDE_PM_START_SUSPEND, - IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND, - IDE_PM_STANDBY, - - IDE_PM_START_RESUME, - IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME, - IDE_PM_IDLE, - IDE_PM_RESTORE_DMA, - - IDE_PM_COMPLETED, -}; - -int generic_ide_suspend(struct device *, pm_message_t); -int generic_ide_resume(struct device *); - -void ide_complete_power_step(ide_drive_t *, struct request *); -ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); -void ide_complete_pm_rq(ide_drive_t *, struct request *); -void ide_check_pm_state(ide_drive_t *, struct request *); - -/* - * Subdrivers support. - * - * The gendriver.owner field should be set to the module owner of this driver. 
- * The gendriver.name field should be set to the name of this driver - */ -struct ide_driver { - const char *version; - ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); - struct device_driver gen_driver; - int (*probe)(ide_drive_t *); - void (*remove)(ide_drive_t *); - void (*resume)(ide_drive_t *); - void (*shutdown)(ide_drive_t *); -#ifdef CONFIG_IDE_PROC_FS - ide_proc_entry_t * (*proc_entries)(ide_drive_t *); - const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *); -#endif -}; - -#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver) - -int ide_device_get(ide_drive_t *); -void ide_device_put(ide_drive_t *); - -struct ide_ioctl_devset { - unsigned int get_ioctl; - unsigned int set_ioctl; - const struct ide_devset *setting; -}; - -int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int, - unsigned long, const struct ide_ioctl_devset *); - -int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long); - -extern int ide_vlb_clk; -extern int ide_pci_clk; - -int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int); -void ide_kill_rq(ide_drive_t *, struct request *); -void ide_insert_request_head(ide_drive_t *, struct request *); - -void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); -void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); - -void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *, - unsigned int); - -void ide_pad_transfer(ide_drive_t *, int, int); - -ide_startstop_t ide_error(ide_drive_t *, const char *, u8); - -void ide_fix_driveid(u16 *); - -extern void ide_fixstring(u8 *, const int, const int); - -int ide_busy_sleep(ide_drive_t *, unsigned long, int); - -int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *); -int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); - -ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *); -ide_startstop_t ide_do_devset(ide_drive_t *, struct request *); - -extern ide_startstop_t ide_do_reset (ide_drive_t *); - -extern int ide_devset_execute(ide_drive_t *drive, - const struct ide_devset *setting, int arg); - -void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); -int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int); - -void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); -void ide_tf_dump(const char *, struct ide_cmd *); - -void ide_exec_command(ide_hwif_t *, u8); -u8 ide_read_status(ide_hwif_t *); -u8 ide_read_altstatus(ide_hwif_t *); -void ide_write_devctl(ide_hwif_t *, u8); - -void ide_dev_select(ide_drive_t *); -void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8); -void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8); - -void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); -void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); - -void SELECT_MASK(ide_drive_t *, int); - -u8 ide_read_error(ide_drive_t *); -void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); - -int ide_check_ireason(ide_drive_t *, struct request *, int, int, int); - -int ide_check_atapi_device(ide_drive_t *, const char *); - -void ide_init_pc(struct ide_atapi_pc *); - -/* Disk head parking */ -extern wait_queue_head_t ide_park_wq; -ssize_t ide_park_show(struct device *dev, struct device_attribute *attr, - char *buf); -ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len); - -/* - * Special requests for ide-tape block device 
strategy routine. - * - * In order to service a character device command, we add special requests to - * the tail of our block device request queue and wait for their completion. - */ -enum { - REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */ - REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */ - REQ_IDETAPE_READ = BIT(2), - REQ_IDETAPE_WRITE = BIT(3), -}; - -int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, - void *, unsigned int); - -int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); -int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); -int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); -void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); -void ide_retry_pc(ide_drive_t *drive); - -void ide_prep_sense(ide_drive_t *drive, struct request *rq); -int ide_queue_sense_rq(ide_drive_t *drive, void *special); - -int ide_cd_expiry(ide_drive_t *); - -int ide_cd_get_xferlen(struct request *); - -ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *); - -ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); - -void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int); - -void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); - -int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); -int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *); - -int ide_taskfile_ioctl(ide_drive_t *, unsigned long); - -int ide_dev_read_id(ide_drive_t *, u8, u16 *, int); - -extern int ide_driveid_update(ide_drive_t *); -extern int ide_config_drive_speed(ide_drive_t *, u8); -extern u8 eighty_ninty_three (ide_drive_t *); -extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *); - -extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); - -extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); - -extern void ide_timer_expiry(struct timer_list *t); -extern irqreturn_t ide_intr(int irq, void *dev_id); -extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); -extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool); -extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); - -void ide_init_disk(struct gendisk *, ide_drive_t *); - -#ifdef CONFIG_IDEPCI_PCIBUS_ORDER -extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); -#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) -#else -#define ide_pci_register_driver(d) pci_register_driver(d) -#endif - -static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) -{ - if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5) - return 1; - return 0; -} - -void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, - struct ide_hw *, struct ide_hw **); -void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); - -#ifdef CONFIG_BLK_DEV_IDEDMA_PCI -int ide_pci_set_master(struct pci_dev *, const char *); -unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); -int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); -int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); -#else -static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, - const struct ide_port_info *d) -{ - return -EINVAL; -} -#endif - -struct ide_pci_enablebit { - u8 reg; /* byte pci reg holding the enable-bit */ - u8 mask; /* mask to isolate the enable-bit */ - u8 val; /* 
value of masked reg when "enabled" */ -}; - -enum { - /* Uses ISA control ports not PCI ones. */ - IDE_HFLAG_ISA_PORTS = BIT(0), - /* single port device */ - IDE_HFLAG_SINGLE = BIT(1), - /* don't use legacy PIO blacklist */ - IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2), - /* set for the second port of QD65xx */ - IDE_HFLAG_QD_2ND_PORT = BIT(3), - /* use PIO8/9 for prefetch off/on */ - IDE_HFLAG_ABUSE_PREFETCH = BIT(4), - /* use PIO6/7 for fast-devsel off/on */ - IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5), - /* use 100-102 and 200-202 PIO values to set DMA modes */ - IDE_HFLAG_ABUSE_DMA_MODES = BIT(6), - /* - * keep DMA setting when programming PIO mode, may be used only - * for hosts which have separate PIO and DMA timings (ie. PMAC) - */ - IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7), - /* program host for the transfer mode after programming device */ - IDE_HFLAG_POST_SET_MODE = BIT(8), - /* don't program host/device for the transfer mode ("smart" hosts) */ - IDE_HFLAG_NO_SET_MODE = BIT(9), - /* trust BIOS for programming chipset/device for DMA */ - IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10), - /* host is CS5510/CS5520 */ - IDE_HFLAG_CS5520 = BIT(11), - /* ATAPI DMA is unsupported */ - IDE_HFLAG_NO_ATAPI_DMA = BIT(12), - /* set if host is a "non-bootable" controller */ - IDE_HFLAG_NON_BOOTABLE = BIT(13), - /* host doesn't support DMA */ - IDE_HFLAG_NO_DMA = BIT(14), - /* check if host is PCI IDE device before allowing DMA */ - IDE_HFLAG_NO_AUTODMA = BIT(15), - /* host uses MMIO */ - IDE_HFLAG_MMIO = BIT(16), - /* no LBA48 */ - IDE_HFLAG_NO_LBA48 = BIT(17), - /* no LBA48 DMA */ - IDE_HFLAG_NO_LBA48_DMA = BIT(18), - /* data FIFO is cleared by an error */ - IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19), - /* serialize ports */ - IDE_HFLAG_SERIALIZE = BIT(20), - /* host is DTC2278 */ - IDE_HFLAG_DTC2278 = BIT(21), - /* 4 devices on a single set of I/O ports */ - IDE_HFLAG_4DRIVES = BIT(22), - /* host is TRM290 */ - IDE_HFLAG_TRM290 = BIT(23), - /* use 32-bit I/O ops */ - IDE_HFLAG_IO_32BIT = BIT(24), - /* unmask IRQs */ - IDE_HFLAG_UNMASK_IRQS = BIT(25), - IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26), - /* serialize ports if DMA is possible (for sl82c105) */ - IDE_HFLAG_SERIALIZE_DMA = BIT(27), - /* force host out of "simplex" mode */ - IDE_HFLAG_CLEAR_SIMPLEX = BIT(28), - /* DSC overlap is unsupported */ - IDE_HFLAG_NO_DSC = BIT(29), - /* never use 32-bit I/O ops */ - IDE_HFLAG_NO_IO_32BIT = BIT(30), - /* never unmask IRQs */ - IDE_HFLAG_NO_UNMASK_IRQS = BIT(31), -}; - -#ifdef CONFIG_BLK_DEV_OFFBOARD -# define IDE_HFLAG_OFF_BOARD 0 -#else -# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE -#endif - -struct ide_port_info { - char *name; - - int (*init_chipset)(struct pci_dev *); - - void (*get_lock)(irq_handler_t, void *); - void (*release_lock)(void); - - void (*init_iops)(ide_hwif_t *); - void (*init_hwif)(ide_hwif_t *); - int (*init_dma)(ide_hwif_t *, - const struct ide_port_info *); - - const struct ide_tp_ops *tp_ops; - const struct ide_port_ops *port_ops; - const struct ide_dma_ops *dma_ops; - - struct ide_pci_enablebit enablebits[2]; - - hwif_chipset_t chipset; - - u16 max_sectors; /* if < than the default one */ - - u32 host_flags; - - int irq_flags; - - u8 pio_mask; - u8 swdma_mask; - u8 mwdma_mask; - u8 udma_mask; -}; - -/* - * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME - * requests. - */ -struct ide_pm_state { - /* PM state machine step value, currently driver specific */ - int pm_step; - /* requested PM state value (S1, S2, S3, S4, ...) 
*/ - u32 pm_state; - void* data; /* for driver use */ -}; - - -int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); -int ide_pci_init_two(struct pci_dev *, struct pci_dev *, - const struct ide_port_info *, void *); -void ide_pci_remove(struct pci_dev *); - -#ifdef CONFIG_PM -int ide_pci_suspend(struct pci_dev *, pm_message_t); -int ide_pci_resume(struct pci_dev *); -#else -#define ide_pci_suspend NULL -#define ide_pci_resume NULL -#endif - -void ide_map_sg(ide_drive_t *, struct ide_cmd *); -void ide_init_sg_cmd(struct ide_cmd *, unsigned int); - -#define BAD_DMA_DRIVE 0 -#define GOOD_DMA_DRIVE 1 - -struct drive_list_entry { - const char *id_model; - const char *id_firmware; -}; - -int ide_in_drive_list(u16 *, const struct drive_list_entry *); - -#ifdef CONFIG_BLK_DEV_IDEDMA -int ide_dma_good_drive(ide_drive_t *); -int __ide_dma_bad_drive(ide_drive_t *); - -u8 ide_find_dma_mode(ide_drive_t *, u8); - -static inline u8 ide_max_dma_mode(ide_drive_t *drive) -{ - return ide_find_dma_mode(drive, XFER_UDMA_6); -} - -void ide_dma_off_quietly(ide_drive_t *); -void ide_dma_off(ide_drive_t *); -void ide_dma_on(ide_drive_t *); -int ide_set_dma(ide_drive_t *); -void ide_check_dma_crc(ide_drive_t *); -ide_startstop_t ide_dma_intr(ide_drive_t *); - -int ide_allocate_dma_engine(ide_hwif_t *); -void ide_release_dma_engine(ide_hwif_t *); - -int ide_dma_prepare(ide_drive_t *, struct ide_cmd *); -void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *); - -#ifdef CONFIG_BLK_DEV_IDEDMA_SFF -int config_drive_for_dma(ide_drive_t *); -int ide_build_dmatable(ide_drive_t *, struct ide_cmd *); -void ide_dma_host_set(ide_drive_t *, int); -int ide_dma_setup(ide_drive_t *, struct ide_cmd *); -extern void ide_dma_start(ide_drive_t *); -int ide_dma_end(ide_drive_t *); -int ide_dma_test_irq(ide_drive_t *); -int ide_dma_sff_timer_expiry(ide_drive_t *); -u8 ide_dma_sff_read_status(ide_hwif_t *); -extern const struct ide_dma_ops sff_dma_ops; -#else -static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } -#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ - -void ide_dma_lost_irq(ide_drive_t *); -ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); - -#else -static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; } -static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } -static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; } -static inline void ide_dma_off(ide_drive_t *drive) { ; } -static inline void ide_dma_on(ide_drive_t *drive) { ; } -static inline void ide_dma_verbose(ide_drive_t *drive) { ; } -static inline int ide_set_dma(ide_drive_t *drive) { return 1; } -static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } -static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } -static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } -static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } -static inline int ide_dma_prepare(ide_drive_t *drive, - struct ide_cmd *cmd) { return 1; } -static inline void ide_dma_unmap_sg(ide_drive_t *drive, - struct ide_cmd *cmd) { ; } -#endif /* CONFIG_BLK_DEV_IDEDMA */ - -#ifdef CONFIG_BLK_DEV_IDEACPI -int ide_acpi_init(void); -bool ide_port_acpi(ide_hwif_t *hwif); -extern int ide_acpi_exec_tfs(ide_drive_t *drive); -extern void ide_acpi_get_timing(ide_hwif_t *hwif); -extern void ide_acpi_push_timing(ide_hwif_t *hwif); -void ide_acpi_init_port(ide_hwif_t *); -void ide_acpi_port_init_devices(ide_hwif_t *); -extern void 
ide_acpi_set_state(ide_hwif_t *hwif, int on); -#else -static inline int ide_acpi_init(void) { return 0; } -static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; } -static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } -static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } -static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } -static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; } -static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } -static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} -#endif - -void ide_check_nien_quirk_list(ide_drive_t *); -void ide_undecoded_slave(ide_drive_t *); - -void ide_port_apply_params(ide_hwif_t *); -int ide_sysfs_register_port(ide_hwif_t *); - -struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, - unsigned int); -void ide_host_free(struct ide_host *); -int ide_host_register(struct ide_host *, const struct ide_port_info *, - struct ide_hw **); -int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, - struct ide_host **); -void ide_host_remove(struct ide_host *); -int ide_legacy_device_add(const struct ide_port_info *, unsigned long); -void ide_port_unregister_devices(ide_hwif_t *); -void ide_port_scan(ide_hwif_t *); - -static inline void *ide_get_hwifdata (ide_hwif_t * hwif) -{ - return hwif->hwif_data; -} - -static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) -{ - hwif->hwif_data = data; -} - -u64 ide_get_lba_addr(struct ide_cmd *, int); -u8 ide_dump_status(ide_drive_t *, const char *, u8); - -struct ide_timing { - u8 mode; - u8 setup; /* t1 */ - u16 act8b; /* t2 for 8-bit io */ - u16 rec8b; /* t2i for 8-bit io */ - u16 cyc8b; /* t0 for 8-bit io */ - u16 active; /* t2 or tD */ - u16 recover; /* t2i or tK */ - u16 cycle; /* t0 */ - u16 udma; /* t2CYCTYP/2 */ -}; - -enum { - IDE_TIMING_SETUP = BIT(0), - IDE_TIMING_ACT8B = BIT(1), - IDE_TIMING_REC8B = BIT(2), - IDE_TIMING_CYC8B = BIT(3), - IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | - IDE_TIMING_CYC8B, - IDE_TIMING_ACTIVE = BIT(4), - IDE_TIMING_RECOVER = BIT(5), - IDE_TIMING_CYCLE = BIT(6), - IDE_TIMING_UDMA = BIT(7), - IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | - IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | - IDE_TIMING_CYCLE | IDE_TIMING_UDMA, -}; - -struct ide_timing *ide_timing_find_mode(u8); -u16 ide_pio_cycle_time(ide_drive_t *, u8); -void ide_timing_merge(struct ide_timing *, struct ide_timing *, - struct ide_timing *, unsigned int); -int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); - -#ifdef CONFIG_IDE_XFER_MODE -int ide_scan_pio_blacklist(char *); -const char *ide_xfer_verbose(u8); -int ide_pio_need_iordy(ide_drive_t *, const u8); -int ide_set_pio_mode(ide_drive_t *, u8); -int ide_set_dma_mode(ide_drive_t *, u8); -void ide_set_pio(ide_drive_t *, u8); -int ide_set_xfer_rate(ide_drive_t *, u8); -#else -static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; } -static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; } -#endif - -static inline void ide_set_max_pio(ide_drive_t *drive) -{ - ide_set_pio(drive, 255); -} - -char *ide_media_string(ide_drive_t *); - -extern const struct attribute_group *ide_dev_groups[]; -extern struct bus_type ide_bus_type; -extern struct class *ide_port_class; - -static inline void ide_dump_identify(u8 *id) -{ - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0); -} - -static inline int hwif_to_node(ide_hwif_t *hwif) -{ - return hwif->dev ? 
dev_to_node(hwif->dev) : -1; -} - -static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) -{ - ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1]; - - return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL; -} - -static inline void *ide_get_drivedata(ide_drive_t *drive) -{ - return drive->drive_data; -} - -static inline void ide_set_drivedata(ide_drive_t *drive, void *data) -{ - drive->drive_data = data; -} - -#define ide_port_for_each_dev(i, dev, port) \ - for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) - -#define ide_port_for_each_present_dev(i, dev, port) \ - for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \ - if ((dev)->dev_flags & IDE_DFLAG_PRESENT) - -#define ide_host_for_each_port(i, port, host) \ - for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) - - -#endif /* _IDE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 2967437f1b11..694264503119 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2020 Intel Corporation + * Copyright (c) 2018 - 2021 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -1088,6 +1088,48 @@ struct ieee80211_ext { } u; } __packed __aligned(2); +#define IEEE80211_TWT_CONTROL_NDP BIT(0) +#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) +#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) +#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4) +#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5) + +#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0) +#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1) +#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4) +#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5) +#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6) +#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7) +#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10) +#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15) + +enum ieee80211_twt_setup_cmd { + TWT_SETUP_CMD_REQUEST, + TWT_SETUP_CMD_SUGGEST, + TWT_SETUP_CMD_DEMAND, + TWT_SETUP_CMD_GROUPING, + TWT_SETUP_CMD_ACCEPT, + TWT_SETUP_CMD_ALTERNATE, + TWT_SETUP_CMD_DICTATE, + TWT_SETUP_CMD_REJECT, +}; + +struct ieee80211_twt_params { + __le16 req_type; + __le64 twt; + u8 min_twt_dur; + __le16 mantissa; + u8 channel; +} __packed; + +struct ieee80211_twt_setup { + u8 dialog_token; + u8 element_id; + u8 length; + u8 control; + u8 params[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1252,6 +1294,10 @@ struct ieee80211_mgmt { __le16 toa_error; u8 variable[0]; } __packed ftm; + struct { + u8 action_code; + u8 variable[]; + } __packed s1g; } u; } __packed action; } u; @@ -2179,6 +2225,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0 +#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01 + /* 802.11ax HE TX/RX MCS NSS Support */ #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) #define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) @@ -2264,6 +2312,9 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 + 
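The two LPI/SP values just added are meant to be matched against the Regulatory Info subfield of the HE 6 GHz Operation Information control byte, which the next hunk exposes as IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO (mask 0x38, i.e. bits 3..5). A minimal standalone sketch of that decode; the sample control byte and the get_bits() helper are illustrative stand-ins (in-kernel code would use u8_get_bits() from linux/bitfield.h), not part of this patch:

        #include <stdint.h>
        #include <stdio.h>

        #define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 /* bits 3..5 of control */
        #define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
        #define IEEE80211_6GHZ_CTRL_REG_SP_AP 1

        /* illustrative stand-in for the kernel's u8_get_bits()/FIELD_GET() */
        static uint8_t get_bits(uint8_t val, uint8_t mask)
        {
                /* mask & -mask isolates the lowest set bit, so the division
                 * shifts the masked field down to bit 0 */
                return (uint8_t)((val & mask) / (mask & -mask));
        }

        int main(void)
        {
                uint8_t control = 0x0a; /* hypothetical control byte from a 6 GHz oper IE */

                switch (get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
                case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
                        puts("low-power indoor AP");
                        break;
                case IEEE80211_6GHZ_CTRL_REG_SP_AP:
                        puts("standard-power AP");
                        break;
                default:
                        puts("other/reserved regulatory info");
                }
                return 0;
        }
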
/** * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel @@ -2280,6 +2331,7 @@ struct ieee80211_he_6ghz_oper { #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 #define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 +#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 u8 control; u8 ccfs0; u8 ccfs1; @@ -2287,6 +2339,44 @@ struct ieee80211_he_6ghz_oper { } __packed; /* + * "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021" + * defines four types in "Table 9-275a-Maximum Transmit Power Interpretation + * subfield encoding", and two categories for each type in "Table E-12-Regulatory + * Info subfield encoding in the United States", + * so there can be at most 8 Transmit Power Envelope elements in total. + */ +#define IEEE80211_TPE_MAX_IE_COUNT 8 +/* + * Per "Table 9-277—Meaning of Maximum Transmit Power Count subfield" + * of "IEEE Std 802.11ax™‐2021", the maximum number of power levels is 8. + */ +#define IEEE80211_MAX_NUM_PWR_LEVEL 8 + +#define IEEE80211_TPE_MAX_POWER_COUNT 8 + +/* transmit power interpretation type of transmit power envelope element */ +enum ieee80211_tx_power_intrpt_type { + IEEE80211_TPE_LOCAL_EIRP, + IEEE80211_TPE_LOCAL_EIRP_PSD, + IEEE80211_TPE_REG_CLIENT_EIRP, + IEEE80211_TPE_REG_CLIENT_EIRP_PSD, +}; + +/** + * struct ieee80211_tx_pwr_env + * + * This structure represents the "Transmit Power Envelope element" + */ +struct ieee80211_tx_pwr_env { + u8 tx_power_info; + s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; +} __packed; + +#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 +#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 +#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 + +/* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the HE Operations IE, starting from the byte * after the ext ID byte. 
It is assumed that he_oper_ie has at least @@ -2867,7 +2957,7 @@ enum ieee80211_eid { WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, @@ -2879,6 +2969,7 @@ enum ieee80211_eid { WLAN_EID_AID_RESPONSE = 211, WLAN_EID_S1G_BCN_COMPAT = 213, WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_TWT = 216, WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, @@ -2933,6 +3024,7 @@ enum ieee80211_category { WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, WLAN_CATEGORY_RADIO_MEASUREMENT = 5, + WLAN_CATEGORY_FAST_BBS_TRANSITION = 6, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -2947,6 +3039,7 @@ enum ieee80211_category { WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_S1G = 22, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; @@ -3020,6 +3113,20 @@ enum ieee80211_key_len { WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; +enum ieee80211_s1g_actioncode { + WLAN_S1G_AID_SWITCH_REQUEST, + WLAN_S1G_AID_SWITCH_RESPONSE, + WLAN_S1G_SYNC_CONTROL, + WLAN_S1G_STA_INFO_ANNOUNCE, + WLAN_S1G_EDCA_PARAM_SET, + WLAN_S1G_EL_OPERATION, + WLAN_S1G_TWT_SETUP, + WLAN_S1G_TWT_TEARDOWN, + WLAN_S1G_SECT_GROUP_ID_LIST, + WLAN_S1G_SECT_ID_FEEDBACK, + WLAN_S1G_TWT_INFORMATION = 11, +}; + #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 @@ -3110,6 +3217,11 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) +/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte + * of the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7) + /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index bf5c5f32c65e..b712217f7030 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -48,6 +48,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_TUNNEL6: case ARPHRD_SIT: case ARPHRD_IPGRE: + case ARPHRD_IP6GRE: case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 2cc35038a8ca..509e18c7e740 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -57,20 +57,25 @@ struct br_ip_list { #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) +#define BR_TX_FWD_OFFLOAD BIT(20) #define BR_DEFAULT_AGEING_TIME (300 * HZ) -extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); +struct net_bridge; +void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, + unsigned int cmd, struct ifreq *ifr, + void __user *uarg)); +int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, + struct ifreq *ifr, void __user *uarg); #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int 
proto); +bool br_multicast_has_router_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); -int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb, struct netlink_ext_ack *extack); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -87,6 +92,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } + +static inline bool br_multicast_has_router_adjacent(struct net_device *dev, + int proto) +{ + return true; +} + static inline bool br_multicast_enabled(const struct net_device *dev) { return false; @@ -95,13 +107,6 @@ static inline bool br_multicast_router(const struct net_device *dev) { return false; } -static inline int br_mdb_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) @@ -111,8 +116,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); -int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb, struct netlink_ext_ack *extack); +int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -140,12 +145,10 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, return -EINVAL; } -static inline int br_vlan_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb, - struct netlink_ext_ack *extack) +static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo) { - return -EOPNOTSUPP; + return -EINVAL; } #endif @@ -156,9 +159,7 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev, void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); u8 br_port_get_stp_state(const struct net_device *dev); -clock_t br_get_ageing_time(struct net_device *br_dev); -int br_fdb_replay(struct net_device *br_dev, struct net_device *dev, - struct notifier_block *nb); +clock_t br_get_ageing_time(const struct net_device *br_dev); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -183,17 +184,10 @@ static inline u8 br_port_get_stp_state(const struct net_device *dev) return BR_STATE_DISABLED; } -static inline clock_t br_get_ageing_time(struct net_device *br_dev) +static inline clock_t br_get_ageing_time(const struct net_device *br_dev) { return 0; } - -static inline int br_fdb_replay(struct net_device *br_dev, - struct net_device *dev, - struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} #endif #endif diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h index 4efb537f57f3..10e7521ecb6c 100644 --- a/include/linux/if_rmnet.h +++ b/include/linux/if_rmnet.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only - * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved. 
*/ #ifndef _LINUX_IF_RMNET_H_ @@ -12,10 +12,12 @@ struct rmnet_map_header { } __aligned(1); /* rmnet_map_header flags field: - * PAD_LEN: number of pad bytes following packet data - * CMD: 1 = packet contains a MAP command; 0 = packet contains data + * PAD_LEN: number of pad bytes following packet data + * CMD: 1 = packet contains a MAP command; 0 = packet contains data + * NEXT_HEADER: 1 = packet contains a V5 CSUM header; 0 = no V5 CSUM header */ #define MAP_PAD_LEN_MASK GENMASK(5, 0) +#define MAP_NEXT_HEADER_FLAG BIT(6) #define MAP_CMD_FLAG BIT(7) struct rmnet_map_dl_csum_trailer { @@ -23,7 +25,7 @@ struct rmnet_map_dl_csum_trailer { u8 flags; /* MAP_CSUM_DL_VALID_FLAG */ __be16 csum_start_offset; __be16 csum_length; - __be16 csum_value; + __sum16 csum_value; } __aligned(1); /* rmnet_map_dl_csum_trailer flags field: @@ -45,4 +47,26 @@ struct rmnet_map_ul_csum_header { #define MAP_CSUM_UL_UDP_FLAG BIT(14) #define MAP_CSUM_UL_ENABLED_FLAG BIT(15) +/* MAP CSUM headers */ +struct rmnet_map_v5_csum_header { + u8 header_info; + u8 csum_info; + __be16 reserved; +} __aligned(1); + +/* v5 header_info field + * NEXT_HEADER: indicates whether there is any next header + * HEADER_TYPE: indicates the type of this header + * + * csum_info field + * CSUM_VALID_OR_REQ: + * 1 = for UL, checksum computation is requested + * 1 = for DL, the checksum was validated and found valid + */ + +#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0) +#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1) +#define MAPV5_CSUMINFO_VALID_FLAG BIT(7) + +#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2 #endif /* !(_LINUX_IF_RMNET_H_) */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 64ce8cd1cfaf..93c262ecbdc9 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -41,9 +41,6 @@ struct ip_sf_socklist { __be32 sl_addr[]; }; -#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ - (count) * sizeof(__be32)) - #define IP_SFBLOCK 10 /* allocate this many at once */ /* ip_mc_socklist is real list now. Speed is not argument;
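IP_SFLSIZE() open-coded the size of a structure ending in a flexible array member. Such call sites have generally been converted to the kernel's struct_size() helper, which saturates instead of wrapping on multiplication overflow; that is presumably the motivation for dropping the macro here, since the callers live outside this header. A standalone sketch of the idea, with a reduced stand-in struct and a hand-rolled checked helper (in-kernel code would use struct_size() from linux/overflow.h):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        typedef uint32_t __be32; /* stand-in for the kernel type */

        /* reduced stand-in for struct ip_sf_socklist; the real struct has more fields */
        struct ip_sf_socklist {
                unsigned int sl_max;
                unsigned int sl_count;
                __be32 sl_addr[]; /* flexible array member */
        };

        /* what the removed IP_SFLSIZE(count) expanded to */
        #define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
                (count) * sizeof(__be32))

        /* struct_size()-style replacement: same arithmetic, but saturating
         * to SIZE_MAX instead of wrapping when count is huge */
        static size_t sfl_size(size_t count)
        {
                if (count > (SIZE_MAX - sizeof(struct ip_sf_socklist)) / sizeof(__be32))
                        return SIZE_MAX;
                return sizeof(struct ip_sf_socklist) + count * sizeof(__be32);
        }

        int main(void)
        {
                printf("%zu %zu\n", IP_SFLSIZE(10), sfl_size(10)); /* equal */
                printf("%zu\n", sfl_size(SIZE_MAX / 2));           /* saturates */
                return 0;
        }
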
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 7199280d89ca..c525fd51652f 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -26,6 +26,7 @@ struct ad_sd_calib_data { }; struct ad_sigma_delta; +struct device; struct iio_dev; /** @@ -132,8 +133,7 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, struct spi_device *spi, const struct ad_sigma_delta_info *info); -int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev); -void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); +int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7ce8a8adad58..c582e1a14232 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state { u16 scale; } calib[CROS_EC_SENSOR_MAX_AXIS]; s8 sign[CROS_EC_SENSOR_MAX_AXIS]; - u8 samples[CROS_EC_SAMPLE_SIZE]; + u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8); int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 33e939977444..8bdbaf3f3796 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -13,6 +13,7 @@ #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/irqreturn.h> +#include <linux/iio/iio.h> #include <linux/iio/trigger.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> @@ -20,6 +21,8 @@ #include <linux/platform_data/st_sensors_pdata.h> +#define LSM9DS0_IMU_DEV_NAME "lsm9ds0" + /* * Buffer size max case: 2bytes per channel, 3 channels in total + * 8bytes timestamp channel (s64) @@ -46,8 +49,8 @@ #define ST_SENSORS_MAX_NAME 17 #define ST_SENSORS_MAX_4WAI 8 -#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ - ch2, s, endian, rbits, sbits, addr) \ +#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr, ext) \ { \ .type = device_type, \ .modified = mod, \ @@ -63,8 +66,14 @@ .storagebits = sbits, \ .endianness = endian, \ }, \ + .ext_info = ext, \ } +#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr) \ + ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr, NULL) + #define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ st_sensors_sysfs_sampling_frequency_avail) @@ -213,6 +222,7 @@ struct st_sensor_settings { * struct st_sensor_data - ST sensor device status * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. + * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. 
* @vdd: Pointer to sensor's Vdd power supply @@ -232,7 +242,7 @@ struct st_sensor_settings { struct st_sensor_data { struct device *dev; struct iio_trigger *trig; - struct iio_mount_matrix *mount_matrix; + struct iio_mount_matrix mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; @@ -317,4 +327,24 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, void st_sensors_dev_name_probe(struct device *dev, char *name, int len); +/* Accelerometer */ +const struct st_sensor_settings *st_accel_get_settings(const char *name); +int st_accel_common_probe(struct iio_dev *indio_dev); +void st_accel_common_remove(struct iio_dev *indio_dev); + +/* Gyroscope */ +const struct st_sensor_settings *st_gyro_get_settings(const char *name); +int st_gyro_common_probe(struct iio_dev *indio_dev); +void st_gyro_common_remove(struct iio_dev *indio_dev); + +/* Magnetometer */ +const struct st_sensor_settings *st_magn_get_settings(const char *name); +int st_magn_common_probe(struct iio_dev *indio_dev); +void st_magn_common_remove(struct iio_dev *indio_dev); + +/* Pressure */ +const struct st_sensor_settings *st_press_get_settings(const char *name); +int st_press_common_probe(struct iio_dev *indio_dev); +void st_press_common_remove(struct iio_dev *indio_dev); + #endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 32addd5e790e..c9504e9da571 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,6 +6,10 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @id: used to identify device internally + * @driver_module: used to make it harder to undercut users + * @info_exist_lock: lock to prevent use during removal + * @trig_readonly: mark the current trigger immutable * @event_interface: event chrdevs associated with interrupt lines * @attached_buffers: array of buffers statically attached by the driver * @attached_buffers_cnt: number of buffers in the array of statically attached buffers @@ -19,6 +23,10 @@ * @groupcounter: index of next attribute group * @legacy_scan_el_group: attribute group for legacy scan elements attribute group * @legacy_buffer_group: attribute group for legacy buffer attributes group + * @scan_index_timestamp: cache of the index to the timestamp + * @clock_id: timestamping clock posix identifier + * @chrdev: associated character device + * @flags: file ops related flags including busy flag. 
* @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs reads * @read_buf: read buffer to be used for the initial reg read @@ -26,6 +34,10 @@ */ struct iio_dev_opaque { struct iio_dev indio_dev; + int id; + struct module *driver_module; + struct mutex info_exist_lock; + bool trig_readonly; struct iio_event_interface *event_interface; struct iio_buffer **attached_buffers; unsigned int attached_buffers_cnt; @@ -38,6 +50,12 @@ struct iio_dev_opaque { int groupcounter; struct attribute_group legacy_scan_el_group; struct attribute_group legacy_buffer_group; + + unsigned int scan_index_timestamp; + clockid_t clock_id; + struct cdev chrdev; + unsigned long flags; + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; @@ -46,7 +64,7 @@ struct iio_dev_opaque { #endif }; -#define to_iio_dev_opaque(indio_dev) \ - container_of(indio_dev, struct iio_dev_opaque, indio_dev) +#define to_iio_dev_opaque(_indio_dev) \ + container_of((_indio_dev), struct iio_dev_opaque, indio_dev) #endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f2d65e2e88b6..324561b7a5e8 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -127,8 +127,7 @@ struct iio_mount_matrix { ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); -int iio_read_mount_matrix(struct device *dev, const char *propname, - struct iio_mount_matrix *matrix); +int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix); typedef const struct iio_mount_matrix * (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, @@ -488,8 +487,6 @@ struct iio_buffer_setup_ops { /** * struct iio_dev - industrial I/O device - * @id: [INTERN] used to identify device internally - * @driver_module: [INTERN] used to make it harder to undercut users * @modes: [DRIVER] operating modes supported by device * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent @@ -503,9 +500,7 @@ struct iio_buffer_setup_ops { * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers * @scan_timestamp: [INTERN] set if any buffers have requested timestamp - * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) - * @trig_readonly: [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table @@ -513,19 +508,12 @@ struct iio_buffer_setup_ops { * @name: [DRIVER] name of the device. * @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver - * @clock_id: [INTERN] timestamping clock posix identifier - * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable - * @chrdev: [INTERN] associated character device - * @flags: [INTERN] file ops related flags including busy flag. 
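The field moves above take @id, @info_exist_lock, @chrdev and friends out of the public struct iio_dev; code outside the IIO core now goes through accessors such as the iio_device_id() declared in the iio.h hunk below, while core code converts with to_iio_dev_opaque(). A minimal core-side sketch, assuming IIO-internal context:

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>

/* Core-internal sketch: the removal lock now lives in the opaque struct. */
static int example_core_call_info(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (indio_dev->info)
		ret = 0; /* ... safe to call indio_dev->info callbacks here ... */
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}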
* @priv: [DRIVER] reference to driver's private information * **MUST** be accessed **ONLY** via iio_priv() helper */ struct iio_dev { - int id; - struct module *driver_module; - int modes; int currentmode; struct device dev; @@ -538,9 +526,7 @@ struct iio_dev { unsigned masklength; const unsigned long *active_scan_mask; bool scan_timestamp; - unsigned scan_index_timestamp; struct iio_trigger *trig; - bool trig_readonly; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; @@ -550,15 +536,13 @@ struct iio_dev { const char *name; const char *label; const struct iio_info *info; - clockid_t clock_id; - struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; - struct cdev chrdev; - unsigned long flags; void *priv; }; +int iio_device_id(struct iio_dev *indio_dev); + const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si); /** @@ -602,15 +586,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev) put_device(&indio_dev->dev); } -/** - * iio_device_get_clock() - Retrieve current timestamping clock for the device - * @indio_dev: IIO device structure containing the device - */ -static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) -{ - return indio_dev->clock_id; -} - +clockid_t iio_device_get_clock(const struct iio_dev *indio_dev); int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id); /** diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index f9b728d490b1..cf49997d5903 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -55,6 +55,7 @@ struct adis_timeout { * this should be the minimum size supported by the device. * @burst_max_len: Holds the maximum burst size when the device supports * more than one burst mode with different sizes + * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode */ struct adis_data { unsigned int read_delay; @@ -83,6 +84,7 @@ struct adis_data { unsigned int burst_reg_cmd; unsigned int burst_len; unsigned int burst_max_len; + unsigned int burst_max_speed_hz; }; /** diff --git a/include/linux/ima.h b/include/linux/ima.h index 61d5723ec303..b6ab66a546ae 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -11,9 +11,11 @@ #include <linux/fs.h> #include <linux/security.h> #include <linux/kexec.h> +#include <crypto/hash_info.h> struct linux_binprm; #ifdef CONFIG_IMA +extern enum hash_algo ima_get_current_hash_algo(void); extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns, @@ -33,10 +35,10 @@ extern void ima_post_path_mknod(struct user_namespace *mnt_userns, extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size); extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); -extern void ima_measure_critical_data(const char *event_label, - const char *event_name, - const void *buf, size_t buf_len, - bool hash); +extern int ima_measure_critical_data(const char *event_label, + const char *event_name, + const void *buf, size_t buf_len, + bool hash, u8 *digest, size_t digest_len); #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM extern void ima_appraise_parse_cmdline(void); @@ -64,6 +66,11 @@ static inline const char * const *arch_get_ima_policy(void) #endif #else +static inline enum hash_algo ima_get_current_hash_algo(void) +{ + return HASH_ALGO__LAST; +} + static inline 
int ima_bprm_check(struct linux_binprm *bprm) { return 0; @@ -137,10 +144,14 @@ static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} -static inline void ima_measure_critical_data(const char *event_label, +static inline int ima_measure_critical_data(const char *event_label, const char *event_name, const void *buf, size_t buf_len, - bool hash) {} + bool hash, u8 *digest, + size_t digest_len) +{ + return -ENOENT; +} #endif /* CONFIG_IMA */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 53aa0343bf69..a038feb63f23 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -41,7 +41,7 @@ struct in_device { unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; - unsigned char mr_ifc_count; + u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ @@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +#ifdef CONFIG_INET +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); +#else +static inline int inet_gifconf(struct net_device *dev, char __user *buf, + int len, int size) +{ + return 0; +} +#endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h index 77582ae1745a..ee1d44545f30 100644 --- a/include/linux/input/cy8ctmg110_pdata.h +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -5,7 +5,6 @@ struct cy8ctmg110_pdata { int reset_pin; /* Reset pin is wired to this GPIO (optional) */ - int irq_pin; /* IRQ pin is wired to this GPIO */ }; #endif diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h deleted file mode 100644 index 118b9af6e01a..000000000000 --- a/include/linux/input/cyttsp.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header file for: - * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. - * For use with Cypress Txx3xx parts. - * Supported parts include: - * CY8CTST341 - * CY8CTMA340 - * - * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. 
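The ima_measure_critical_data() rework above turns a void helper into one that reports errors and can hand back the digest it measured. A hedged usage sketch (the label and event strings are invented, and the 64-byte buffer is only a guess at an upper bound for the active hash):

#include <linux/ima.h>

/* Sketch: measure a buffer and fetch the digest that was recorded. */
static int example_measure_blob(const void *buf, size_t buf_len)
{
	u8 digest[64];

	return ima_measure_critical_data("example_label", "example_event",
					 buf, buf_len, true,
					 digest, sizeof(digest));
}

Note that the !CONFIG_IMA stub now returns -ENOENT, so callers can treat a disabled IMA like any other error path.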
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> - * - * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) - */ -#ifndef _CYTTSP_H_ -#define _CYTTSP_H_ - -#define CY_SPI_NAME "cyttsp-spi" -#define CY_I2C_NAME "cyttsp-i2c" -/* Active Power state scanning/processing refresh interval */ -#define CY_ACT_INTRVL_DFLT 0x00 /* ms */ -/* touch timeout for the Active power */ -#define CY_TCH_TMOUT_DFLT 0xFF /* ms */ -/* Low Power state scanning/processing refresh interval */ -#define CY_LP_INTRVL_DFLT 0x0A /* ms */ -/* Active distance in pixels for a gesture to be reported */ -#define CY_ACT_DIST_DFLT 0xF8 /* pixels */ - -#endif /* _CYTTSP_H_ */ diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h index 93e2ad67fc10..fa2cd8c63dcc 100644 --- a/include/linux/instrumentation.h +++ b/include/linux/instrumentation.h @@ -4,13 +4,16 @@ #if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION) +#include <linux/stringify.h> + /* Begin/end of an instrumentation safe region */ -#define instrumentation_begin() ({ \ - asm volatile("%c0: nop\n\t" \ +#define __instrumentation_begin(c) ({ \ + asm volatile(__stringify(c) ": nop\n\t" \ ".pushsection .discard.instr_begin\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define instrumentation_begin() __instrumentation_begin(__COUNTER__) /* * Because instrumentation_{begin,end}() can nest, objtool validation considers @@ -43,12 +46,13 @@ * To avoid this, have _end() be a NOP instruction, this ensures it will be * part of the condition block and does not escape. */ -#define instrumentation_end() ({ \ - asm volatile("%c0: nop\n\t" \ +#define __instrumentation_end(c) ({ \ + asm volatile(__stringify(c) ": nop\n\t" \ ".pushsection .discard.instr_end\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + ".long " __stringify(c) "b - .\n\t" \ + ".popsection\n\t"); \ }) +#define instrumentation_end() __instrumentation_end(__COUNTER__) #else # define instrumentation_begin() do { } while(0) # define instrumentation_end() do { } while(0) diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 2271939c5c31..2ea0f2f65ab6 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -13,6 +13,7 @@ enum integrity_status { INTEGRITY_PASS = 0, INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, + INTEGRITY_FAIL_IMMUTABLE, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, INTEGRITY_UNKNOWN, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 03faf20a6817..05a65eb155f7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -124,9 +124,9 @@ #define DMAR_MTRR_PHYSMASK8_REG 0x208 #define DMAR_MTRR_PHYSBASE9_REG 0x210 #define DMAR_MTRR_PHYSMASK9_REG 0x218 -#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */ -#define DMAR_VCMD_REG 0xe10 /* Virtual command register */ -#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ +#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */ +#define DMAR_VCMD_REG 0xe00 /* Virtual command register */ +#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */ #define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) #define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) @@ -537,7 +537,7 @@ struct context_entry { struct dmar_domain { int nid; /* node id */ - unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED]; + unsigned int 
iommu_refcnt[DMAR_UNITS_SUPPORTED]; /* Refcount of devices per iommu */ @@ -546,7 +546,10 @@ struct dmar_domain { * domain ids are 16 bit wide according * to VT-d spec, section 9.3 */ - bool has_iotlb_device; + u8 has_iotlb_device: 1; + u8 iommu_coherency: 1; /* indicate coherency of iommu access */ + u8 iommu_snooping: 1; /* indicate snooping control feature */ + struct list_head devices; /* all devices' list */ struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ @@ -558,10 +561,6 @@ struct dmar_domain { int agaw; int flags; /* flags to find out type of domain */ - - int iommu_coherency;/* indicate coherency of iommu access */ - int iommu_snooping; /* indicate snooping control feature*/ - int iommu_count; /* reference count of iommu */ int iommu_superpage;/* Level of superpages supported: 0 == 4KiB (no superpages), 1 == 2MiB, 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ @@ -606,6 +605,8 @@ struct intel_iommu { struct completion prq_complete; struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ #endif + struct iopf_queue *iopf_queue; + unsigned char iopfq_name[16]; struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -619,6 +620,7 @@ struct intel_iommu { u32 flags; /* Software defined flags */ struct dmar_drhd_unit *drhd; + void *perf_statistic; }; /* Per subdevice private data */ @@ -776,6 +778,7 @@ struct intel_svm_dev { struct device *dev; struct intel_iommu *iommu; struct iommu_sva sva; + unsigned long prq_seq_number; u32 pasid; int users; u16 did; @@ -791,7 +794,6 @@ struct intel_svm { u32 pasid; int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; - struct list_head list; }; #else static inline void intel_svm_check(struct intel_iommu *iommu) {} @@ -827,4 +829,32 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) #define intel_iommu_enabled (0) #endif +static inline const char *decode_prq_descriptor(char *str, size_t size, + u64 dw0, u64 dw1, u64 dw2, u64 dw3) +{ + char *buf = str; + int bytes; + + bytes = snprintf(buf, size, + "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx", + FIELD_GET(GENMASK_ULL(31, 16), dw0), + FIELD_GET(GENMASK_ULL(63, 12), dw1), + dw1 & BIT_ULL(0) ? 'r' : '-', + dw1 & BIT_ULL(1) ? 'w' : '-', + dw0 & BIT_ULL(52) ? 'x' : '-', + dw0 & BIT_ULL(53) ? 'p' : '-', + dw1 & BIT_ULL(2) ? 
'l' : '-', + FIELD_GET(GENMASK_ULL(51, 32), dw0), + FIELD_GET(GENMASK_ULL(11, 3), dw1)); + + /* Private Data */ + if (dw0 & BIT_ULL(9)) { + size -= bytes; + buf += bytes; + snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3); + } + + return str; +} + #endif diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 0d6b4bc191c5..aee8ff4739b1 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -8,11 +8,17 @@ #ifndef _INTEL_ISH_CLIENT_IF_H_ #define _INTEL_ISH_CLIENT_IF_H_ +#include <linux/device.h> +#include <linux/uuid.h> + struct ishtp_cl_device; struct ishtp_device; struct ishtp_cl; struct ishtp_fw_client; +typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev, + const char *format, ...); + /* Client state */ enum cl_state { ISHTP_CL_INITIALIZING = 0, @@ -36,7 +42,7 @@ struct ishtp_cl_driver { const char *name; const guid_t *guid; int (*probe)(struct ishtp_cl_device *dev); - int (*remove)(struct ishtp_cl_device *dev); + void (*remove)(struct ishtp_cl_device *dev); int (*reset)(struct ishtp_cl_device *dev); const struct dev_pm_ops *pm; }; @@ -75,8 +81,10 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device, /* Get the device * from ishtp device instance */ struct device *ishtp_device(struct ishtp_cl_device *cl_device); +/* wait for IPC resume */ +bool ishtp_wait_resume(struct ishtp_device *dev); /* Trace interface for clients */ -void *ishtp_trace_callback(struct ishtp_cl_device *cl_device); +ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA access */ struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device); diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 10fa80eef13a..57cceecbe37f 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -14,6 +14,11 @@ #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) +/* Page Request Queue depth */ +#define PRQ_ORDER 2 +#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) +#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) + /* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4777850a6dc7..1f22a30c0963 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -13,6 +13,7 @@ #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> +#include <linux/jump_label.h> #include <linux/atomic.h> #include <asm/ptrace.h> @@ -64,6 +65,8 @@ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. * Users will enable it explicitly by enable_irq() or enable_nmi() * later. + * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers, + * depends on IRQF_PERCPU. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -78,6 +81,7 @@ #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 #define IRQF_NO_AUTOEN 0x00080000 +#define IRQF_NO_DEBUG 0x00100000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -319,39 +323,8 @@ struct irq_affinity_desc { extern cpumask_var_t irq_default_affinity; -/* Internal implementation. 
Use the helpers below */ -extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, - bool force); - -/** - * irq_set_affinity - Set the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Fails if cpumask does not contain an online CPU - */ -static inline int -irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, false); -} - -/** - * irq_force_affinity - Force the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Same as irq_set_affinity, but without checking the mask against - * online cpus. - * - * Solely for low level cpu hotplug code, where we need to make per - * cpu interrupts affine before the cpu becomes online. - */ -static inline int -irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, true); -} +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); +extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); @@ -502,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, #ifdef CONFIG_IRQ_FORCED_THREADING # ifdef CONFIG_PREEMPT_RT -# define force_irqthreads (true) +# define force_irqthreads() (true) # else -extern bool force_irqthreads; +DECLARE_STATIC_KEY_FALSE(force_irqthreads_key); +# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key)) # endif #else -#define force_irqthreads (0) +#define force_irqthreads() (false) #endif #ifndef local_softirq_pending diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 4d40dfa75b55..86af6f0a00a2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -16,6 +16,7 @@ enum io_pgtable_fmt { ARM_V7S, ARM_MALI_LPAE, AMD_IOMMU_V1, + APPLE_DART, IO_PGTABLE_NUM_FMTS, }; @@ -73,10 +74,6 @@ struct io_pgtable_cfg { * to support up to 35 bits PA where the bit32, bit33 and bit34 are * encoded in the bit9, bit4 and bit5 of the PTE respectively. * - * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs - * on unmap, for DMA domains using the flush queue mechanism for - * delayed invalidation. - * * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table * for use in the upper half of a split address space. * @@ -86,7 +83,6 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) - #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) unsigned long quirks; @@ -136,6 +132,11 @@ struct io_pgtable_cfg { u64 transtab; u64 memattr; } arm_mali_lpae_cfg; + + struct { + u64 ttbr[4]; + u32 n_ttbrs; + } apple_dart_cfg; }; }; @@ -143,7 +144,9 @@ struct io_pgtable_cfg { * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * * @map: Map a physically contiguous memory region. + * @map_pages: Map a physically contiguous range of pages of the same size. * @unmap: Unmap a physically contiguous memory region. + * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. 
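The new ->map_pages()/->unmap_pages() callbacks batch a run of same-size pages into one call. Real formats map whole table levels at once; purely to illustrate the contract, here is a hypothetical fallback built on the single-entry ->map(), reporting partial progress through @mapped:

#include <linux/io-pgtable.h>

static int example_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			     phys_addr_t paddr, size_t pgsize, size_t pgcount,
			     int prot, gfp_t gfp, size_t *mapped)
{
	size_t done = 0;
	int ret = 0;

	while (pgcount--) {
		ret = ops->map(ops, iova + done, paddr + done, pgsize,
			       prot, gfp);
		if (ret)
			break;	/* the caller sees how far we got via *mapped */
		done += pgsize;
	}

	*mapped = done;
	return ret;
}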
* * These functions map directly onto the iommu_ops member functions with @@ -152,8 +155,14 @@ struct io_pgtable_cfg { struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); + size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; @@ -246,5 +255,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns; extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns; +extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns; #endif /* __IO_PGTABLE_H */ diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 04b650bcbbe5..649a4d7c241b 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -7,17 +7,18 @@ #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_cancel(struct files_struct *files); +void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { if (current->io_uring) - __io_uring_cancel(files); + __io_uring_cancel(false); } static inline void io_uring_task_cancel(void) { - return io_uring_files_cancel(NULL); + if (current->io_uring) + __io_uring_cancel(true); } static inline void io_uring_free(struct task_struct *tsk) { @@ -32,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file) static inline void io_uring_task_cancel(void) { } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h new file mode 100644 index 000000000000..94a24b36998f --- /dev/null +++ b/include/linux/ioam6.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_H +#define _LINUX_IOAM6_H + +#include <uapi/linux/ioam6.h> + +#endif /* _LINUX_IOAM6_H */ diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h new file mode 100644 index 000000000000..176e67919de3 --- /dev/null +++ b/include/linux/ioam6_genl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Generic Netlink API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_GENL_H +#define _LINUX_IOAM6_GENL_H + +#include <uapi/linux/ioam6_genl.h> + +#endif /* _LINUX_IOAM6_GENL_H */ diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h new file mode 100644 index 000000000000..07d9dfedd29d --- /dev/null +++ b/include/linux/ioam6_iptunnel.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Lightweight Tunnel API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_IPTUNNEL_H +#define _LINUX_IOAM6_IPTUNNEL_H + +#include 
<uapi/linux/ioam6_iptunnel.h> + +#endif /* _LINUX_IOAM6_IPTUNNEL_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index c87d0cb0de6d..24f8489583ca 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -91,13 +91,30 @@ struct iomap { const struct iomap_page_ops *page_ops; }; -static inline sector_t -iomap_sector(struct iomap *iomap, loff_t pos) +static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) { return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } /* + * Returns the inline data pointer for logical offset @pos. + */ +static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos) +{ + return iomap->inline_data + pos - iomap->offset; +} + +/* + * Check if the mapping's length is within the valid range for inline data. + * This is used to guard against accessing data beyond the page inline_data + * points at. + */ +static inline bool iomap_inline_data_valid(const struct iomap *iomap) +{ + return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); +} + +/* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to * buffered writes as unbuffered writes will not typically have pages @@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos) * associated page could not be obtained. */ struct iomap_page_ops { - int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len, - struct iomap *iomap); + int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, - struct page *page, struct iomap *iomap); + struct page *page); }; /* @@ -124,6 +140,7 @@ struct iomap_page_ops { #define IOMAP_DIRECT (1 << 4) /* direct I/O */ #define IOMAP_NOWAIT (1 << 5) /* do not block */ #define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */ +#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */ struct iomap_ops { /* @@ -145,21 +162,66 @@ struct iomap_ops { ssize_t written, unsigned flags, struct iomap *iomap); }; -/* - * Main iomap iterator function. +/** + * struct iomap_iter - Iterate through a range of a file + * @inode: Set at the start of the iteration and should not change. + * @pos: The current file position we are operating on. It is updated by + * calls to iomap_iter(). Treat as read-only in the body. + * @len: The remaining length of the file segment we're operating on. + * It is updated at the same time as @pos. + * @processed: The number of bytes processed by the body in the most recent + * iteration, or a negative errno. 0 causes the iteration to stop. + * @flags: Zero or more of the iomap_begin flags above. + * @iomap: Map describing the I/O iteration + * @srcmap: Source map for COW operations */ -typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, - void *data, struct iomap *iomap, struct iomap *srcmap); +struct iomap_iter { + struct inode *inode; + loff_t pos; + u64 len; + s64 processed; + unsigned flags; + struct iomap iomap; + struct iomap srcmap; +}; -loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, - unsigned flags, const struct iomap_ops *ops, void *data, - iomap_actor_t actor); +int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops); + +/** + * iomap_length - length of the current iomap iteration + * @iter: iteration structure + * + * Returns the length that the operation applies to for the current iteration. 
+ */ +static inline u64 iomap_length(const struct iomap_iter *iter) +{ + u64 end = iter->iomap.offset + iter->iomap.length; + + if (iter->srcmap.type != IOMAP_HOLE) + end = min(end, iter->srcmap.offset + iter->srcmap.length); + return min(iter->len, end - iter->pos); +} + +/** + * iomap_iter_srcmap - return the source map for the current iomap iteration + * @i: iteration structure + * + * Write operations on file systems with reflink support might require a + * source and a destination map. This function returns the source map + * for a given operation, which may or may not be identical to the destination + * map in &i->iomap. + */ +static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) +{ + if (i->srcmap.type != IOMAP_HOLE) + return &i->srcmap; + return &i->iomap; +} ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_set_page_dirty(struct page *page); int iomap_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); @@ -251,8 +313,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); - blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, - struct bio *bio, loff_t file_offset); + blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, + loff_t file_offset); }; /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 32d448050bf7..d2f3435e7d17 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -40,6 +40,7 @@ struct iommu_domain; struct notifier_block; struct iommu_sva; struct iommu_fault_event; +struct iommu_dma_cookie; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -60,6 +61,7 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API implementation */ #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ +#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ /* * These are the possible domain-types @@ -72,12 +74,17 @@ struct iommu_domain_geometry { * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. * This flag allows IOMMU drivers to implement * certain optimizations for these domains + * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB + * invalidation.
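Taken together, the iomap changes replace iomap_apply() and its actor callback with a loop the caller drives itself: iomap_iter() advances @pos and @len from @processed and returns a positive value while there is another mapping to handle. A minimal sketch of the new convention, with a hypothetical actor:

#include <linux/iomap.h>

s64 example_actor(const struct iomap_iter *iter);	/* hypothetical per-mapping work */

static int example_iomap_op(struct inode *inode, loff_t pos, u64 len,
			    const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = example_actor(&iter);

	return ret;	/* 0 once the whole range is processed, else -errno */
}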
*/ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API) +#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \ + __IOMMU_DOMAIN_DMA_API | \ + __IOMMU_DOMAIN_DMA_FQ) struct iommu_domain { unsigned type; @@ -86,9 +93,14 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; - void *iova_cookie; + struct iommu_dma_cookie *iova_cookie; }; +static inline bool iommu_is_dma_domain(struct iommu_domain *domain) +{ + return domain->type & __IOMMU_DOMAIN_DMA_API; +} + enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ @@ -160,16 +172,22 @@ enum iommu_dev_features { * @start: IOVA representing the start of the range to be flushed * @end: IOVA representing the end of the range to be flushed (inclusive) * @pgsize: The interval at which to perform the flush + * @freelist: Removed pages to free after sync + * @queued: Indicates that the flush will be queued * * This structure is intended to be updated by multiple calls to the * ->unmap() function in struct iommu_ops before eventually being passed - * into ->iotlb_sync(). + * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after + * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to + * them. @queued is set to indicate when ->iotlb_flush_all() will be called + * later instead of ->iotlb_sync(), so drivers may optimise accordingly. */ struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; struct page *freelist; + bool queued; }; /** @@ -180,7 +198,10 @@ struct iommu_iotlb_gather { * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the same size to + * an iommu domain. 
* @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush @@ -229,8 +250,14 @@ struct iommu_ops { void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, size_t size); @@ -414,11 +441,11 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); -extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg,unsigned int nents, int prot); -extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot); +extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot); +extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -476,8 +503,7 @@ int iommu_enable_nesting(struct iommu_domain *domain); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks); -void iommu_set_dma_strict(bool val); -bool iommu_get_dma_strict(struct iommu_domain *domain); +void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -497,29 +523,80 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, iommu_iotlb_gather_init(iotlb_gather); } +/** + * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint + * + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to check whether a new range and the gathered range + * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better + * than merging the two, which might lead to unnecessary invalidations. 
+ */ +static inline +bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ + unsigned long start = iova, end = start + size - 1; + + return gather->end != 0 && + (end + 1 < gather->start || start > gather->end + 1); +} + + +/** + * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands + * where only the address range matters, and simply minimising intermediate + * syncs is preferred. + */ +static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ + unsigned long end = iova + size - 1; + + if (gather->start > iova) + gather->start = iova; + if (gather->end < end) + gather->end = end; +} + +/** + * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation + * @domain: IOMMU domain to be invalidated + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to build invalidation commands based on individual + * pages, or with page size/table level hints which cannot be gathered if they + * differ. + */ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size) { - unsigned long start = iova, end = start + size - 1; - /* * If the new page is disjoint from the current range or is mapped at * a different granularity, then sync the TLB so that the gather * structure can be rewritten. */ - if (gather->pgsize != size || - end + 1 < gather->start || start > gather->end + 1) { - if (gather->pgsize) - iommu_iotlb_sync(domain, gather); - gather->pgsize = size; - } + if ((gather->pgsize && gather->pgsize != size) || + iommu_iotlb_gather_is_disjoint(gather, iova, size)) + iommu_iotlb_sync(domain, gather); - if (gather->end < end) - gather->end = end; + gather->pgsize = size; + iommu_iotlb_gather_add_range(gather, iova, size); +} - if (gather->start > start) - gather->start = start; +static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) +{ + return gather && gather->queued; } /* PCI device grouping function */ @@ -679,18 +756,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain, return 0; } -static inline size_t iommu_map_sg(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot) +static inline ssize_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) { - return 0; + return -ENODEV; } -static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, +static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { - return 0; + return -ENODEV; } static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) @@ -870,6 +947,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, { } +static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) +{ + return false; +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index e9bfe6972aed..3f53bc27a19b 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -6,46 +6,22 @@ 
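On the driver side, the gather helpers above compose as follows; this is a hypothetical unmap path, with the actual page-table walk stubbed out:

#include <linux/iommu.h>

size_t example_clear_ptes(struct iommu_domain *domain, unsigned long iova,
			  size_t size);	/* hypothetical page-table walk */

static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
			    size_t size, struct iommu_iotlb_gather *gather)
{
	size_t unmapped = example_clear_ptes(domain, iova, size);

	/*
	 * When gather->queued is set, ->iotlb_flush_all() runs later and
	 * per-range gathering would be wasted work.
	 */
	if (unmapped && !iommu_iotlb_gather_queued(gather))
		iommu_iotlb_gather_add_range(gather, iova, unmapped);

	return unmapped;
}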
#include <linux/sched/rt.h> #include <linux/iocontext.h> -/* - * Gives us 8 prio classes with 13-bits of data for each class - */ -#define IOPRIO_CLASS_SHIFT (13) -#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) - -#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) -#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) -#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) - -#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) +#include <uapi/linux/ioprio.h> /* - * These are the io priority groups as implemented by CFQ. RT is the realtime - * class, it always gets premium service. BE is the best-effort scheduling - * class, the default for any process. IDLE is the idle scheduling class, it - * is only served when no one else is using the disk. + * Default IO priority. */ -enum { - IOPRIO_CLASS_NONE, - IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, - IOPRIO_CLASS_IDLE, -}; +#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM) /* - * 8 best effort priority levels are supported + * Check that a priority value has a valid class. */ -#define IOPRIO_BE_NR (8) - -enum { - IOPRIO_WHO_PROCESS = 1, - IOPRIO_WHO_PGRP, - IOPRIO_WHO_USER, -}; +static inline bool ioprio_valid(unsigned short ioprio) +{ + unsigned short class = IOPRIO_PRIO_CLASS(ioprio); -/* - * Fallback BE priority - */ -#define IOPRIO_NORM (4) + return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE; +} /* * if process has set io priority explicitly, use that. if not, convert @@ -80,7 +56,7 @@ static inline int get_current_ioprio(void) if (ioc) return ioc->ioprio; - return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + return IOPRIO_DEFAULT; } /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 70b2ad3b9884..ef4a69865737 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -76,6 +76,9 @@ struct ipv6_devconf { __s32 disable_policy; __s32 ndisc_tclass; __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 31b347c9f8dd..c8293c817646 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,7 @@ enum irqchip_irq_state; * mechanism and from core side polling. 
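With the ioprio rework above, a caller that used to open-code the fallback priority can lean on IOPRIO_DEFAULT and the range-checking ioprio_valid(); a small sketch (the helper name is invented):

#include <linux/ioprio.h>

/* Fall back to the default priority when a stored value is out of range. */
static unsigned short example_effective_ioprio(unsigned short ioprio)
{
	return ioprio_valid(ioprio) ? ioprio : IOPRIO_DEFAULT;
}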
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable * IRQ_HIDDEN - Don't show up in /proc/interrupts + * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -99,6 +100,7 @@ enum { IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), IRQ_HIDDEN = (1 << 20), + IRQ_NO_DEBUG = (1 << 21), }; #define IRQF_MODIFY_MASK \ @@ -567,6 +569,7 @@ struct irq_chip { * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -579,6 +582,7 @@ enum { IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), }; #include <linux/irqdesc.h> diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index fa8c0455c352..1177f3a1aed5 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -7,8 +7,7 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H #define __LINUX_IRQCHIP_ARM_GIC_COMMON_H -#include <linux/types.h> -#include <linux/ioport.h> +#include <linux/irqchip/arm-vgic-info.h> #define GICD_INT_DEF_PRI 0xa0 #define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ @@ -16,28 +15,6 @@ (GICD_INT_DEF_PRI << 8) |\ GICD_INT_DEF_PRI) -enum gic_type { - GIC_V2, - GIC_V3, -}; - -struct gic_kvm_info { - /* GIC type */ - enum gic_type type; - /* Virtual CPU interface */ - struct resource vcpu; - /* Interrupt number */ - unsigned int maint_irq; - /* Virtual control interface */ - struct resource vctrl; - /* vlpi support */ - bool has_v4; - /* rvpeid support */ - bool has_v4_1; -}; - -const struct gic_kvm_info *gic_get_kvm_info(void); - struct irq_domain; struct fwnode_handle; int gicv2m_init(struct fwnode_handle *parent_handle, diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h new file mode 100644 index 000000000000..a75b2c7de69d --- /dev/null +++ b/include/linux/irqchip/arm-vgic-info.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/linux/irqchip/arm-vgic-info.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H +#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H + +#include <linux/types.h> +#include <linux/ioport.h> + +enum gic_type { + /* Full GICv2 */ + GIC_V2, + /* Full GICv3, optionally with v2 compat */ + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* No interrupt mask, no need to use the above field */ + bool no_maint_irq_mask; + /* Virtual control interface */ + struct resource vctrl; + /* vlpi support */ + bool has_v4; + /* rvpeid support */ + bool has_v4_1; + /* Deactivation impaired, subpar stuff */ + bool no_hw_deactivation; +}; + +#ifdef CONFIG_KVM +void vgic_set_kvm_info(const struct gic_kvm_info *info); +#else +static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {} +#endif + +#endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index df4651250785..59aea39785bf 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -158,25 +158,21 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc) desc->handle_irq(desc); } +int handle_irq_desc(struct irq_desc *desc); int generic_handle_irq(unsigned int irq); -#ifdef CONFIG_HANDLE_DOMAIN_IRQ +#ifdef CONFIG_IRQ_DOMAIN /* * Convert a HW interrupt number to a logical one using an IRQ domain, * and handle the resulting interrupt number. Return -EINVAL if - * conversion failed. Providing a NULL domain indicates that the - * conversion has already been done. + * conversion failed. */ -int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, - bool lookup, struct pt_regs *regs); +int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); -static inline int handle_domain_irq(struct irq_domain *domain, - unsigned int hwirq, struct pt_regs *regs) -{ - return __handle_domain_irq(domain, hwirq, true, regs); -} +#ifdef CONFIG_HANDLE_DOMAIN_IRQ +int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs); -#ifdef CONFIG_IRQ_DOMAIN int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs); #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 62a8e3d23829..23e4ee523576 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -41,13 +41,11 @@ struct fwnode_handle; struct irq_domain; struct irq_chip; struct irq_data; +struct irq_desc; struct cpumask; struct seq_file; struct irq_affinity_desc; -/* Number of irqs reserved for a legacy isa controller */ -#define NUM_ISA_INTERRUPTS 16 - #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 /** @@ -152,11 +150,10 @@ struct irq_domain_chip_generic; * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain - * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that - * support direct mapping - * @revmap_size: Size of the linear map table @linear_revmap[] + * @revmap_size: Size of the linear map table @revmap[] + * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map - * @linear_revmap: Linear table of hwirq->virq reverse mappings + * @revmap_mutex: Lock for the revmap + * @revmap: Linear table of irq_data pointers */ struct irq_domain { struct list_head link; @@ -176,11 +173,10 @@ struct irq_domain { /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; - unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; - struct mutex revmap_tree_mutex; - unsigned int linear_revmap[]; + struct mutex revmap_mutex; + struct irq_data __rcu *revmap[]; }; /* Irq domain flags */ @@ -210,6 +206,9 @@ enum { */ IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), + /* Irq domain doesn't translate anything */ + IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -348,6 +347,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no { return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } + +#ifdef CONFIG_IRQ_DOMAIN_NOMAP static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, @@ -355,14 +356,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod { return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } -static inline struct irq_domain *irq_domain_add_legacy_isa( - struct device_node *of_node, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, - host_data); -} + +extern unsigned int irq_create_direct_mapping(struct irq_domain *host); +#endif + static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) @@ -405,25 +402,37 @@ static inline unsigned int irq_create_mapping(struct irq_domain *host, return irq_create_mapping_affinity(host, hwirq, NULL); } +extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq, + unsigned int *irq); + +static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + return __irq_resolve_mapping(domain, hwirq, NULL); +} /** - * irq_linear_revmap() - Find a linux irq from a hw irq number. + * irq_find_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space - * - * This is a fast path alternative to irq_find_mapping() that can be - * called directly by irq controller code to save a handful of - * instructions. It is always safe to call, but won't find irqs mapped - * using the radix tree. */ +static inline unsigned int irq_find_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + unsigned int irq; + + if (__irq_resolve_mapping(domain, hwirq, &irq)) + return irq; + + return 0; +} + static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { - return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; + return irq_find_mapping(domain, hwirq); } -extern unsigned int irq_find_mapping(struct irq_domain *host, - irq_hw_number_t hwirq); -extern unsigned int irq_create_direct_mapping(struct irq_domain *host); extern const struct irq_domain_ops irq_domain_simple_ops; diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index b7b45ca82bea..790e7fcfc1a6 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -13,26 +13,22 @@ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -#include <linux/acpi.h> +#include <linux/types.h> /* - * Logical location of iSCSI Boot Format Table. 
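After the revmap rework above, irq_linear_revmap() becomes a plain alias for irq_find_mapping(), and code that wants the descriptor rather than the Linux irq number can resolve it in one step. A sketch of a chained-handler style lookup (how the domain and hwirq are obtained is left out):

#include <linux/irqdomain.h>
#include <linux/irqdesc.h>

static void example_handle_hwirq(struct irq_domain *domain,
				 irq_hw_number_t hwirq)
{
	struct irq_desc *desc = irq_resolve_mapping(domain, hwirq);

	if (desc)
		generic_handle_irq_desc(desc);
}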
- * If the value is NULL there is no iBFT on the machine. + * Physical location of iSCSI Boot Format Table. + * If the value is 0 there is no iBFT on the machine. */ -extern struct acpi_table_ibft *ibft_addr; +extern phys_addr_t ibft_phys_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. The - * mapped address is set in the ibft_addr variable. + * physical address is set in the ibft_phys_addr variable. */ #ifdef CONFIG_ISCSI_IBFT_FIND -unsigned long find_ibft_region(unsigned long *sizep); +void reserve_ibft_region(void); #else -static inline unsigned long find_ibft_region(unsigned long *sizep) -{ - *sizep = 0; - return 0; -} +static inline void reserve_ibft_region(void) {} #endif #endif /* ISCSI_IBFT_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index db0e1920cb12..fd933c45281a 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -780,6 +780,11 @@ struct journal_s unsigned long j_flags; /** + * @j_atomic_flags: Atomic journaling state flags. + */ + unsigned long j_atomic_flags; + + /** * @j_errno: * * Is there an outstanding uncleared error on the journal (from a prior @@ -905,6 +910,29 @@ struct journal_s struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; /** + * @j_shrinker: + * + * Journal head shrinker, reclaim buffer's journal head which + * has been written back. + */ + struct shrinker j_shrinker; + + /** + * @j_checkpoint_jh_count: + * + * Number of journal buffers on the checkpoint list. [j_list_lock] + */ + struct percpu_counter j_checkpoint_jh_count; + + /** + * @j_shrink_transaction: + * + * Record next transaction will shrink on the checkpoint list. + * [j_list_lock] + */ + transaction_t *j_shrink_transaction; + + /** * @j_head: * * Journal head: identifies the first unused block in the journal. 
@@ -1370,6 +1398,16 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) * mode */ #define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */ #define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */ +#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001 +#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002 +#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \ + JBD2_JOURNAL_FLUSH_ZEROOUT) + +/* + * Journal atomic flag definitions + */ +#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing + * buffer back to disk */ /* * Function declarations for the journaling transaction and buffer @@ -1407,6 +1445,7 @@ extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); +unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); @@ -1500,7 +1539,7 @@ extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); -extern int jbd2_journal_flush (journal_t *); +extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); extern void jbd2_journal_lock_updates (journal_t *); extern void jbd2_journal_unlock_updates (journal_t *); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 05f5554d860f..48b9b2a82767 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry) return (unsigned long)entry->key & 2UL; } -static inline void jump_entry_set_init(struct jump_entry *entry) +static inline void jump_entry_set_init(struct jump_entry *entry, bool set) { - entry->key |= 2; + if (set) + entry->key |= 2; + else + entry->key &= ~2; +} + +static inline int jump_entry_size(struct jump_entry *entry) +{ +#ifdef JUMP_LABEL_NOP_SIZE + return JUMP_LABEL_NOP_SIZE; +#else + return arch_jump_entry_size(entry); +#endif } #endif diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 465060acc981..a1d6fc82d7f0 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -7,6 +7,7 @@ #define _LINUX_KALLSYMS_H #include <linux/errno.h> +#include <linux/buildid.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/mm.h> @@ -15,8 +16,10 @@ #include <asm/sections.h> #define KSYM_NAME_LEN 128 -#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ - 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \ + (KSYM_NAME_LEN - 1) + \ + 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \ + (BUILD_ID_SIZE_MAX * 2) + 1) struct cred; struct module; @@ -91,8 +94,10 @@ const char *kallsyms_lookup(unsigned long addr, /* Look up a kernel symbol and return it in a text buffer. 
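The jbd2_journal_flush() signature change above threads a flags word through: 0 keeps the historical behaviour, while JBD2_JOURNAL_FLUSH_DISCARD or JBD2_JOURNAL_FLUSH_ZEROOUT additionally scrub the freed journal blocks. A sketch of a caller choosing between them (the helper is invented):

#include <linux/jbd2.h>

static int example_flush_journal(journal_t *journal, bool discard)
{
	unsigned int flags = discard ? JBD2_JOURNAL_FLUSH_DISCARD : 0;

	return jbd2_journal_flush(journal, flags);	/* 0 == old behaviour */
}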
*/ extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_symbol_build_id(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); +extern int sprint_backtrace_build_id(char *buffer, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -128,6 +133,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr) return 0; } +static inline int sprint_symbol_build_id(char *buffer, unsigned long address) +{ + *buffer = '\0'; + return 0; +} + static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) { *buffer = '\0'; @@ -140,6 +151,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr) return 0; } +static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h new file mode 100644 index 000000000000..4f85f562512c --- /dev/null +++ b/include/linux/kasan-tags.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_TAGS_H +#define _LINUX_KASAN_TAGS_H + +#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ +#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ +#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ + +#ifdef CONFIG_KASAN_HW_TAGS +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ +#else +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ +#endif + +#endif /* LINUX_KASAN_TAGS_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b1678a61e6a7..dd874a1ee862 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -2,6 +2,8 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include <linux/bug.h> +#include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> @@ -17,7 +19,6 @@ struct task_struct; /* kasan_data struct is used in KUnit tests for KASAN expected failures */ struct kunit_kasan_expectation { - bool report_expected; bool report_found; }; @@ -41,9 +42,9 @@ struct kunit_kasan_expectation { #endif extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; -extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]; -extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; -extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; +extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; +extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; +extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; int kasan_populate_early_shadow(const void *shadow_start, @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {} #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -#ifdef CONFIG_KASAN - -struct kasan_cache { - int alloc_meta_offset; - int free_meta_offset; - bool is_kmalloc; -}; - #ifdef CONFIG_KASAN_HW_TAGS DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); @@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void) return kasan_enabled(); } +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); +void kasan_free_pages(struct page *page, unsigned int order); + #else /* CONFIG_KASAN_HW_TAGS */ static inline bool 
kasan_enabled(void) { - return true; + return IS_ENABLED(CONFIG_KASAN); } static inline bool kasan_has_integrated_init(void) @@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void) return false; } +static __always_inline void kasan_alloc_pages(struct page *page, + unsigned int order, gfp_t flags) +{ + /* Only available for integrated init. */ + BUILD_BUG(); +} + +static __always_inline void kasan_free_pages(struct page *page, + unsigned int order) +{ + /* Only available for integrated init. */ + BUILD_BUG(); +} + #endif /* CONFIG_KASAN_HW_TAGS */ +#ifdef CONFIG_KASAN + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; + bool is_kmalloc; +}; + slab_flags_t __kasan_never_merge(void); static __always_inline slab_flags_t kasan_never_merge(void) { @@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size) __kasan_unpoison_range(addr, size); } -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_alloc_pages(struct page *page, +void __kasan_poison_pages(struct page *page, unsigned int order, bool init); +static __always_inline void kasan_poison_pages(struct page *page, unsigned int order, bool init) { if (kasan_enabled()) - __kasan_alloc_pages(page, order, init); + __kasan_poison_pages(page, order, init); } -void __kasan_free_pages(struct page *page, unsigned int order, bool init); -static __always_inline void kasan_free_pages(struct page *page, - unsigned int order, bool init) +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init); +static __always_inline void kasan_unpoison_pages(struct page *page, + unsigned int order, bool init) { if (kasan_enabled()) - __kasan_free_pages(page, order, init); + __kasan_unpoison_pages(page, order, init); } void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, @@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ -static inline bool kasan_enabled(void) -{ - return false; -} -static inline bool kasan_has_integrated_init(void) -{ - return false; -} static inline slab_flags_t kasan_never_merge(void) { return 0; } static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {} -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {} +static inline void kasan_poison_pages(struct page *page, unsigned int order, + bool init) {} +static inline void kasan_unpoison_pages(struct page *page, unsigned int order, + bool init) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index cc8fa109cfa3..20d1079e92b4 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -51,7 +51,8 @@ /* * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 - * otherwise. + * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in + * autoconf.h. */ #define IS_MODULE(option) __is_defined(option##_MODULE) @@ -66,7 +67,8 @@ /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', - * 0 otherwise. + * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in + * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1". 
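To make the autoconf.h note above concrete, here is a small illustration with a hypothetical CONFIG_FOO (the pr_info() calls are just placeholders):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void foo_report(void)
{
    /* CONFIG_FOO=y defines CONFIG_FOO, =m defines CONFIG_FOO_MODULE */
    if (IS_ENABLED(CONFIG_FOO))         /* 1 for both =y and =m */
        pr_info("foo is available\n");

    if (IS_MODULE(CONFIG_FOO))          /* 1 only for =m */
        pr_info("foo is built as a module\n");
}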
*/ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) diff --git a/include/linux/kcore.h b/include/linux/kcore.h index da676cdbd727..86c0f1d18998 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h @@ -11,14 +11,11 @@ enum kcore_type { KCORE_RAM, KCORE_VMEMMAP, KCORE_USER, - KCORE_OTHER, - KCORE_REMAP, }; struct kcore_list { struct list_head list; unsigned long addr; - unsigned long vaddr; size_t size; int type; }; diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 0125a677b67f..ea0f5e580fac 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -13,6 +13,8 @@ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> */ +#include <linux/list.h> + /* Shifted versions of the command enable bits are be used if the command * has no arguments (see kdb_check_flags). This allows commands, such as * go, to have different permissions depending upon whether it is called @@ -64,6 +66,17 @@ typedef enum { typedef int (*kdb_func_t)(int, const char **); +/* The KDB shell command table */ +typedef struct _kdbtab { + char *name; /* Command name */ + kdb_func_t func; /* Function to execute command */ + char *usage; /* Usage String for this command */ + char *help; /* Help message for this command */ + short minlen; /* Minimum legal # cmd chars required */ + kdb_cmdflags_t flags; /* Command behaviour flags */ + struct list_head list_node; /* Command list */ +} kdbtab_t; + #ifdef CONFIG_KGDB_KDB #include <linux/init.h> #include <linux/sched.h> @@ -193,19 +206,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) #endif /* ! CONFIG_KALLSYMS */ /* Dynamic kdb shell command registration */ -extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_flags(char *, kdb_func_t, char *, char *, - short, kdb_cmdflags_t); -extern int kdb_unregister(char *); +extern int kdb_register(kdbtab_t *cmd); +extern void kdb_unregister(kdbtab_t *cmd); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } static inline void kdb_init(int level) {} -static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen, - kdb_cmdflags_t flags) { return 0; } -static inline int kdb_unregister(char *cmd) { return 0; } +static inline int kdb_register(kdbtab_t *cmd) { return 0; } +static inline void kdb_unregister(kdbtab_t *cmd) {} #endif /* CONFIG_KGDB_KDB */ enum { KDB_NOT_INITIALIZED, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 15d8bad3d2f2..2776423a587e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -2,7 +2,7 @@ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/align.h> #include <linux/limits.h> #include <linux/linkage.h> @@ -10,10 +10,12 @@ #include <linux/types.h> #include <linux/compiler.h> #include <linux/bitops.h> +#include <linux/kstrtox.h> #include <linux/log2.h> #include <linux/math.h> #include <linux/minmax.h> #include <linux/typecheck.h> +#include <linux/panic.h> #include <linux/printk.h> #include <linux/build_bug.h> #include <linux/static_call_types.h> @@ -71,8 +73,19 @@ */ #define lower_32_bits(n) ((u32)((n) & 0xffffffff)) +/** + * upper_16_bits - return bits 16-31 of a number + * @n: the number we're accessing + */ +#define upper_16_bits(n) ((u16)((n) >> 16)) + +/** + * lower_16_bits - return bits 0-15 of a number + * @n: the number we're accessing + */ +#define lower_16_bits(n) ((u16)((n) & 0xffff)) + struct completion; -struct pt_regs; struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY @@ -177,159 +190,9 @@ void __might_fault(const char *file, int line); static inline void might_fault(void) { } #endif -extern struct atomic_notifier_head panic_notifier_list; -extern long (*panic_blink)(int state); -__printf(1, 2) -void panic(const char *fmt, ...) __noreturn __cold; -void nmi_panic(struct pt_regs *regs, const char *msg); -extern void oops_enter(void); -extern void oops_exit(void); -extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; -/* Internal, do not use. */ -int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); -int __must_check _kstrtol(const char *s, unsigned int base, long *res); - -int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); -int __must_check kstrtoll(const char *s, unsigned int base, long long *res); - -/** - * kstrtoul - convert a string to an unsigned long - * @s: The start of the string. The string must be null-terminated, and may also - * include a single newline before its terminating null. The first character - * may also be a plus sign, but not a minus sign. - * @base: The number base to use. The maximum supported base is 16. If base is - * given as 0, then the base of the string is automatically detected with the - * conventional semantics - If it begins with 0x the number will be parsed as a - * hexadecimal (case insensitive), if it otherwise begins with 0, it will be - * parsed as an octal number. Otherwise it will be parsed as a decimal. - * @res: Where to write the result of the conversion on success. - * - * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Preferred over simple_strtoul(). Return code must be checked. 
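The kdb change above replaces the positional-argument registration calls with a single kdbtab_t descriptor whose fields are documented in the struct. A hedged sketch of registering a hypothetical command under the new interface:

static int kdb_hello(int argc, const char **argv)
{
    kdb_printf("hello from kdb\n");
    return 0;
}

static kdbtab_t hello_cmd = {
    .name  = "hello",
    .func  = kdb_hello,
    .usage = "hello",
    .help  = "Print a test message",
};

static int __init hello_kdb_init(void)
{
    return kdb_register(&hello_cmd);    /* was kdb_register_flags(...) */
}

/* and on the way out: kdb_unregister(&hello_cmd), which now returns void */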
-*/ -static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) -{ - /* - * We want to shortcut function call, but - * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. - */ - if (sizeof(unsigned long) == sizeof(unsigned long long) && - __alignof__(unsigned long) == __alignof__(unsigned long long)) - return kstrtoull(s, base, (unsigned long long *)res); - else - return _kstrtoul(s, base, res); -} - -/** - * kstrtol - convert a string to a long - * @s: The start of the string. The string must be null-terminated, and may also - * include a single newline before its terminating null. The first character - * may also be a plus sign or a minus sign. - * @base: The number base to use. The maximum supported base is 16. If base is - * given as 0, then the base of the string is automatically detected with the - * conventional semantics - If it begins with 0x the number will be parsed as a - * hexadecimal (case insensitive), if it otherwise begins with 0, it will be - * parsed as an octal number. Otherwise it will be parsed as a decimal. - * @res: Where to write the result of the conversion on success. - * - * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Preferred over simple_strtol(). Return code must be checked. - */ -static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) -{ - /* - * We want to shortcut function call, but - * __builtin_types_compatible_p(long, long long) = 0. - */ - if (sizeof(long) == sizeof(long long) && - __alignof__(long) == __alignof__(long long)) - return kstrtoll(s, base, (long long *)res); - else - return _kstrtol(s, base, res); -} - -int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); -int __must_check kstrtoint(const char *s, unsigned int base, int *res); - -static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) -{ - return kstrtoull(s, base, res); -} - -static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) -{ - return kstrtoll(s, base, res); -} - -static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) -{ - return kstrtouint(s, base, res); -} - -static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) -{ - return kstrtoint(s, base, res); -} - -int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); -int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); -int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); -int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); -int __must_check kstrtobool(const char *s, bool *res); - -int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); -int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); -int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); -int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); -int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); -int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); -int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); -int __must_check kstrtos16_from_user(const char __user *s, size_t count, 
unsigned int base, s16 *res); -int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); -int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); -int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); - -static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) -{ - return kstrtoull_from_user(s, count, base, res); -} - -static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) -{ - return kstrtoll_from_user(s, count, base, res); -} - -static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) -{ - return kstrtouint_from_user(s, count, base, res); -} - -static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) -{ - return kstrtoint_from_user(s, count, base, res); -} - -/* - * Use kstrto<foo> instead. - * - * NOTE: simple_strto<foo> does not check for the range overflow and, - * depending on the input, may give interesting results. - * - * Use these functions if and only if you cannot use kstrto<foo>, because - * the conversion ends on the first non-digit character, which may be far - * beyond the supported range. It might be useful to parse the strings like - * 10x50 or 12:21 without altering original string or temporary buffer in use. - * Keep in mind above caveat. - */ - -extern unsigned long simple_strtoul(const char *,char **,unsigned int); -extern long simple_strtol(const char *,char **,unsigned int); -extern unsigned long long simple_strtoull(const char *,char **,unsigned int); -extern long long simple_strtoll(const char *,char **,unsigned int); - extern int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); @@ -357,6 +220,8 @@ int sscanf(const char *, const char *, ...); extern __scanf(2, 0) int vsscanf(const char *, const char *, va_list); +extern int no_hash_pointers_enable(char *str); + extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); @@ -370,52 +235,8 @@ extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); -#ifdef CONFIG_SMP -extern unsigned int sysctl_oops_all_cpu_backtrace; -#else -#define sysctl_oops_all_cpu_backtrace 0 -#endif /* CONFIG_SMP */ - extern void bust_spinlocks(int yes); -extern int panic_timeout; -extern unsigned long panic_print; -extern int panic_on_oops; -extern int panic_on_unrecovered_nmi; -extern int panic_on_io_nmi; -extern int panic_on_warn; -extern unsigned long panic_on_taint; -extern bool panic_on_taint_nousertaint; -extern int sysctl_panic_on_rcu_stall; -extern int sysctl_max_rcu_stall_to_panic; -extern int sysctl_panic_on_stackoverflow; - -extern bool crash_kexec_post_notifiers; - -/* - * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It - * holds a CPU number which is executing panic() currently. A value of - * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). - */ -extern atomic_t panic_cpu; -#define PANIC_CPU_INVALID -1 -/* - * Only to be used by arch init code. If the user over-wrote the default - * CONFIG_PANIC_TIMEOUT, honor it. 
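Note that the kstrto*() block deleted from kernel.h above is not removed from the kernel: it moves verbatim into the new <linux/kstrtox.h> shown later in this diff, which kernel.h now includes, so existing callers keep building. A minimal use, matching the documented semantics:

#include <linux/kstrtox.h>    /* previously reached via <linux/kernel.h> */

static int parse_threshold(const char *buf, unsigned long *out)
{
    /* a single trailing newline before the NUL is accepted;
     * returns 0, -ERANGE on overflow, or -EINVAL on a parse error */
    return kstrtoul(buf, 10, out);
}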
- */ -static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) -{ - if (panic_timeout == arch_default_timeout) - panic_timeout = timeout; -} -extern const char *print_tainted(void); -enum lockdep_ok { - LOCKDEP_STILL_OK, - LOCKDEP_NOW_UNRELIABLE -}; -extern void add_taint(unsigned flag, enum lockdep_ok); -extern int test_taint(unsigned flag); -extern unsigned long get_taint(void); extern int root_mountflags; extern bool early_boot_irqs_disabled; @@ -434,36 +255,6 @@ extern enum system_states { SYSTEM_SUSPEND, } system_state; -/* This cannot be an enum because some may be used in assembly source. */ -#define TAINT_PROPRIETARY_MODULE 0 -#define TAINT_FORCED_MODULE 1 -#define TAINT_CPU_OUT_OF_SPEC 2 -#define TAINT_FORCED_RMMOD 3 -#define TAINT_MACHINE_CHECK 4 -#define TAINT_BAD_PAGE 5 -#define TAINT_USER 6 -#define TAINT_DIE 7 -#define TAINT_OVERRIDDEN_ACPI_TABLE 8 -#define TAINT_WARN 9 -#define TAINT_CRAP 10 -#define TAINT_FIRMWARE_WORKAROUND 11 -#define TAINT_OOT_MODULE 12 -#define TAINT_UNSIGNED_MODULE 13 -#define TAINT_SOFTLOCKUP 14 -#define TAINT_LIVEPATCH 15 -#define TAINT_AUX 16 -#define TAINT_RANDSTRUCT 17 -#define TAINT_FLAGS_COUNT 18 -#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) - -struct taint_flag { - char c_true; /* character printed when tainted */ - char c_false; /* character printed when not tainted */ - bool module; /* also show as a per-module taint flag */ -}; - -extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; - extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 9e8ca8743c26..1093abf7c28c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -98,6 +98,11 @@ struct kernfs_elem_dir { * better directly in kernfs_node but is here to save space. */ struct kernfs_root *root; + /* + * Monotonic revision counter, used to identify if a directory + * node has changed during negative dentry revalidation. + */ + unsigned long rev; }; struct kernfs_elem_symlink { @@ -188,7 +193,7 @@ struct kernfs_root { u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; - /* list of kernfs_super_info of this root, protected by kernfs_mutex */ + /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ struct list_head supers; wait_queue_head_t deactivate_waitq; diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check! */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 392a3670944c..258cdde8d356 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -105,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); */ /** - * kgdb_arch_init - Perform any architecture specific initalization. 
+ * kgdb_arch_init - Perform any architecture specific initialization. * - * This function will handle the initalization of any architecture + * This function will handle the initialization of any architecture * specific callbacks. */ extern int kgdb_arch_init(void); @@ -229,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); /** - * kgdb_arch_late - Perform any architecture specific initalization. + * kgdb_arch_late - Perform any architecture specific initialization. * - * This function will handle the late initalization of any + * This function will handle the late initialization of any * architecture specific callbacks. This is an optional function for * handling things like late initialization of hw breakpoints. The * default implementation does nothing. diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 1883a4a9f16a..e4f3bfe08757 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -54,8 +54,6 @@ struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); -typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, - int trapnr); typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, struct pt_regs *); @@ -83,12 +81,6 @@ struct kprobe { /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; - /* - * ... called if executing addr causes a fault (eg. page fault). - * Return 1 if it handled fault, otherwise kernel will see it. - */ - kprobe_fault_handler_t fault_handler; - /* Saved opcode (which has been replaced with breakpoint) */ kprobe_opcode_t opcode; @@ -407,7 +399,9 @@ int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); -void free_insn_page(void *page); + +void *alloc_optinsn_page(void); +void free_optinsn_page(void *page); int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h new file mode 100644 index 000000000000..529974e22ea7 --- /dev/null +++ b/include/linux/kstrtox.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KSTRTOX_H +#define _LINUX_KSTRTOX_H + +#include <linux/compiler.h> +#include <linux/types.h> + +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); + +/** + * kstrtoul - convert a string to an unsigned long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign, but not a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. 
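Per the kprobes hunk above, the per-probe fault_handler callback is removed; a probe now supplies only pre/post handlers and leaves fault handling to the kprobes core. A sketch with a hypothetical probe target:

#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
    return 0;                           /* let the probed insn execute */
}

static struct kprobe kp = {
    .symbol_name = "do_sys_open",       /* hypothetical target symbol */
    .pre_handler = handler_pre,
    /* .fault_handler no longer exists after this change */
};

/* registered as before with register_kprobe(&kp) */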
+ * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Preferred over simple_strtoul(). Return code must be checked. +*/ +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. + */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +/** + * kstrtol - convert a string to a long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign or a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Preferred over simple_strtol(). Return code must be checked. + */ +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. + */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); + +int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); +int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); +int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); +int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); +int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); +int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); +int __must_check 
kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); +int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); +int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); +int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); + +static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} + +/* + * Use kstrto<foo> instead. + * + * NOTE: simple_strto<foo> does not check for the range overflow and, + * depending on the input, may give interesting results. + * + * Use these functions if and only if you cannot use kstrto<foo>, because + * the conversion ends on the first non-digit character, which may be far + * beyond the supported range. It might be useful to parse the strings like + * 10x50 or 12:21 without altering original string or temporary buffer in use. + * Keep in mind above caveat. + */ + +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); + +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} + +#endif /* _LINUX_KSTRTOX_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2484ed97e72f..346b0f269161 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -18,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), * @threadfn: the function to run in the thread * @data: data pointer for @threadfn() * @namefmt: printf-style format string for the thread name - * @arg...: arguments for @namefmt. + * @arg: arguments for @namefmt. * * This macro will create a kthread on the current node, leaving it in * the stopped state. 
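strtobool() survives the move as the thin inline wrapper over kstrtobool() at the bottom of kstrtox.h above, so both spellings behave identically. For example, in a hypothetical sysfs store handler:

#include <linux/device.h>
#include <linux/kstrtox.h>

static ssize_t enabled_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
    bool on;

    if (strtobool(buf, &on))            /* same as kstrtobool(buf, &on) */
        return -EINVAL;
    /* ... apply 'on' ... */
    return count;
}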
This is just a helper for kthread_create_on_node(); @@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); +void set_kthread_struct(struct task_struct *p); + void kthread_set_per_cpu(struct task_struct *k, int cpu); bool kthread_is_per_cpu(struct task_struct *k); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 76102efbf079..041ca7f15ea4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -28,6 +28,7 @@ #include <linux/rcuwait.h> #include <linux/refcount.h> #include <linux/nospec.h> +#include <linux/notifier.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -149,6 +150,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 +#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ @@ -157,6 +159,15 @@ static inline bool is_error_page(struct page *page) }) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) +bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except, + unsigned long *vcpu_bitmap, cpumask_var_t tmp); +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except); +bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, + unsigned long *vcpu_bitmap); + #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -304,7 +315,6 @@ struct kvm_vcpu { struct pid __rcu *pid; int sigset_active; sigset_t sigset; - struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; bool valid_wakeup; @@ -341,7 +351,16 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; + struct kvm_vcpu_stat stat; + char stats_id[KVM_STATS_NAME_SIZE]; struct kvm_dirty_ring dirty_ring; + + /* + * The index of the most recently used memslot by this vCPU. It's ok + * if this becomes stale due to memslot changes since we always check + * it is a valid slot. + */ + int last_used_slot; }; /* must be called with irqs disabled */ @@ -510,7 +529,7 @@ struct kvm_memslots { u64 generation; /* The mapping table from slot id to the index in memslots[]. */ short id_to_index[KVM_MEM_SLOTS_NUM]; - atomic_t lru_slot; + atomic_t last_used_slot; int used_slots; struct kvm_memory_slot memslots[]; }; @@ -523,10 +542,24 @@ struct kvm { #endif /* KVM_HAVE_MMU_RWLOCK */ struct mutex slots_lock; + + /* + * Protects the arch-specific fields of struct kvm_memory_slots in + * use by the VM. To be used under the slots_lock (above) or in a + * kvm->srcu critical section where acquiring the slots_lock would + * lead to deadlock with the synchronize_srcu in + * install_new_memslots. + */ + struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + /* Used to wait for completion of MMU notifiers. */ + spinlock_t mn_invalidate_lock; + unsigned long mn_active_invalidate_count; + struct rcuwait mn_memslots_update_rcuwait; + /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. 
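The request helpers hoisted to the top of kvm_host.h in this hunk (their old declarations are deleted further down) are what the new KVM_REQ_VM_BUGGED plumbing relies on; the usual pattern posts a request bit to every vCPU and kicks them, e.g.:

static void example_reload_mmu(struct kvm *kvm)
{
    /* hypothetical arch code that just invalidated MMU state */
    kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}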
online_vcpus is only @@ -585,6 +618,12 @@ struct kvm { pid_t userspace_pid; unsigned int max_halt_poll_ns; u32 dirty_ring_size; + bool vm_bugged; + +#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER + struct notifier_block pm_notifier; +#endif + char stats_id[KVM_STATS_NAME_SIZE]; }; #define kvm_err(fmt, ...) \ @@ -613,6 +652,30 @@ struct kvm { #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +static inline void kvm_vm_bugged(struct kvm *kvm) +{ + kvm->vm_bugged = true; + kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); +} + +#define KVM_BUG(cond, kvm, fmt...) \ +({ \ + int __ret = (cond); \ + \ + if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + +#define KVM_BUG_ON(cond, kvm) \ +({ \ + int __ret = (cond); \ + \ + if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) { return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); @@ -704,6 +767,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); +bool kvm_get_kvm_safe(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); bool file_is_kvm(struct file *file); void kvm_put_kvm_no_destroy(struct kvm *kvm); @@ -808,7 +872,6 @@ void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_release_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); -void kvm_get_pfn(kvm_pfn_t pfn); void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, @@ -927,14 +990,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); #endif -bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, - unsigned long *vcpu_bitmap, cpumask_var_t tmp); -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); -bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except); -bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap); +void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start, + unsigned long end); +void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, + unsigned long end); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -998,6 +1057,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); +#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER +int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); +#endif + #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif @@ -1014,6 +1077,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); +int kvm_arch_create_vm_debugfs(struct kvm *kvm); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* @@ -1137,29 +1201,49 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* - * search_memslots() and __gfn_to_memslot() are here because they are - * used in 
non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. - * gfn_to_memslot() itself isn't here as an inline because that would - * bloat other code too much. + * Returns a pointer to the memslot at slot_index if it contains gfn. + * Otherwise returns NULL. + */ +static inline struct kvm_memory_slot * +try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) +{ + struct kvm_memory_slot *slot; + + if (slot_index < 0 || slot_index >= slots->used_slots) + return NULL; + + /* + * slot_index can come from vcpu->last_used_slot which is not kept + * in sync with userspace-controllable memslot deletion. So use nospec + * to prevent the CPU from speculating past the end of memslots[]. + */ + slot_index = array_index_nospec(slot_index, slots->used_slots); + slot = &slots->memslots[slot_index]; + + if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) + return slot; + else + return NULL; +} + +/* + * Returns a pointer to the memslot that contains gfn and records the index of + * the slot in index. Otherwise returns NULL. * * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) { int start = 0, end = slots->used_slots; - int slot = atomic_read(&slots->lru_slot); struct kvm_memory_slot *memslots = slots->memslots; + struct kvm_memory_slot *slot; if (unlikely(!slots->used_slots)) return NULL; - if (gfn >= memslots[slot].base_gfn && - gfn < memslots[slot].base_gfn + memslots[slot].npages) - return &memslots[slot]; - while (start < end) { - slot = start + (end - start) / 2; + int slot = start + (end - start) / 2; if (gfn >= memslots[slot].base_gfn) end = slot; @@ -1167,25 +1251,51 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (start < slots->used_slots && gfn >= memslots[start].base_gfn && - gfn < memslots[start].base_gfn + memslots[start].npages) { - atomic_set(&slots->lru_slot, start); - return &memslots[start]; + slot = try_get_memslot(slots, start, gfn); + if (slot) { + *index = start; + return slot; } return NULL; } +/* + * __gfn_to_memslot() and its descendants are here because it is called from + * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot() + * itself isn't here as an inline because that would bloat other code too much. + */ static inline struct kvm_memory_slot * __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) { - return search_memslots(slots, gfn); + struct kvm_memory_slot *slot; + int slot_index = atomic_read(&slots->last_used_slot); + + slot = try_get_memslot(slots, slot_index, gfn); + if (slot) + return slot; + + slot = search_memslots(slots, gfn, &slot_index); + if (slot) { + atomic_set(&slots->last_used_slot, slot_index); + return slot; + } + + return NULL; } static inline unsigned long __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { - return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; + /* + * The index was checked originally in search_memslots. To avoid + * that a malicious guest builds a Spectre gadget out of e.g. page + * table walks, do not let the processor speculate loads outside + * the guest's registered memslots. 
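KVM_BUG() and KVM_BUG_ON() above convert an internal invariant violation into a one-shot WARN plus a VM-wide shutdown via kvm_vm_bugged(). A hypothetical helper showing the intended pattern, together with the last_used_slot lookup cache from the same hunk:

static int touch_gfn(struct kvm *kvm, gfn_t gfn)
{
    struct kvm_memory_slot *slot;

    /* hits slots->last_used_slot first, then binary-searches */
    slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
    if (KVM_BUG_ON(!slot, kvm))         /* WARNs once, marks vm_bugged */
        return -EIO;

    return 0;
}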
+ */ + unsigned long offset = gfn - slot->base_gfn; + offset = array_index_nospec(offset, slot->npages); + return slot->userspace_addr + offset * PAGE_SIZE; } static inline int memslot_id(struct kvm *kvm, gfn_t gfn) @@ -1236,27 +1346,173 @@ enum kvm_stat_kind { struct kvm_stat_data { struct kvm *kvm; - struct kvm_stats_debugfs_item *dbgfs_item; -}; - -struct kvm_stats_debugfs_item { - const char *name; - int offset; + const struct _kvm_stats_desc *desc; enum kvm_stat_kind kind; - int mode; }; -#define KVM_DBGFS_GET_MODE(dbgfs_item) \ - ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644) +struct _kvm_stats_desc { + struct kvm_stats_desc desc; + char name[KVM_STATS_NAME_SIZE]; +}; -#define VM_STAT(n, x, ...) \ - { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } -#define VCPU_STAT(n, x, ...) \ - { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } +#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ + .flags = type | unit | base | \ + BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ + BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ + BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ + .exponent = exp, \ + .size = sz, \ + .bucket_size = bsz + +#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vm_stat, generic.stat) \ + }, \ + .name = #stat, \ + } +#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ + }, \ + .name = #stat, \ + } +#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vm_stat, stat) \ + }, \ + .name = #stat, \ + } +#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ + { \ + { \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vcpu_stat, stat) \ + }, \ + .name = #stat, \ + } +/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ +#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ + SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) + +#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \ + unit, base, exponent, 1, 0) +#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \ + unit, base, exponent, 1, 0) +#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \ + unit, base, exponent, 1, 0) +#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \ + unit, base, exponent, sz, bsz) +#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \ + unit, base, exponent, sz, 0) + +/* Cumulative counter, read/write */ +#define STATS_DESC_COUNTER(SCOPE, name) \ + STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) +/* Instantaneous counter, read only */ +#define STATS_DESC_ICOUNTER(SCOPE, name) \ + STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) +/* Peak counter, read/write */ +#define STATS_DESC_PCOUNTER(SCOPE, name) \ + STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ + KVM_STATS_BASE_POW10, 0) + +/* Cumulative time in nanosecond */ 
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \ + STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9) +/* Linear histogram for time in nanosecond */ +#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \ + STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9, sz, bsz) +/* Logarithmic histogram for time in nanosecond */ +#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \ + STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9, sz) + +#define KVM_GENERIC_VM_STATS() \ + STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \ + STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests) + +#define KVM_GENERIC_VCPU_STATS() \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ + STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \ + HALT_POLL_HIST_COUNT), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ + HALT_POLL_HIST_COUNT), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ + HALT_POLL_HIST_COUNT) -extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; +ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, + const struct _kvm_stats_desc *desc, + void *stats, size_t size_stats, + char __user *user_buffer, size_t size, loff_t *offset); + +/** + * kvm_stats_linear_hist_update() - Update bucket value for linear histogram + * statistics data. + * + * @data: start address of the stats data + * @size: the number of bucket of the stats data + * @value: the new value used to update the linear histogram's bucket + * @bucket_size: the size (width) of a bucket + */ +static inline void kvm_stats_linear_hist_update(u64 *data, size_t size, + u64 value, size_t bucket_size) +{ + size_t index = div64_u64(value, bucket_size); + + index = min(index, size - 1); + ++data[index]; +} + +/** + * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram + * statistics data. 
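With the descriptor macros above, an architecture's stats tables become data rather than debugfs items; a hedged sketch, where mmu_shadow_zapped stands in for a hypothetical u64 member of that architecture's struct kvm_vm_stat:

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
    KVM_GENERIC_VM_STATS(),                     /* the generic counters above */
    STATS_DESC_COUNTER(VM, mmu_shadow_zapped),  /* hypothetical arch field */
};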
+ * + * @data: start address of the stats data + * @size: the number of bucket of the stats data + * @value: the new value used to update the logarithmic histogram's bucket + */ +static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) +{ + size_t index = fls64(value); + + index = min(index, size - 1); + ++data[index]; +} + +#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \ + kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize) +#define KVM_STATS_LOG_HIST_UPDATE(array, value) \ + kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value) + + +extern const struct kvm_stats_header kvm_vm_stats_header; +extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; +extern const struct kvm_stats_header kvm_vcpu_stats_header; +extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; + #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) { diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index a7580f69dda0..2237abb93ccd 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -76,5 +76,26 @@ struct kvm_mmu_memory_cache { }; #endif +#define HALT_POLL_HIST_COUNT 32 + +struct kvm_vm_stat_generic { + u64 remote_tlb_flush; + u64 remote_tlb_flush_requests; +}; + +struct kvm_vcpu_stat_generic { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_wait_ns; + u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; + u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; + u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; +}; + +#define KVM_STATS_NAME_SIZE 48 #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 329fd914cf24..a0b730be40ad 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -33,6 +33,12 @@ enum led_brightness { LED_FULL = 255, }; +enum led_default_state { + LEDS_DEFSTATE_OFF = 0, + LEDS_DEFSTATE_ON = 1, + LEDS_DEFSTATE_KEEP = 2, +}; + struct led_init_data { /* device fwnode handle */ struct fwnode_handle *fwnode; @@ -520,9 +526,9 @@ struct gpio_led { /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; }; -#define LEDS_GPIO_DEFSTATE_OFF 0 -#define LEDS_GPIO_DEFSTATE_ON 1 -#define LEDS_GPIO_DEFSTATE_KEEP 2 +#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF +#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON +#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP struct gpio_led_platform_data { int num_leds; diff --git a/include/linux/libata.h b/include/linux/libata.h index 5f550eb27f81..c0c64f03e107 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -161,6 +161,10 @@ enum { ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ + ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ + ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ + ATA_DFLAG_NCQ_PRIO, + ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ @@ -422,6 +426,7 @@ enum { ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ + ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -535,6 +540,7 @@ typedef 
void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) extern struct device_attribute dev_attr_unload_heads; #ifdef CONFIG_SATA_HOST extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_ncq_prio_supported; extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; @@ -1397,25 +1403,28 @@ extern struct device_attribute *ata_common_sdev_attrs[]; ATA_SCSI_COMPAT_IOCTL \ .queuecommand = ata_scsi_queuecmd, \ .dma_need_drain = ata_scsi_dma_need_drain, \ - .can_queue = ATA_DEF_QUEUE, \ - .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ .proc_name = drv_name, \ - .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity -#define ATA_BASE_SHT(drv_name) \ +#define ATA_SUBBASE_SHT(drv_name) \ __ATA_BASE_SHT(drv_name), \ + .can_queue = ATA_DEF_QUEUE, \ + .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ + .slave_configure = ata_scsi_slave_config + +#define ATA_BASE_SHT(drv_name) \ + ATA_SUBBASE_SHT(drv_name), \ .sdev_attrs = ata_common_sdev_attrs #ifdef CONFIG_SATA_HOST extern struct device_attribute *ata_ncq_sdev_attrs[]; #define ATA_NCQ_SHT(drv_name) \ - __ATA_BASE_SHT(drv_name), \ + ATA_SUBBASE_SHT(drv_name), \ .sdev_attrs = ata_ncq_sdev_attrs, \ .change_queue_depth = ata_scsi_change_queue_depth #endif @@ -1451,7 +1460,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline bool ata_is_host_link(const struct ata_link *link) { - return 1; + return true; } #endif /* CONFIG_SATA_PMP */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 89b69e645ac7..7074aa9af525 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -278,6 +278,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } +void nvdimm_delete(struct nvdimm *nvdimm); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h deleted file mode 100644 index 0908abda9c1b..000000000000 --- a/include/linux/lightnvm.h +++ /dev/null @@ -1,697 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef NVM_H -#define NVM_H - -#include <linux/blkdev.h> -#include <linux/types.h> -#include <uapi/linux/lightnvm.h> - -enum { - NVM_IO_OK = 0, - NVM_IO_REQUEUE = 1, - NVM_IO_DONE = 2, - NVM_IO_ERR = 3, - - NVM_IOTYPE_NONE = 0, - NVM_IOTYPE_GC = 1, -}; - -/* common format */ -#define NVM_GEN_CH_BITS (8) -#define NVM_GEN_LUN_BITS (8) -#define NVM_GEN_BLK_BITS (16) -#define NVM_GEN_RESERVED (32) - -/* 1.2 format */ -#define NVM_12_PG_BITS (16) -#define NVM_12_PL_BITS (4) -#define NVM_12_SEC_BITS (4) -#define NVM_12_RESERVED (8) - -/* 2.0 format */ -#define NVM_20_SEC_BITS (24) -#define NVM_20_RESERVED (8) - -enum { - NVM_OCSSD_SPEC_12 = 12, - NVM_OCSSD_SPEC_20 = 20, -}; - -struct ppa_addr { - /* Generic structure for all addresses */ - union { - /* generic device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 reserved : NVM_GEN_RESERVED; - } a; - - /* 1.2 device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 
blk : NVM_GEN_BLK_BITS; - u64 pg : NVM_12_PG_BITS; - u64 pl : NVM_12_PL_BITS; - u64 sec : NVM_12_SEC_BITS; - u64 reserved : NVM_12_RESERVED; - } g; - - /* 2.0 device format */ - struct { - u64 grp : NVM_GEN_CH_BITS; - u64 pu : NVM_GEN_LUN_BITS; - u64 chk : NVM_GEN_BLK_BITS; - u64 sec : NVM_20_SEC_BITS; - u64 reserved : NVM_20_RESERVED; - } m; - - struct { - u64 line : 63; - u64 is_cached : 1; - } c; - - u64 ppa; - }; -}; - -struct nvm_rq; -struct nvm_id; -struct nvm_dev; -struct nvm_tgt_dev; -struct nvm_chk_meta; - -typedef int (nvm_id_fn)(struct nvm_dev *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, - struct nvm_chk_meta *); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); -typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, - dma_addr_t *); -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); - -struct nvm_dev_ops { - nvm_id_fn *identity; - nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb_tbl; - - nvm_get_chk_meta_fn *get_chk_meta; - - nvm_submit_io_fn *submit_io; - - nvm_create_dma_pool_fn *create_dma_pool; - nvm_destroy_dma_pool_fn *destroy_dma_pool; - nvm_dev_dma_alloc_fn *dev_dma_alloc; - nvm_dev_dma_free_fn *dev_dma_free; -}; - -#ifdef CONFIG_NVM - -#include <linux/file.h> -#include <linux/dmapool.h> - -enum { - /* HW Responsibilities */ - NVM_RSP_L2P = 1 << 0, - NVM_RSP_ECC = 1 << 1, - - /* Physical Adressing Mode */ - NVM_ADDRMODE_LINEAR = 0, - NVM_ADDRMODE_CHANNEL = 1, - - /* Plane programming mode for LUN */ - NVM_PLANE_SINGLE = 1, - NVM_PLANE_DOUBLE = 2, - NVM_PLANE_QUAD = 4, - - /* Status codes */ - NVM_RSP_SUCCESS = 0x0, - NVM_RSP_NOT_CHANGEABLE = 0x1, - NVM_RSP_ERR_FAILWRITE = 0x40ff, - NVM_RSP_ERR_EMPTYPAGE = 0x42ff, - NVM_RSP_ERR_FAILECC = 0x4281, - NVM_RSP_ERR_FAILCRC = 0x4004, - NVM_RSP_WARN_HIGHECC = 0x4700, - - /* Device opcodes */ - NVM_OP_PWRITE = 0x91, - NVM_OP_PREAD = 0x92, - NVM_OP_ERASE = 0x90, - - /* PPA Command Flags */ - NVM_IO_SNGL_ACCESS = 0x0, - NVM_IO_DUAL_ACCESS = 0x1, - NVM_IO_QUAD_ACCESS = 0x2, - - /* NAND Access Modes */ - NVM_IO_SUSPEND = 0x80, - NVM_IO_SLC_MODE = 0x100, - NVM_IO_SCRAMBLE_ENABLE = 0x200, - - /* Block Types */ - NVM_BLK_T_FREE = 0x0, - NVM_BLK_T_BAD = 0x1, - NVM_BLK_T_GRWN_BAD = 0x2, - NVM_BLK_T_DEV = 0x4, - NVM_BLK_T_HOST = 0x8, - - /* Memory capabilities */ - NVM_ID_CAP_SLC = 0x1, - NVM_ID_CAP_CMD_SUSPEND = 0x2, - NVM_ID_CAP_SCRAMBLE = 0x4, - NVM_ID_CAP_ENCRYPT = 0x8, - - /* Memory types */ - NVM_ID_FMTYPE_SLC = 0, - NVM_ID_FMTYPE_MLC = 1, - - /* Device capabilities */ - NVM_ID_DCAP_BBLKMGMT = 0x1, - NVM_UD_DCAP_ECC = 0x2, -}; - -struct nvm_id_lp_mlc { - u16 num_pairs; - u8 pairs[886]; -}; - -struct nvm_id_lp_tbl { - __u8 id[8]; - struct nvm_id_lp_mlc mlc; -}; - -struct nvm_addrf_12 { - u8 ch_len; - u8 lun_len; - u8 blk_len; - u8 pg_len; - u8 pln_len; - u8 sec_len; - - u8 ch_offset; - u8 lun_offset; - u8 blk_offset; - u8 pg_offset; - u8 pln_offset; - u8 sec_offset; - - u64 ch_mask; - u64 lun_mask; - u64 blk_mask; - u64 pg_mask; - u64 pln_mask; - u64 sec_mask; -}; - -struct nvm_addrf { - u8 ch_len; - u8 lun_len; - u8 chk_len; - u8 sec_len; - u8 rsv_len[2]; - - u8 ch_offset; - u8 lun_offset; - u8 chk_offset; - u8 sec_offset; - u8 rsv_off[2]; - - u64 ch_mask; - u64 lun_mask; - 
u64 chk_mask; - u64 sec_mask; - u64 rsv_mask[2]; -}; - -enum { - /* Chunk states */ - NVM_CHK_ST_FREE = 1 << 0, - NVM_CHK_ST_CLOSED = 1 << 1, - NVM_CHK_ST_OPEN = 1 << 2, - NVM_CHK_ST_OFFLINE = 1 << 3, - - /* Chunk types */ - NVM_CHK_TP_W_SEQ = 1 << 0, - NVM_CHK_TP_W_RAN = 1 << 1, - NVM_CHK_TP_SZ_SPEC = 1 << 4, -}; - -/* - * Note: The structure size is linked to nvme_nvm_chk_meta such that the same - * buffer can be used when converting from little endian to cpu addressing. - */ -struct nvm_chk_meta { - u8 state; - u8 type; - u8 wi; - u8 rsvd[5]; - u64 slba; - u64 cnlb; - u64 wp; -}; - -struct nvm_target { - struct list_head list; - struct nvm_tgt_dev *dev; - struct nvm_tgt_type *type; - struct gendisk *disk; -}; - -#define ADDR_EMPTY (~0ULL) - -#define NVM_TARGET_DEFAULT_OP (101) -#define NVM_TARGET_MIN_OP (3) -#define NVM_TARGET_MAX_OP (80) - -#define NVM_VERSION_MAJOR 1 -#define NVM_VERSION_MINOR 0 -#define NVM_VERSION_PATCH 0 - -#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ - -struct nvm_rq; -typedef void (nvm_end_io_fn)(struct nvm_rq *); - -struct nvm_rq { - struct nvm_tgt_dev *dev; - - struct bio *bio; - - union { - struct ppa_addr ppa_addr; - dma_addr_t dma_ppa_list; - }; - - struct ppa_addr *ppa_list; - - void *meta_list; - dma_addr_t dma_meta_list; - - nvm_end_io_fn *end_io; - - uint8_t opcode; - uint16_t nr_ppas; - uint16_t flags; - - u64 ppa_status; /* ppa media status */ - int error; - - int is_seq; /* Sequential hint flag. 1.2 only */ - - void *private; -}; - -static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) -{ - return pdu - sizeof(struct nvm_rq); -} - -static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) -{ - return rqdata + 1; -} - -static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd) -{ - return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; -} - -enum { - NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ - NVM_BLK_ST_BAD = 0x8, /* Bad block */ -}; - -/* Instance geometry */ -struct nvm_geo { - /* device reported version */ - u8 major_ver_id; - u8 minor_ver_id; - - /* kernel short version */ - u8 version; - - /* instance specific geometry */ - int num_ch; - int num_lun; /* per channel */ - - /* calculated values */ - int all_luns; /* across channels */ - int all_chunks; /* across channels */ - - int op; /* over-provision in instance */ - - sector_t total_secs; /* across channels */ - - /* chunk geometry */ - u32 num_chk; /* chunks per lun */ - u32 clba; /* sectors per chunk */ - u16 csecs; /* sector size */ - u16 sos; /* out-of-band area size */ - bool ext; /* metadata in extended data buffer */ - u32 mdts; /* Max data transfer size*/ - - /* device write constrains */ - u32 ws_min; /* minimum write size */ - u32 ws_opt; /* optimal write size */ - u32 mw_cunits; /* distance required for successful read */ - u32 maxoc; /* maximum open chunks */ - u32 maxocpu; /* maximum open chunks per parallel unit */ - - /* device capabilities */ - u32 mccap; - - /* device timings */ - u32 trdt; /* Avg. Tread (ns) */ - u32 trdm; /* Max Tread (ns) */ - u32 tprt; /* Avg. Tprog (ns) */ - u32 tprm; /* Max Tprog (ns) */ - u32 tbet; /* Avg. 
Terase (ns) */ - u32 tbem; /* Max Terase (ns) */ - - /* generic address format */ - struct nvm_addrf addrf; - - /* 1.2 compatibility */ - u8 vmnt; - u32 cap; - u32 dom; - - u8 mtype; - u8 fmtype; - - u16 cpar; - u32 mpos; - - u8 num_pln; - u8 pln_mode; - u16 num_pg; - u16 fpg_sz; -}; - -/* sub-device structure */ -struct nvm_tgt_dev { - /* Device information */ - struct nvm_geo geo; - - /* Base ppas for target LUNs */ - struct ppa_addr *luns; - - struct request_queue *q; - - struct nvm_dev *parent; - void *map; -}; - -struct nvm_dev { - struct nvm_dev_ops *ops; - - struct list_head devices; - - /* Device information */ - struct nvm_geo geo; - - unsigned long *lun_map; - void *dma_pool; - - /* Backend device */ - struct request_queue *q; - char name[DISK_NAME_LEN]; - void *private_data; - - struct kref ref; - void *rmap; - - struct mutex mlock; - spinlock_t lock; - - /* target management */ - struct list_head area_list; - struct list_head targets; -}; - -static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; - l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; - l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; - l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; - l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; - l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; - l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; - l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; - } - - return l; -} - -static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - l.ppa = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; - l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; - l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; - l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; - l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; - l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; - l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; - l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; - } - - return l; -} - -static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, - struct ppa_addr p) -{ - struct nvm_geo *geo = &dev->geo; - u64 caddr; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; - - caddr = (u64)p.g.pg << ppaf->pg_offset; - caddr |= (u64)p.g.pl << ppaf->pln_offset; - caddr |= (u64)p.g.sec << ppaf->sec_offset; - } else { - caddr = p.m.sec; - } - - return caddr; -} - -static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, - void *addrf, u32 ppa32) -{ - struct ppa_addr ppa64; - - ppa64.ppa = 0; - - if (ppa32 == -1) { - ppa64.ppa = ADDR_EMPTY; - } else if (ppa32 & (1U << 31)) { - ppa64.c.line = ppa32 & ((~0U) >> 1); - ppa64.c.is_cached = 1; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - 
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> - ppaf->ch_offset; - ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> - ppaf->lun_offset; - ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> - ppaf->blk_offset; - ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> - ppaf->pg_offset; - ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> - ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> - ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> - lbaf->ch_offset; - ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> - lbaf->lun_offset; - ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> - lbaf->chk_offset; - ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> - lbaf->sec_offset; - } - } - - return ppa64; -} - -static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, - void *addrf, struct ppa_addr ppa64) -{ - u32 ppa32 = 0; - - if (ppa64.ppa == ADDR_EMPTY) { - ppa32 = ~0U; - } else if (ppa64.c.is_cached) { - ppa32 |= ppa64.c.line; - ppa32 |= 1U << 31; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa32 |= ppa64.g.ch << ppaf->ch_offset; - ppa32 |= ppa64.g.lun << ppaf->lun_offset; - ppa32 |= ppa64.g.blk << ppaf->blk_offset; - ppa32 |= ppa64.g.pg << ppaf->pg_offset; - ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa32 |= ppa64.m.grp << lbaf->ch_offset; - ppa32 |= ppa64.m.pu << lbaf->lun_offset; - ppa32 |= ppa64.m.chk << lbaf->chk_offset; - ppa32 |= ppa64.m.sec << lbaf->sec_offset; - } - } - - return ppa32; -} - -static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, - struct ppa_addr *ppa) -{ - struct nvm_geo *geo = &dev->geo; - int last = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - int sec = ppa->g.sec; - - sec++; - if (sec == geo->ws_min) { - int pg = ppa->g.pg; - - sec = 0; - pg++; - if (pg == geo->num_pg) { - int pl = ppa->g.pl; - - pg = 0; - pl++; - if (pl == geo->num_pln) - last = 1; - - ppa->g.pl = pl; - } - ppa->g.pg = pg; - } - ppa->g.sec = sec; - } else { - ppa->m.sec++; - if (ppa->m.sec == geo->clba) - last = 1; - } - - return last; -} - -typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, - int flags); -typedef void (nvm_tgt_exit_fn)(void *, bool); -typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); -typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); - -enum { - NVM_TGT_F_DEV_L2P = 0, - NVM_TGT_F_HOST_L2P = 1 << 0, -}; - -struct nvm_tgt_type { - const char *name; - unsigned int version[3]; - int flags; - - /* target entry points */ - const struct block_device_operations *bops; - nvm_tgt_capacity_fn *capacity; - - /* module-specific init/teardown */ - nvm_tgt_init_fn *init; - nvm_tgt_exit_fn *exit; - - /* sysfs */ - nvm_tgt_sysfs_init_fn *sysfs_init; - nvm_tgt_sysfs_exit_fn *sysfs_exit; - - /* For internal use */ - struct list_head list; - struct module *owner; -}; - -extern int nvm_register_tgt_type(struct nvm_tgt_type *); -extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); - -extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); -extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); - -extern struct nvm_dev *nvm_alloc_dev(int); -extern int nvm_register(struct nvm_dev *); -extern void nvm_unregister(struct nvm_dev *); - -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, - int, struct nvm_chk_meta *); -extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, 
- int, int); -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern void nvm_end_io(struct nvm_rq *); - -#else /* CONFIG_NVM */ -struct nvm_dev_ops; - -static inline struct nvm_dev *nvm_alloc_dev(int node) -{ - return ERR_PTR(-EINVAL); -} -static inline int nvm_register(struct nvm_dev *dev) -{ - return -EINVAL; -} -static inline void nvm_unregister(struct nvm_dev *dev) {} -#endif /* CONFIG_NVM */ -#endif /* LIGHTNVM.H */ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h index 17b5943727d5..fd3d0b358f22 100644 --- a/include/linux/linear_range.h +++ b/include/linux/linear_range.h @@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r, int linear_range_get_selector_high(const struct linear_range *r, unsigned int val, unsigned int *selector, bool *found); +void linear_range_get_selector_within(const struct linear_range *r, + unsigned int val, unsigned int *selector); int linear_range_get_selector_low_array(const struct linear_range *r, int ranges, unsigned int val, unsigned int *selector, bool *found); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 9dcaa3e582c9..1b5fceb565df 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -146,7 +146,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. - * @isolate: callback function that is resposible for deciding what to do with + * @isolate: callback function that is responsible for deciding what to do with * the item currently being scanned * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. @@ -172,7 +172,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru, * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. - * @isolate: callback function that is resposible for deciding what to do with + * @isolate: callback function that is responsible for deciding what to do with * the item currently being scanned * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. diff --git a/include/linux/litex.h b/include/linux/litex.h index 5ea9ccf5cce4..f2edb86d5f44 100644 --- a/include/linux/litex.h +++ b/include/linux/litex.h @@ -11,18 +11,6 @@ #include <linux/io.h> -/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */ -#if defined(CONFIG_LITEX_SUBREG_SIZE) && \ - (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4) -#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE -#else -#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1! -#endif -#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8) - -/* LiteX subregisters of any width are always aligned on a 4-byte boundary */ -#define LITEX_SUBREG_ALIGN 0x4 - static inline void _write_litex_subregister(u32 val, void __iomem *addr) { writel((u32 __force)cpu_to_le32(val), addr); @@ -42,115 +30,54 @@ static inline u32 _read_litex_subregister(void __iomem *addr) * 32-bit wide logical CSR will be laid out as four 32-bit physical * subregisters, each one containing one byte of meaningful data. 
* - * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus - */ - -/* number of LiteX subregisters needed to store a register of given reg_size */ -#define _litex_num_subregs(reg_size) \ - (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1) - -/* - * since the number of 4-byte aligned subregisters required to store a single - * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the - * next adjacent LiteX CSR register w.r.t. the offset of the current one also - * depends on how many subregisters the latter is spread across - */ -#define _next_reg_off(off, size) \ - ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN) - -/* - * The purpose of `_litex_[set|get]_reg()` is to implement the logic of - * writing to/reading from the LiteX CSR in a single place that can be then - * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()` - * accessors for the appropriate data width. - * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly - * discouraged, as they perform no error checking on the requested data width! - */ - -/** - * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register) - * @reg: Address of the CSR - * @reg_size: The width of the CSR expressed in the number of bytes - * @val: Value to be written to the CSR + * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which + * means that only larger-than-32-bit CSRs will be split across multiple + * subregisters (e.g., a 64-bit CSR will be spread across two consecutive + * 32-bit subregisters). * - * This function splits a single (possibly multi-byte) LiteX CSR write into - * a series of subregister writes with a proper offset. - * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). - */ -static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val) -{ - u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT; - - while (shift > 0) { - shift -= LITEX_SUBREG_SIZE_BIT; - _write_litex_subregister(val >> shift, reg); - reg += LITEX_SUBREG_ALIGN; - } -} - -/** - * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register) - * @reg: Address of the CSR - * @reg_size: The width of the CSR expressed in the number of bytes - * - * Return: Value read from the CSR - * - * This function generates a series of subregister reads with a proper offset - * and joins their results into a single (possibly multi-byte) LiteX CSR value. - * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)). 
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus */ -static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size) -{ - u64 r; - u8 i; - - r = _read_litex_subregister(reg); - for (i = 1; i < _litex_num_subregs(reg_size); i++) { - r <<= LITEX_SUBREG_SIZE_BIT; - reg += LITEX_SUBREG_ALIGN; - r |= _read_litex_subregister(reg); - } - return r; -} static inline void litex_write8(void __iomem *reg, u8 val) { - _litex_set_reg(reg, sizeof(u8), val); + _write_litex_subregister(val, reg); } static inline void litex_write16(void __iomem *reg, u16 val) { - _litex_set_reg(reg, sizeof(u16), val); + _write_litex_subregister(val, reg); } static inline void litex_write32(void __iomem *reg, u32 val) { - _litex_set_reg(reg, sizeof(u32), val); + _write_litex_subregister(val, reg); } static inline void litex_write64(void __iomem *reg, u64 val) { - _litex_set_reg(reg, sizeof(u64), val); + _write_litex_subregister(val >> 32, reg); + _write_litex_subregister(val, reg + 4); } static inline u8 litex_read8(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u8)); + return _read_litex_subregister(reg); } static inline u16 litex_read16(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u16)); + return _read_litex_subregister(reg); } static inline u32 litex_read32(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u32)); + return _read_litex_subregister(reg); } static inline u64 litex_read64(void __iomem *reg) { - return _litex_get_reg(reg, sizeof(u64)); + return ((u64)_read_litex_subregister(reg) << 32) | + _read_litex_subregister(reg + 4); } #endif /* _LINUX_LITEX_H */ diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index ded90b097e6e..975e33b793a7 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -6,6 +6,8 @@ #include <linux/percpu-defs.h> #include <linux/lockdep.h> +#ifndef CONFIG_PREEMPT_RT + typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -14,29 +16,14 @@ typedef struct { } local_lock_t; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LL_DEP_MAP_INIT(lockname) \ +# define LOCAL_LOCK_DEBUG_INIT(lockname) \ .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ - .lock_type = LD_LOCK_PERCPU, \ - } -#else -# define LL_DEP_MAP_INIT(lockname) -#endif - -#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } - -#define __local_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ - lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \ - LD_WAIT_CONFIG, LD_WAIT_INV, \ - LD_LOCK_PERCPU); \ -} while (0) + .lock_type = LD_LOCK_PERCPU, \ + }, \ + .owner = NULL, -#ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void local_lock_acquire(local_lock_t *l) { lock_map_acquire(&l->dep_map); @@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l) lock_map_release(&l->dep_map); } +static inline void local_lock_debug_init(local_lock_t *l) +{ + l->owner = NULL; +} #else /* CONFIG_DEBUG_LOCK_ALLOC */ +# define LOCAL_LOCK_DEBUG_INIT(lockname) static inline void local_lock_acquire(local_lock_t *l) { } static inline void local_lock_release(local_lock_t *l) { } +static inline void local_lock_debug_init(local_lock_t *l) { } #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ +#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, 
sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_PERCPU); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { } local_lock_release(this_cpu_ptr(lock)); \ local_irq_restore(flags); \ } while (0) + +#else /* !CONFIG_PREEMPT_RT */ + +/* + * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the + * critical section while staying preemptible. + */ +typedef spinlock_t local_lock_t; + +#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname)) + +#define __local_lock_init(l) \ + do { \ + local_spin_lock_init((l)); \ + } while (0) + +#define __local_lock(__lock) \ + do { \ + migrate_disable(); \ + spin_lock(this_cpu_ptr((__lock))); \ + } while (0) + +#define __local_lock_irq(lock) __local_lock(lock) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __local_lock(lock); \ + } while (0) + +#define __local_unlock(__lock) \ + do { \ + spin_unlock(this_cpu_ptr((__lock))); \ + migrate_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) __local_unlock(lock) + +#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) + +#endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 0520c0cd73f4..3bc9f7410e21 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -27,7 +27,8 @@ struct rpc_task; struct nlmsvc_binding { __be32 (*fopen)(struct svc_rqst *, struct nfs_fh *, - struct file **); + struct file **, + int mode); void (*fclose)(struct file *); }; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 666f5f310a04..c4ae6506b8b3 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -10,6 +10,8 @@ #ifndef LINUX_LOCKD_LOCKD_H #define LINUX_LOCKD_LOCKD_H +/* XXX: a lot of this should really be under fs/lockd. */ + #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> @@ -154,7 +156,8 @@ struct nlm_rqst { struct nlm_file { struct hlist_node f_list; /* linked list */ struct nfs_fh f_handle; /* NFS file handle */ - struct file * f_file; /* VFS file pointer */ + struct file * f_file[2]; /* VFS file pointers, + indexed by O_ flags */ struct nlm_share * f_shares; /* DOS shares */ struct list_head f_blocks; /* blocked locks */ unsigned int f_locks; /* guesstimate # of locks */ @@ -267,6 +270,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); /* * Server-side lock handling */ +int lock_to_openmode(struct file_lock *); __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); @@ -286,7 +290,7 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); * File handling for the server personality */ __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, - struct nfs_fh *); + struct nlm_lock *); void nlm_release_file(struct nlm_file *); void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); @@ -301,7 +305,8 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) { - return locks_inode(file->f_file); + return locks_inode(file->f_file[O_RDONLY] ? 
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY]); } static inline int __nlm_privileged_request4(const struct sockaddr *sap) diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 7ab9f264313f..a98309c0121c 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -109,11 +109,5 @@ int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); -/* -int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); - */ #endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index e709fe5924f2..5ae766f26e04 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -37,12 +37,7 @@ int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); -/* -int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); -int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); - */ + extern const struct rpc_version nlm_version4; #endif /* LOCKD_XDR4_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 5cf387813754..9fe165beb0f9 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -306,31 +306,29 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) -#define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && \ - lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \ - } while (0) +#define lockdep_assert(cond) \ + do { WARN_ON(debug_locks && !(cond)); } while (0) -#define lockdep_assert_not_held(l) do { \ - WARN_ON(debug_locks && \ - lockdep_is_held(l) == LOCK_STATE_HELD); \ - } while (0) +#define lockdep_assert_once(cond) \ + do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0) -#define lockdep_assert_held_write(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ - } while (0) +#define lockdep_assert_held(l) \ + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) -#define lockdep_assert_held_read(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ - } while (0) +#define lockdep_assert_not_held(l) \ + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD) -#define lockdep_assert_held_once(l) do { \ - WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ - } while (0) +#define lockdep_assert_held_write(l) \ + lockdep_assert(lockdep_is_held_type(l, 0)) -#define lockdep_assert_none_held_once() do { \ - WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ - } while (0) +#define lockdep_assert_held_read(l) \ + lockdep_assert(lockdep_is_held_type(l, 1)) + +#define lockdep_assert_held_once(l) \ + lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) + +#define lockdep_assert_none_held_once() \ + lockdep_assert_once(!current->lockdep_depth) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) @@ -407,6 +405,9 @@ extern int lock_is_held(const void *); extern int lockdep_is_held(const void *); #define lockdep_is_held_type(l, r) (1) +#define lockdep_assert(c) do { } while (0) +#define lockdep_assert_once(c) do { } while (0) + #define lockdep_assert_held(l) do { (void)(l); } while (0) #define lockdep_assert_not_held(l) do { (void)(l); } while (0) #define lockdep_assert_held_write(l) do { (void)(l); } while (0) diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 2ec9ff5a7fff..3e726ace5c62 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -52,7 +52,7 @@ enum lockdep_lock_type { * NR_LOCKDEP_CACHING_CLASSES ... Number of classes * cached in the instance of lockdep_map * - * Currently main class (subclass == 0) and signle depth subclass + * Currently main class (subclass == 0) and single depth subclass * are cached in lockdep_map. This optimization is mainly targeting * on rq->lock. double_rq_lock() acquires this highly competitive with * single depth. 
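[Note] The lockdep.h hunk above replaces the open-coded WARN_ON() assertion macros with two generic building blocks, lockdep_assert(cond) and lockdep_assert_once(cond), and re-derives the held/not-held variants from them. A minimal usage sketch in driver-style C follows (my_dev and its members are hypothetical illustrations, not part of this diff; only the assertion macros and the LOCK_STATE_* values come from the patch):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical device structure, for illustration only. */
struct my_dev {
	spinlock_t lock;	/* protects fast-path state */
	struct mutex cfg_mutex;	/* protects configuration updates */
};

static void my_dev_update(struct my_dev *dev)
{
	/* Derived form: warn if dev->lock is not held at this point. */
	lockdep_assert_held(&dev->lock);

	/*
	 * Generic form: assert an arbitrary lockdep condition, here the
	 * equivalent of lockdep_assert_not_held(&dev->cfg_mutex).
	 */
	lockdep_assert(lockdep_is_held(&dev->cfg_mutex) != LOCK_STATE_HELD);
}

Both forms compile down to empty statements when lockdep is not configured, as the !CONFIG_LOCKDEP stubs added in the same hunk show.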
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h new file mode 100644 index 000000000000..3fa65c964379 --- /dev/null +++ b/include/linux/logic_iomem.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: johannes@sipsolutions.net + */ +#ifndef __LOGIC_IOMEM_H +#define __LOGIC_IOMEM_H +#include <linux/types.h> +#include <linux/ioport.h> + +/** + * struct logic_iomem_ops - emulated IO memory ops + * @read: read an 8, 16, 32 or 64 bit quantity from the given offset, + * size is given in bytes (1, 2, 4 or 8) + * (64-bit only necessary if CONFIG_64BIT is set) + * @write: write an 8, 16 32 or 64 bit quantity to the given offset, + * size is given in bytes (1, 2, 4 or 8) + * (64-bit only necessary if CONFIG_64BIT is set) + * @set: optional, for memset_io() + * @copy_from: optional, for memcpy_fromio() + * @copy_to: optional, for memcpy_toio() + * @unmap: optional, this region is getting unmapped + */ +struct logic_iomem_ops { + unsigned long (*read)(void *priv, unsigned int offset, int size); + void (*write)(void *priv, unsigned int offset, int size, + unsigned long val); + + void (*set)(void *priv, unsigned int offset, u8 value, int size); + void (*copy_from)(void *priv, void *buffer, unsigned int offset, + int size); + void (*copy_to)(void *priv, unsigned int offset, const void *buffer, + int size); + + void (*unmap)(void *priv); +}; + +/** + * struct logic_iomem_region_ops - ops for an IO memory handler + * @map: map a range in the registered IO memory region, must + * fill *ops with the ops and may fill *priv to be passed + * to the ops. The offset is given as the offset into the + * registered resource region. + * The return value is negative for errors, or >= 0 for + * success. On success, the return value is added to the + * offset for later ops, to allow for partial mappings. + */ +struct logic_iomem_region_ops { + long (*map)(unsigned long offset, size_t size, + const struct logic_iomem_ops **ops, + void **priv); +}; + +/** + * logic_iomem_add_region - register an IO memory region + * @resource: the resource description for this region + * @ops: the IO memory mapping ops for this resource + */ +int logic_iomem_add_region(struct resource *resource, + const struct logic_iomem_region_ops *ops); + +#endif /* __LOGIC_IOMEM_H */ diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index 429d67d815ce..07add7882a5d 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there) Because of this later property, it is called "lru_cache". As it actually Tracks Objects in an Active SeT, we could also call it toast (incidentally that is what may happen to the data on the - backend storage uppon next resync, if we don't get it right). + backend storage upon next resync, if we don't get it right). What for? @@ -152,7 +152,7 @@ struct lc_element { * for paranoia, and for "lc_element_to_index" */ unsigned lc_index; /* if we want to track a larger set of objects, - * it needs to become arch independend u64 */ + * it needs to become an architecture independent u64 */ unsigned lc_number; /* special label when on free list */ #define LC_FREE (~0U) @@ -263,7 +263,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char * * Allows (expects) the set to be "dirty". Note that the reference counts and * order on the active and lru lists may still change. Used to serialize - * changing transactions. 
Returns true if we aquired the lock. + * changing transactions. Returns true if we acquired the lock. */ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) { @@ -275,7 +275,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may - * still change. Only works on a "clean" set. Returns true if we aquired the + * still change. Only works on a "clean" set. Returns true if we acquired the * lock, which means there are no pending changes, and any further attempt to * change the set will not succeed until the next lc_unlock(). */ diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index cd23355d2271..17d02eda9538 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -48,13 +48,13 @@ struct lsm_ioctlop_audit { }; struct lsm_ibpkey_audit { - u64 subnet_prefix; - u16 pkey; + u64 subnet_prefix; + u16 pkey; }; struct lsm_ibendport_audit { - char dev_name[IB_DEVICE_NAME_MAX]; - u8 port; + const char *dev_name; + u8 port; }; /* Auxiliary data to use in generating the audit record. */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 04c01794de83..2adeea44c0d5 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -358,8 +358,7 @@ LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) -LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, - u8 dir) +LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid) LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index a7330eb3ec64..7dd1f01ec4f9 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -18,7 +18,6 @@ #ifndef mISDNIF_H #define mISDNIF_H -#include <stdarg.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/socket.h> diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index d5a983d65f05..44365aab043c 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -65,14 +65,10 @@ enum cmdq_code { CMDQ_CODE_LOGIC = 0xa0, }; -enum cmdq_cb_status { - CMDQ_CB_NORMAL = 0, - CMDQ_CB_ERROR -}; - struct cmdq_cb_data { - enum cmdq_cb_status sta; + int sta; void *data; + struct cmdq_pkt *pkt; }; typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data); diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db26..0f06c2287b52 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10 -/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 
0x01ff0cc0 diff --git a/include/linux/math64.h b/include/linux/math64.h index 66deb1fdc2ef..2928f03d6d46 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -3,6 +3,7 @@ #define _LINUX_MATH64_H #include <linux/types.h> +#include <linux/math.h> #include <vdso/math64.h> #include <asm/div64.h> @@ -234,6 +235,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) #endif +#ifndef mul_s64_u64_shr +static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift) +{ + u64 ret; + + /* + * Extract the sign before the multiplication and put it back + * afterwards if needed. + */ + ret = mul_u64_u64_shr(abs(a), b, shift); + + if (a < 0) + ret = -((s64) ret); + + return ret; +} +#endif /* mul_s64_u64_shr */ + #ifndef mul_u64_u32_div static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) { diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h deleted file mode 100644 index 593602fc9317..000000000000 --- a/include/linux/max17040_battery.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2009 Samsung Electronics - * Minkyu Kang <mk7.kang@samsung.com> - */ - -#ifndef __MAX17040_BATTERY_H_ -#define __MAX17040_BATTERY_H_ - -struct max17040_platform_data { - int (*battery_online)(void); - int (*charger_online)(void); - int (*charger_enable)(void); -}; - -#endif diff --git a/include/linux/mcb.h b/include/linux/mcb.h index 71dd10a3d928..f6efb16f9d1b 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -120,7 +120,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv, __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void mcb_unregister_driver(struct mcb_driver *driver); #define module_mcb_driver(__mcb_driver) \ - module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); + module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver) extern void mcb_bus_add_devices(const struct mcb_bus *bus); extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 1fb34ea394ad..68427e8fadeb 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -55,6 +55,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * register the device to mdev module. * * @owner: The module owner. + * @device_driver: Which device driver to probe() on newly created devices * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory @@ -71,11 +72,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * @mdev: mdev_device device structure which is being * destroyed * Returns integer: success (0) or error (< 0) - * @open: Open mediated device. - * @mdev: mediated device. - * Returns integer: success (0) or error (< 0) - * @release: release mediated device - * @mdev: mediated device. 
* @read: Read emulation callback * @mdev: mediated device structure * @buf: read buffer @@ -103,14 +99,15 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); **/ struct mdev_parent_ops { struct module *owner; + struct mdev_driver *device_driver; const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; int (*create)(struct mdev_device *mdev); int (*remove)(struct mdev_device *mdev); - int (*open)(struct mdev_device *mdev); - void (*release)(struct mdev_device *mdev); + int (*open_device)(struct mdev_device *mdev); + void (*close_device)(struct mdev_device *mdev); ssize_t (*read)(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 07f5ef8fc456..c6786c12b207 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -91,12 +91,13 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); mei_cldev_driver_register,\ mei_cldev_driver_unregister) -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, + size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 vtag); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag); ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, @@ -114,6 +115,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); -bool mei_cldev_enabled(struct mei_cl_device *cldev); +bool mei_cldev_enabled(const struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5984fff3f175..b066024c62e3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -30,7 +30,9 @@ extern unsigned long long max_possible_pfn; * @MEMBLOCK_NONE: no special request * @MEMBLOCK_HOTPLUG: hotpluggable region * @MEMBLOCK_MIRROR: mirrored region - * @MEMBLOCK_NOMAP: don't add to kernel direct mapping + * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as + * reserved in the memory map; refer to memblock_mark_nomap() description + * for further details */ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ @@ -50,7 +52,7 @@ struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int nid; #endif }; @@ -97,8 +99,6 @@ void memblock_discard(void); static inline void memblock_discard(void) {} #endif -phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, - phys_addr_t size, phys_addr_t align); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); @@ -207,7 +207,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, 
NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_mem_range_rev - reverse iterate through memblock areas from @@ -218,7 +218,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range_rev(i, p_start, p_end) \ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_reserved_mem_range - iterate over all reserved memblock areas @@ -347,7 +347,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; @@ -366,7 +366,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } -#endif /* CONFIG_NEED_MULTIPLE_NODES */ +#endif /* CONFIG_NUMA */ /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c193be760709..3096c9a0ee01 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -105,14 +105,6 @@ struct mem_cgroup_reclaim_iter { unsigned int generation; }; -struct lruvec_stat { - long count[NR_VM_NODE_STAT_ITEMS]; -}; - -struct batched_lruvec_stat { - s32 count[NR_VM_NODE_STAT_ITEMS]; -}; - /* * Bitmap and deferred work of shrinker::id corresponding to memcg-aware * shrinkers, which have elements charged to this memcg. @@ -123,24 +115,30 @@ struct shrinker_info { unsigned long *map; }; +struct lruvec_stats_percpu { + /* Local (CPU and cgroup) state */ + long state[NR_VM_NODE_STAT_ITEMS]; + + /* Delta calculation for lockless upward propagation */ + long state_prev[NR_VM_NODE_STAT_ITEMS]; +}; + +struct lruvec_stats { + /* Aggregated (CPU and subtree) state */ + long state[NR_VM_NODE_STAT_ITEMS]; + + /* Pending child counts during tree propagation */ + long state_pending[NR_VM_NODE_STAT_ITEMS]; +}; + /* * per-node information in memory controller. */ struct mem_cgroup_per_node { struct lruvec lruvec; - /* - * Legacy local VM stats. This should be struct lruvec_stat and - * cannot be optimized to struct batched_lruvec_stat. Because - * the threshold of the lruvec_stat_cpu can be as big as - * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this - * filed has no upper limit. 
- */ - struct lruvec_stat __percpu *lruvec_stat_local; - - /* Subtree VM stats (batched updates) */ - struct batched_lruvec_stat __percpu *lruvec_stat_cpu; - atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; + struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; + struct lruvec_stats lruvec_stats; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; @@ -192,7 +190,7 @@ enum memcg_kmem_state { struct memcg_padding { char x[0]; } ____cacheline_internodealigned_in_smp; -#define MEMCG_PADDING(name) struct memcg_padding name; +#define MEMCG_PADDING(name) struct memcg_padding name #else #define MEMCG_PADDING(name) #endif @@ -349,8 +347,7 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif - struct mem_cgroup_per_node *nodeinfo[0]; - /* WARNING: nodeinfo must be the last member here */ + struct mem_cgroup_per_node *nodeinfo[]; }; /* @@ -596,13 +593,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) } #endif -static __always_inline bool memcg_stat_item_in_bytes(int idx) -{ - if (idx == MEMCG_PERCPU_B) - return true; - return vmstat_item_in_bytes(idx); -} - static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); @@ -613,12 +603,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. @@ -654,13 +647,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -694,13 +684,35 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) page_counter_read(&memcg->memory); } -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +int __mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask); +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask) +{ + if (mem_cgroup_disabled()) + return 0; + return __mem_cgroup_charge(page, mm, gfp_mask); +} + int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); -void mem_cgroup_uncharge(struct page *page); -void mem_cgroup_uncharge_list(struct list_head *page_list); +void __mem_cgroup_uncharge(struct page *page); +static inline void mem_cgroup_uncharge(struct page *page) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge(page); +} + +void __mem_cgroup_uncharge_list(struct list_head *page_list); +static inline void mem_cgroup_uncharge_list(struct list_head *page_list) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge_list(page_list); +} void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); @@ -743,35 +755,18 @@ out: /** * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page * @page: the page - * @pgdat: pgdat of 
the page * * This function relies on page->mem_cgroup being stable. */ -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct pglist_data *pgdat) +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) { + pg_data_t *pgdat = page_pgdat(page); struct mem_cgroup *memcg = page_memcg(page); VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page); return mem_cgroup_lruvec(memcg, pgdat); } -static inline bool lruvec_holds_page_lru_lock(struct page *page, - struct lruvec *lruvec) -{ - pg_data_t *pgdat = page_pgdat(page); - const struct mem_cgroup *memcg; - struct mem_cgroup_per_node *mz; - - if (mem_cgroup_disabled()) - return lruvec == &pgdat->__lruvec; - - mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - memcg = page_memcg(page) ? : root_mem_cgroup; - - return lruvec->pgdat == pgdat && mz->memcg == memcg; -} - struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); @@ -902,11 +897,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return !!(memcg->css.flags & CSS_ONLINE); } -/* - * For memory reclaim. - */ -int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); - void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); @@ -973,22 +963,21 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, local_irq_restore(flags); } +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + return READ_ONCE(memcg->vmstats.state[idx]); +} + static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { struct mem_cgroup_per_node *pn; - long x; if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx); pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - x = atomic_long_read(&pn->lruvec_stat[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; + return READ_ONCE(pn->lruvec_stats.state[idx]); } static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, @@ -1003,7 +992,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); for_each_possible_cpu(cpu) - x += per_cpu(pn->lruvec_stat_local->count[idx], cpu); + x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -1011,6 +1000,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } +void mem_cgroup_flush_stats(void); + void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); @@ -1165,11 +1156,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1221,18 +1213,11 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, return &pgdat->__lruvec; } -static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct pglist_data *pgdat) -{ - return &pgdat->__lruvec; -} - -static inline bool lruvec_holds_page_lru_lock(struct page *page, - struct lruvec *lruvec) 
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) { pg_data_t *pgdat = page_pgdat(page); - return lruvec == &pgdat->__lruvec; + return &pgdat->__lruvec; } static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) @@ -1255,6 +1240,12 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } +static inline +struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) +{ + return NULL; +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } @@ -1409,6 +1400,11 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, { } +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + return 0; +} + static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { @@ -1421,6 +1417,10 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } +static inline void mem_cgroup_flush_stats(void) +{ +} + static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1516,12 +1516,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, spin_unlock_irqrestore(&lruvec->lru_lock, flags); } +/* Test requires a stable page->memcg binding, see page_memcg() */ +static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec) +{ + return lruvec_pgdat(lruvec) == page_pgdat(page) && + lruvec_memcg(lruvec) == page_memcg(page); +} + /* Don't lock again iff page's lruvec locked */ static inline struct lruvec *relock_page_lruvec_irq(struct page *page, struct lruvec *locked_lruvec) { if (locked_lruvec) { - if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + if (page_matches_lruvec(page, locked_lruvec)) return locked_lruvec; unlock_page_lruvec_irq(locked_lruvec); @@ -1535,7 +1542,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page, struct lruvec *locked_lruvec, unsigned long *flags) { if (locked_lruvec) { - if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + if (page_matches_lruvec(page, locked_lruvec)) return locked_lruvec; unlock_page_lruvec_irqrestore(locked_lruvec, *flags); @@ -1593,7 +1600,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, + gfp_t gfp_mask); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; @@ -1631,6 +1639,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg, #endif #ifdef CONFIG_MEMCG_KMEM +bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); @@ -1684,6 +1693,10 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) struct mem_cgroup *mem_cgroup_from_obj(void *p); #else +static inline bool mem_cgroup_kmem_disabled(void) +{ + return true; +} static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) diff --git a/include/linux/memory.h b/include/linux/memory.h index 97e92e8b556a..7efc0a7c14c9 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -23,6 +23,48 @@ #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) +/** + * struct 
memory_group - a logical group of memory blocks + * @nid: The node id for all memory blocks inside the memory group. + * @blocks: List of all memory blocks belonging to this memory group. + * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this + * memory group. + * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this + * memory group. + * @is_dynamic: The memory group type: static vs. dynamic + * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum + * number of pages we'll have in this static memory group. + * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages + * in which memory is added/removed in this dynamic memory group. + * This granularity defines the alignment of a unit in physical + * address space; it has to be at least as big as a single + * memory block. + * + * A memory group logically groups memory blocks; each memory block + * belongs to at most one memory group. A memory group corresponds to + * a memory device, such as a DIMM or a NUMA node, which spans multiple + * memory blocks and might even span multiple non-contiguous physical memory + * ranges. + * + * Modification of members after registration is serialized by memory + * hot(un)plug code. + */ +struct memory_group { + int nid; + struct list_head memory_blocks; + unsigned long present_kernel_pages; + unsigned long present_movable_pages; + bool is_dynamic; + union { + struct { + unsigned long max_pages; + } s; + struct { + unsigned long unit_pages; + } d; + }; +}; + struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ @@ -34,6 +76,8 @@ struct memory_block { * lay at the beginning of the memory block. */ unsigned long nr_vmemmap_pages; + struct memory_group *group; /* group (if any) for this block */ + struct list_head group_next; /* next block inside memory group */ }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -86,16 +130,25 @@ static inline int memory_notify(unsigned long val, void *v) extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size, - unsigned long vmemmap_pages); + unsigned long vmemmap_pages, + struct memory_group *group); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); -extern struct memory_block *find_memory_block(struct mem_section *); +extern struct memory_block *find_memory_block(unsigned long section_nr); typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); extern int walk_memory_blocks(unsigned long start, unsigned long size, void *arg, walk_memory_blocks_func_t func); extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) + +extern int memory_group_register_static(int nid, unsigned long max_pages); +extern int memory_group_register_dynamic(int nid, unsigned long unit_pages); +extern int memory_group_unregister(int mgid); +struct memory_group *memory_group_find_by_id(int mgid); +typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); +int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, + struct memory_group *excluded, void *arg); #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/include/linux/memory_hotplug.h 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 28f32fd00fe9..e5a867c950b2 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -12,24 +12,13 @@ struct zone; struct pglist_data; struct mem_section; struct memory_block; +struct memory_group; struct resource; struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); -/* - * Types for free bootmem stored in page->lru.next. These have to be in - * some random range in unsigned long space for debugging purposes. - */ -enum { - MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, - SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, - MIX_SECTION_INFO, - NODE_INFO, - MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, -}; - /* Types for control the zone type of onlined and offlined memory */ enum { /* Offline the memory. */ @@ -62,6 +51,11 @@ typedef int __bitwise mhp_t; * Only selected architectures support it with SPARSE_VMEMMAP. */ #define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) +/* + * The nid field specifies a memory group id (mgid) instead. The memory group + * implies the node id (nid). + */ +#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) /* * Extended parameters for memory hotplug: @@ -107,13 +101,15 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); -extern void adjust_present_page_count(struct zone *zone, long nr_pages); +extern void adjust_present_page_count(struct page *page, + struct memory_group *group, + long nr_pages); /* VM interface that may be used by firmware interface */ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, struct zone *zone); extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, - struct zone *zone); + struct zone *zone, struct memory_group *group); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, @@ -142,8 +138,7 @@ static inline bool movable_node_is_enabled(void) return movable_node_enabled; } -extern void arch_remove_memory(int nid, u64 start, u64 size, - struct vmem_altmap *altmap); +extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); @@ -222,17 +217,6 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_NUMA */ #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ -#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE -extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); -#else -static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) -{ -} -#endif -extern void put_page_bootmem(struct page *page); -extern void get_page_bootmem(unsigned long ingo, struct page *page, - unsigned long type); - void get_online_mems(void); void put_online_mems(void); @@ -260,10 +244,6 @@ static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} -static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) -{ -} - static inline int try_online_node(int nid) { return 0; @@ -319,25 +299,27 @@ static
inline void pgdat_resize_init(struct pglist_data *pgdat) {} #ifdef CONFIG_MEMORY_HOTREMOVE extern void try_offline_node(int nid); -extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); -extern int remove_memory(int nid, u64 start, u64 size); -extern void __remove_memory(int nid, u64 start, u64 size); -extern int offline_and_remove_memory(int nid, u64 start, u64 size); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, + struct memory_group *group); +extern int remove_memory(u64 start, u64 size); +extern void __remove_memory(u64 start, u64 size); +extern int offline_and_remove_memory(u64 start, u64 size); #else static inline void try_offline_node(int nid) {} -static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) +static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, + struct memory_group *group) { return -EINVAL; } -static inline int remove_memory(int nid, u64 start, u64 size) +static inline int remove_memory(u64 start, u64 size) { return -EBUSY; } -static inline void __remove_memory(int nid, u64 start, u64 size) {} +static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void set_zone_contiguous(struct zone *zone); @@ -366,7 +348,8 @@ extern void sparse_remove_section(struct mem_section *ms, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); -extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, +extern struct zone *zone_for_pfn_range(int online_type, int nid, + struct memory_group *group, unsigned long start_pfn, unsigned long nr_pages); extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5f1c74df264d..4091692bed8c 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -46,11 +46,8 @@ struct mempolicy { atomic_t refcnt; unsigned short mode; /* See MPOL_* above */ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ - union { - short preferred_node; /* preferred */ - nodemask_t nodes; /* interleave/bind */ - /* undefined for default */ - } v; + nodemask_t nodes; /* interleave/bind/prefer */ + union { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t user_nodemask; /* nodemask passed by user */ @@ -150,7 +147,7 @@ extern int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); extern bool init_nodemask_of_mempolicy(nodemask_t *mask); -extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, +extern bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask); extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); @@ -187,6 +184,14 @@ extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); +extern bool numa_demotion_enabled; + +static inline bool mpol_is_preferred_many(struct mempolicy *pol) +{ + return (pol->mode == MPOL_PREFERRED_MANY); +} + + #else struct mempolicy {}; @@ -295,5 +300,13 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp) { return NULL; } + +#define numa_demotion_enabled false + +static inline bool mpol_is_preferred_many(struct mempolicy *pol) +{ + return false; +} + #endif /* CONFIG_NUMA */ #endif
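mpol_is_preferred_many() exists so allocation paths can special-case the new MPOL_PREFERRED_MANY mode: allocate from a preferred set of nodes first, but fall back to the full allowed set rather than fail. A sketch of that two-pass pattern (function name is ours, loosely modeled on the static helper the same series adds to mm/mempolicy.c):

	static struct page *alloc_preferred_many(gfp_t gfp, unsigned int order,
						 int nid, struct mempolicy *pol)
	{
		gfp_t pref_gfp;
		struct page *page;

		/* First pass: preferred nodes only, without heavy reclaim. */
		pref_gfp = (gfp | __GFP_NOWARN) & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
		page = __alloc_pages(pref_gfp, order, nid, &pol->nodes);
		if (!page)
			/* Second pass: fall back to all allowed nodes. */
			page = __alloc_pages(gfp, order, numa_node_id(), NULL);

		return page;
	}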
diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 45a79da89c5f..c0e9d35889e8 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,7 +26,7 @@ struct vmem_altmap { }; /* - * Specialize ZONE_DEVICE memory into multiple types each having differents + * Specialize ZONE_DEVICE memory into multiple types each with a different usage. * * MEMORY_DEVICE_PRIVATE: diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index e6ee2ec35de9..cbf9d7619493 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -186,10 +186,11 @@ enum ddr_pwrst { #define PRCMU_FW_PROJECT_U8500_C3 8 #define PRCMU_FW_PROJECT_U8500_C4 9 #define PRCMU_FW_PROJECT_U9500_MBL 10 -#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */ +#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */ #define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */ #define PRCMU_FW_PROJECT_U8520 13 #define PRCMU_FW_PROJECT_U8420 14 +#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */ #define PRCMU_FW_PROJECT_U8420_SYSCLK 17 #define PRCMU_FW_PROJECT_A9420 20 /* [32..63] 9540 and derivatives */ diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h index 2660226138b8..e5b8dbf828b6 100644 --- a/include/linux/mfd/hi6421-spmi-pmic.h +++ b/include/linux/mfd/hi6421-spmi-pmic.h @@ -19,11 +19,6 @@ struct hi6421_spmi_pmic { struct resource *res; struct device *dev; void __iomem *regs; - spinlock_t lock; - struct irq_domain *domain; - int irq; - int gpio; - unsigned int *irqs; struct regmap *regmap; }; diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h index b06171322178..af5d97239c0d 100644 --- a/include/linux/mfd/hi655x-pmic.h +++ b/include/linux/mfd/hi655x-pmic.h @@ -2,7 +2,7 @@ /* * Device driver for regulators in hi655x IC * - * Copyright (c) 2016 Hisilicon. + * Copyright (c) 2016 HiSilicon Ltd. * * Authors: * Chen Feng <puck.chen@hisilicon.com> diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h new file mode 100644 index 000000000000..129a6c078221 --- /dev/null +++ b/include/linux/mfd/idt82p33_reg.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */ +#ifndef HAVE_IDT82P33_REG +#define HAVE_IDT82P33_REG + +/* Register address */ +#define DPLL1_TOD_CNFG 0x134 +#define DPLL2_TOD_CNFG 0x1B4 + +#define DPLL1_TOD_STS 0x10B +#define DPLL2_TOD_STS 0x18B + +#define DPLL1_TOD_TRIGGER 0x115 +#define DPLL2_TOD_TRIGGER 0x195 + +#define DPLL1_OPERATING_MODE_CNFG 0x120 +#define DPLL2_OPERATING_MODE_CNFG 0x1A0 + +#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C +#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC + +#define DPLL1_PHASE_OFFSET_CNFG 0x143 +#define DPLL2_PHASE_OFFSET_CNFG 0x1C3 + +#define DPLL1_SYNC_EDGE_CNFG 0x140 +#define DPLL2_SYNC_EDGE_CNFG 0x1C0 + +#define DPLL1_INPUT_MODE_CNFG 0x116 +#define DPLL2_INPUT_MODE_CNFG 0x196 + +#define DPLL1_OPERATING_STS 0x102 +#define DPLL2_OPERATING_STS 0x182 + +#define DPLL1_CURRENT_FREQ_STS 0x103 +#define DPLL2_CURRENT_FREQ_STS 0x183 + +#define REG_SOFT_RESET 0x381 + +#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn))) + +/* Register bit definitions */ +#define SYNC_TOD BIT(1) +#define PH_OFFSET_EN BIT(7) +#define SQUELCH_ENABLE BIT(5) + +/* Bit definitions for the DPLL_MODE register */ +#define PLL_MODE_SHIFT (0) +#define PLL_MODE_MASK (0x1F) +#define COMBO_MODE_EN BIT(5) +#define COMBO_MODE_SHIFT (6) +#define COMBO_MODE_MASK (0x3) + +/* Bit definitions for DPLL_OPERATING_STS register */ +#define OPERATING_STS_MASK (0x7) +#define OPERATING_STS_SHIFT (0x0) + +/* Bit definitions for DPLL_TOD_TRIGGER register */ +#define READ_TRIGGER_MASK (0xF) +#define READ_TRIGGER_SHIFT (0x0) +#define WRITE_TRIGGER_MASK (0xF0) +#define WRITE_TRIGGER_SHIFT (0x4) + +/* Bit definitions for REG_SOFT_RESET register */ +#define SOFT_RESET_EN BIT(7) + +enum pll_mode { + PLL_MODE_MIN = 0, + PLL_MODE_AUTOMATIC = PLL_MODE_MIN, + PLL_MODE_FORCE_FREERUN = 1, + PLL_MODE_FORCE_HOLDOVER = 2, + PLL_MODE_FORCE_LOCKED = 4, + PLL_MODE_FORCE_PRE_LOCKED2 = 5, + PLL_MODE_FORCE_PRE_LOCKED = 6, + PLL_MODE_FORCE_LOST_PHASE = 7, + PLL_MODE_DCO = 10, + PLL_MODE_WPH = 18, + PLL_MODE_MAX = PLL_MODE_WPH, +}; + +enum hw_tod_trig_sel { + HW_TOD_TRIG_SEL_MIN = 0, + HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN, + HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN, + HW_TOD_TRIG_SEL_SYNC_SEL = 1, + HW_TOD_TRIG_SEL_IN12 = 2, + HW_TOD_TRIG_SEL_IN13 = 3, + HW_TOD_TRIG_SEL_IN14 = 4, + HW_TOD_TRIG_SEL_TOD_PPS = 5, + HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6, + HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7, + HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8, + HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9, + HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, + WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, +}; + +/** @brief Enumerated type listing DPLL operational modes */ +enum dpll_state { + DPLL_STATE_FREERUN = 1, + DPLL_STATE_HOLDOVER = 2, + DPLL_STATE_LOCKED = 4, + DPLL_STATE_PRELOCKED2 = 5, + DPLL_STATE_PRELOCKED = 6, + DPLL_STATE_LOSTPHASE = 7, + DPLL_STATE_MAX +}; + +#endif
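READ_TRIGGER_* and WRITE_TRIGGER_* above carve the 8-bit DPLLx_TOD_TRIGGER register into two 4-bit selector fields. A hypothetical helper (name ours) showing how a driver might pack them:

	/* Pack write/read trigger selections into a DPLLx_TOD_TRIGGER value. */
	static inline u8 idt82p33_tod_trigger(enum hw_tod_trig_sel wr_trig,
					      enum hw_tod_trig_sel rd_trig)
	{
		return ((wr_trig << WRITE_TRIGGER_SHIFT) & WRITE_TRIGGER_MASK) |
		       ((rd_trig << READ_TRIGGER_SHIFT) & READ_TRIGGER_MASK);
	}

For example, idt82p33_tod_trigger(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, HW_TOD_TRIG_SEL_TOD_PPS) yields 0x95.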
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h new file mode 100644 index 000000000000..92d763230bdf --- /dev/null +++ b/include/linux/mfd/idt8a340_reg.h @@ -0,0 +1,729 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Based on 5.2.0, Family Programming Guide (Sept 30, 2020) + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company. + */ +#ifndef HAVE_IDT8A340_REG +#define HAVE_IDT8A340_REG + +#define PAGE_ADDR_BASE 0x0000 +#define PAGE_ADDR 0x00fc + +#define HW_REVISION 0x8180 +#define REV_ID 0x007a + +#define HW_DPLL_0 (0x8a00) +#define HW_DPLL_1 (0x8b00) +#define HW_DPLL_2 (0x8c00) +#define HW_DPLL_3 (0x8d00) +#define HW_DPLL_4 (0x8e00) +#define HW_DPLL_5 (0x8f00) +#define HW_DPLL_6 (0x9000) +#define HW_DPLL_7 (0x9100) + +#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080) +#define HW_DPLL_TOD_CTRL_1 (0x089) +#define HW_DPLL_TOD_CTRL_2 (0x08A) +#define HW_DPLL_TOD_OVR__0 (0x098) +#define HW_DPLL_TOD_OUT_0__0 (0x0B0) + +#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740) +#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741) +#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742) +#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743) +#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744) +#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745) +#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746) +#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747) +#define HW_Q8_CH_SYNC_CTRL_0 (0xa748) +#define HW_Q8_CH_SYNC_CTRL_1 (0xa749) +#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a) +#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b) +#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c) +#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d) +#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e) +#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f) + +#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14 +#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15 +#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16 +#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17 + +#define SYNCTRL1_MASTER_SYNC_RST BIT(7) +#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5) +#define SYNCTRL1_TOD_SYNC_TRIG BIT(4) +#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3) +#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2) +#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1) +#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0) + +#define HW_Q8_CTRL_SPARE (0xa7d4) +#define HW_Q11_CTRL_SPARE (0xa7ec) + +/** + * Select FOD5 as sync_trigger for Q8 divider. + * Transition from logic zero to one + * sets trigger to sync Q8 divider. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_SYNC_TRIG BIT(1) + +/** + * Enable FOD5 as driver for clock and sync for Q8 divider. + * Enable fanout buffer for FOD5. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + +/** + * Select FOD6 as sync_trigger for Q11 divider. + * Transition from logic zero to one + * sets trigger to sync Q11 divider. + * + * Unused when FOD7 is driving Q11 divider (normal operation). + */ +#define Q10_TO_Q11_SYNC_TRIG BIT(1) + +/** + * Enable FOD6 as driver for clock and sync for Q11 divider. + * Enable fanout buffer for FOD6. + * + * Unused when FOD7 is driving Q11 divider (normal operation).
+ */ +#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + +#define RESET_CTRL 0xc000 +#define SM_RESET 0x0012 +#define SM_RESET_V520 0x0013 +#define SM_RESET_CMD 0x5A + +#define GENERAL_STATUS 0xc014 +#define BOOT_STATUS 0x0000 +#define HW_REV_ID 0x000A +#define BOND_ID 0x000B +#define HW_CSR_ID 0x000C +#define HW_IRQ_ID 0x000E +#define MAJ_REL 0x0010 +#define MIN_REL 0x0011 +#define HOTFIX_REL 0x0012 +#define PIPELINE_ID 0x0014 +#define BUILD_ID 0x0018 +#define JTAG_DEVICE_ID 0x001c +#define PRODUCT_ID 0x001e +#define OTP_SCSR_CONFIG_SELECT 0x0022 + +#define STATUS 0xc03c +#define DPLL0_STATUS 0x0018 +#define DPLL1_STATUS 0x0019 +#define DPLL2_STATUS 0x001a +#define DPLL3_STATUS 0x001b +#define DPLL4_STATUS 0x001c +#define DPLL5_STATUS 0x001d +#define DPLL6_STATUS 0x001e +#define DPLL7_STATUS 0x001f +#define DPLL_SYS_STATUS 0x0020 +#define DPLL_SYS_APLL_STATUS 0x0021 +#define DPLL0_FILTER_STATUS 0x0044 +#define DPLL1_FILTER_STATUS 0x004c +#define DPLL2_FILTER_STATUS 0x0054 +#define DPLL3_FILTER_STATUS 0x005c +#define DPLL4_FILTER_STATUS 0x0064 +#define DPLL5_FILTER_STATUS 0x006c +#define DPLL6_FILTER_STATUS 0x0074 +#define DPLL7_FILTER_STATUS 0x007c +#define DPLLSYS_FILTER_STATUS 0x0084 +#define USER_GPIO0_TO_7_STATUS 0x008a +#define USER_GPIO8_TO_15_STATUS 0x008b + +#define GPIO_USER_CONTROL 0xc160 +#define GPIO0_TO_7_OUT 0x0000 +#define GPIO8_TO_15_OUT 0x0001 +#define GPIO0_TO_7_OUT_V520 0x0002 +#define GPIO8_TO_15_OUT_V520 0x0003 + +#define STICKY_STATUS_CLEAR 0xc164 + +#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c + +#define ALERT_CFG 0xc188 + +#define SYS_DPLL_XO 0xc194 + +#define SYS_APLL 0xc19c + +#define INPUT_0 0xc1b0 +#define INPUT_1 0xc1c0 +#define INPUT_2 0xc1d0 +#define INPUT_3 0xc200 +#define INPUT_4 0xc210 +#define INPUT_5 0xc220 +#define INPUT_6 0xc230 +#define INPUT_7 0xc240 +#define INPUT_8 0xc250 +#define INPUT_9 0xc260 +#define INPUT_10 0xc280 +#define INPUT_11 0xc290 +#define INPUT_12 0xc2a0 +#define INPUT_13 0xc2b0 +#define INPUT_14 0xc2c0 +#define INPUT_15 0xc2d0 + +#define REF_MON_0 0xc2e0 +#define REF_MON_1 0xc2ec +#define REF_MON_2 0xc300 +#define REF_MON_3 0xc30c +#define REF_MON_4 0xc318 +#define REF_MON_5 0xc324 +#define REF_MON_6 0xc330 +#define REF_MON_7 0xc33c +#define REF_MON_8 0xc348 +#define REF_MON_9 0xc354 +#define REF_MON_10 0xc360 +#define REF_MON_11 0xc36c +#define REF_MON_12 0xc380 +#define REF_MON_13 0xc38c +#define REF_MON_14 0xc398 +#define REF_MON_15 0xc3a4 + +#define DPLL_0 0xc3b0 +#define DPLL_CTRL_REG_0 0x0002 +#define DPLL_CTRL_REG_1 0x0003 +#define DPLL_CTRL_REG_2 0x0004 +#define DPLL_TOD_SYNC_CFG 0x0031 +#define DPLL_COMBO_SLAVE_CFG_0 0x0032 +#define DPLL_COMBO_SLAVE_CFG_1 0x0033 +#define DPLL_SLAVE_REF_CFG 0x0034 +#define DPLL_REF_MODE 0x0035 +#define DPLL_PHASE_MEASUREMENT_CFG 0x0036 +#define DPLL_MODE 0x0037 +#define DPLL_MODE_V520 0x003B +#define DPLL_1 0xc400 +#define DPLL_2 0xc438 +#define DPLL_2_V520 0xc43c +#define DPLL_3 0xc480 +#define DPLL_4 0xc4b8 +#define DPLL_4_V520 0xc4bc +#define DPLL_5 0xc500 +#define DPLL_6 0xc538 +#define DPLL_6_V520 0xc53c +#define DPLL_7 0xc580 +#define SYS_DPLL 0xc5b8 +#define SYS_DPLL_V520 0xc5bc + +#define DPLL_CTRL_0 0xc600 +#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001 +#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c +#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a +#define DPLL_CTRL_1 0xc63c +#define DPLL_CTRL_2 0xc680 +#define DPLL_CTRL_3 0xc6bc +#define DPLL_CTRL_4 0xc700 +#define DPLL_CTRL_5 0xc73c +#define DPLL_CTRL_6 0xc780 +#define DPLL_CTRL_7 0xc7bc +#define SYS_DPLL_CTRL 0xc800 + 
+#define DPLL_PHASE_0 0xc818 +/* Signed 42-bit FFO in units of 2^(-53) */ +#define DPLL_WR_PHASE 0x0000 +#define DPLL_PHASE_1 0xc81c +#define DPLL_PHASE_2 0xc820 +#define DPLL_PHASE_3 0xc824 +#define DPLL_PHASE_4 0xc828 +#define DPLL_PHASE_5 0xc82c +#define DPLL_PHASE_6 0xc830 +#define DPLL_PHASE_7 0xc834 + +#define DPLL_FREQ_0 0xc838 +/* Signed 42-bit FFO in units of 2^(-53) */ +#define DPLL_WR_FREQ 0x0000 +#define DPLL_FREQ_1 0xc840 +#define DPLL_FREQ_2 0xc848 +#define DPLL_FREQ_3 0xc850 +#define DPLL_FREQ_4 0xc858 +#define DPLL_FREQ_5 0xc860 +#define DPLL_FREQ_6 0xc868 +#define DPLL_FREQ_7 0xc870 + +#define DPLL_PHASE_PULL_IN_0 0xc880 +#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */ +#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */ +#define PULL_IN_CTRL 0x0007 +#define DPLL_PHASE_PULL_IN_1 0xc888 +#define DPLL_PHASE_PULL_IN_2 0xc890 +#define DPLL_PHASE_PULL_IN_3 0xc898 +#define DPLL_PHASE_PULL_IN_4 0xc8a0 +#define DPLL_PHASE_PULL_IN_5 0xc8a8 +#define DPLL_PHASE_PULL_IN_6 0xc8b0 +#define DPLL_PHASE_PULL_IN_7 0xc8b8 + +#define GPIO_CFG 0xc8c0 +#define GPIO_CFG_GBL 0x0000 +#define GPIO_0 0xc8c2 +#define GPIO_DCO_INC_DEC 0x0000 +#define GPIO_OUT_CTRL_0 0x0001 +#define GPIO_OUT_CTRL_1 0x0002 +#define GPIO_TOD_TRIG 0x0003 +#define GPIO_DPLL_INDICATOR 0x0004 +#define GPIO_LOS_INDICATOR 0x0005 +#define GPIO_REF_INPUT_DSQ_0 0x0006 +#define GPIO_REF_INPUT_DSQ_1 0x0007 +#define GPIO_REF_INPUT_DSQ_2 0x0008 +#define GPIO_REF_INPUT_DSQ_3 0x0009 +#define GPIO_MAN_CLK_SEL_0 0x000a +#define GPIO_MAN_CLK_SEL_1 0x000b +#define GPIO_MAN_CLK_SEL_2 0x000c +#define GPIO_SLAVE 0x000d +#define GPIO_ALERT_OUT_CFG 0x000e +#define GPIO_TOD_NOTIFICATION_CFG 0x000f +#define GPIO_CTRL 0x0010 +#define GPIO_CTRL_V520 0x0011 +#define GPIO_1 0xc8d4 +#define GPIO_2 0xc8e6 +#define GPIO_3 0xc900 +#define GPIO_4 0xc912 +#define GPIO_5 0xc924 +#define GPIO_6 0xc936 +#define GPIO_7 0xc948 +#define GPIO_8 0xc95a +#define GPIO_9 0xc980 +#define GPIO_10 0xc992 +#define GPIO_11 0xc9a4 +#define GPIO_12 0xc9b6 +#define GPIO_13 0xc9c8 +#define GPIO_14 0xc9da +#define GPIO_15 0xca00 + +#define OUT_DIV_MUX 0xca12 +#define OUTPUT_0 0xca14 +#define OUTPUT_0_V520 0xca20 +/* FOD frequency output divider value */ +#define OUT_DIV 0x0000 +#define OUT_DUTY_CYCLE_HIGH 0x0004 +#define OUT_CTRL_0 0x0008 +#define OUT_CTRL_1 0x0009 +/* Phase adjustment in FOD cycles */ +#define OUT_PHASE_ADJ 0x000c +#define OUTPUT_1 0xca24 +#define OUTPUT_1_V520 0xca30 +#define OUTPUT_2 0xca34 +#define OUTPUT_2_V520 0xca40 +#define OUTPUT_3 0xca44 +#define OUTPUT_3_V520 0xca50 +#define OUTPUT_4 0xca54 +#define OUTPUT_4_V520 0xca60 +#define OUTPUT_5 0xca64 +#define OUTPUT_5_V520 0xca80 +#define OUTPUT_6 0xca80 +#define OUTPUT_6_V520 0xca90 +#define OUTPUT_7 0xca90 +#define OUTPUT_7_V520 0xcaa0 +#define OUTPUT_8 0xcaa0 +#define OUTPUT_8_V520 0xcab0 +#define OUTPUT_9 0xcab0 +#define OUTPUT_9_V520 0xcac0 +#define OUTPUT_10 0xcac0 +#define OUTPUT_10_V520 0xcad0 +#define OUTPUT_11 0xcad0 +#define OUTPUT_11_V520 0xcae0 + +#define SERIAL 0xcae0 +#define SERIAL_V520 0xcaf0 + +#define PWM_ENCODER_0 0xcb00 +#define PWM_ENCODER_1 0xcb08 +#define PWM_ENCODER_2 0xcb10 +#define PWM_ENCODER_3 0xcb18 +#define PWM_ENCODER_4 0xcb20 +#define PWM_ENCODER_5 0xcb28 +#define PWM_ENCODER_6 0xcb30 +#define PWM_ENCODER_7 0xcb38 +#define PWM_DECODER_0 0xcb40 +#define PWM_DECODER_1 0xcb48 +#define PWM_DECODER_1_V520 0xcb4a +#define PWM_DECODER_2 0xcb50 +#define PWM_DECODER_2_V520 0xcb54 +#define PWM_DECODER_3 0xcb58 +#define PWM_DECODER_3_V520 0xcb5e +#define PWM_DECODER_4 0xcb60 
+#define PWM_DECODER_4_V520 0xcb68 +#define PWM_DECODER_5 0xcb68 +#define PWM_DECODER_5_V520 0xcb80 +#define PWM_DECODER_6 0xcb70 +#define PWM_DECODER_6_V520 0xcb8a +#define PWM_DECODER_7 0xcb80 +#define PWM_DECODER_7_V520 0xcb94 +#define PWM_DECODER_8 0xcb88 +#define PWM_DECODER_8_V520 0xcb9e +#define PWM_DECODER_9 0xcb90 +#define PWM_DECODER_9_V520 0xcba8 +#define PWM_DECODER_10 0xcb98 +#define PWM_DECODER_10_V520 0xcbb2 +#define PWM_DECODER_11 0xcba0 +#define PWM_DECODER_11_V520 0xcbbc +#define PWM_DECODER_12 0xcba8 +#define PWM_DECODER_12_V520 0xcbc6 +#define PWM_DECODER_13 0xcbb0 +#define PWM_DECODER_13_V520 0xcbd0 +#define PWM_DECODER_14 0xcbb8 +#define PWM_DECODER_14_V520 0xcbda +#define PWM_DECODER_15 0xcbc0 +#define PWM_DECODER_15_V520 0xcbe4 +#define PWM_USER_DATA 0xcbc8 +#define PWM_USER_DATA_V520 0xcbf0 + +#define TOD_0 0xcbcc +#define TOD_0_V520 0xcc00 +/* Enable TOD counter, output channel sync and even-PPS mode */ +#define TOD_CFG 0x0000 +#define TOD_CFG_V520 0x0001 +#define TOD_1 0xcbce +#define TOD_1_V520 0xcc02 +#define TOD_2 0xcbd0 +#define TOD_2_V520 0xcc04 +#define TOD_3 0xcbd2 +#define TOD_3_V520 0xcc06 + +#define TOD_WRITE_0 0xcc00 +#define TOD_WRITE_0_V520 0xcc10 +/* 8-bit subns, 32-bit ns, 48-bit seconds */ +#define TOD_WRITE 0x0000 +/* Counter increments after TOD write is completed */ +#define TOD_WRITE_COUNTER 0x000c +/* TOD write trigger configuration */ +#define TOD_WRITE_SELECT_CFG_0 0x000d +/* TOD write trigger selection */ +#define TOD_WRITE_CMD 0x000f +#define TOD_WRITE_1 0xcc10 +#define TOD_WRITE_1_V520 0xcc20 +#define TOD_WRITE_2 0xcc20 +#define TOD_WRITE_2_V520 0xcc30 +#define TOD_WRITE_3 0xcc30 +#define TOD_WRITE_3_V520 0xcc40 + +#define TOD_READ_PRIMARY_0 0xcc40 +#define TOD_READ_PRIMARY_0_V520 0xcc50 +/* 8-bit subns, 32-bit ns, 48-bit seconds */ +#define TOD_READ_PRIMARY 0x0000 +/* Counter increments after TOD write is completed */ +#define TOD_READ_PRIMARY_COUNTER 0x000b +/* Read trigger configuration */ +#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c +/* Read trigger selection */ +#define TOD_READ_PRIMARY_CMD 0x000e +#define TOD_READ_PRIMARY_CMD_V520 0x000f +#define TOD_READ_PRIMARY_1 0xcc50 +#define TOD_READ_PRIMARY_1_V520 0xcc60 +#define TOD_READ_PRIMARY_2 0xcc60 +#define TOD_READ_PRIMARY_2_V520 0xcc80 +#define TOD_READ_PRIMARY_3 0xcc80 +#define TOD_READ_PRIMARY_3_V520 0xcc90 + +#define TOD_READ_SECONDARY_0 0xcc90 +#define TOD_READ_SECONDARY_0_V520 0xcca0 +#define TOD_READ_SECONDARY_1 0xcca0 +#define TOD_READ_SECONDARY_1_V520 0xccb0 +#define TOD_READ_SECONDARY_2 0xccb0 +#define TOD_READ_SECONDARY_2_V520 0xccc0 +#define TOD_READ_SECONDARY_3 0xccc0 +#define TOD_READ_SECONDARY_3_V520 0xccd0 + +#define OUTPUT_TDC_CFG 0xccd0 +#define OUTPUT_TDC_CFG_V520 0xcce0 +#define OUTPUT_TDC_0 0xcd00 +#define OUTPUT_TDC_1 0xcd08 +#define OUTPUT_TDC_2 0xcd10 +#define OUTPUT_TDC_3 0xcd18 +#define INPUT_TDC 0xcd20 + +#define SCRATCH 0xcf50 +#define SCRATCH_V520 0xcf4c + +#define EEPROM 0xcf68 +#define EEPROM_V520 0xcf64 + +#define OTP 0xcf70 + +#define BYTE 0xcf80 + +/* Bit definitions for the MAJ_REL register */ +#define MAJOR_SHIFT (1) +#define MAJOR_MASK (0x7f) +#define PR_BUILD BIT(0) + +/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */ +#define GPIO0_LEVEL BIT(0) +#define GPIO1_LEVEL BIT(1) +#define GPIO2_LEVEL BIT(2) +#define GPIO3_LEVEL BIT(3) +#define GPIO4_LEVEL BIT(4) +#define GPIO5_LEVEL BIT(5) +#define GPIO6_LEVEL BIT(6) +#define GPIO7_LEVEL BIT(7) + +/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */ +#define GPIO8_LEVEL BIT(0) 
+#define GPIO9_LEVEL BIT(1) +#define GPIO10_LEVEL BIT(2) +#define GPIO11_LEVEL BIT(3) +#define GPIO12_LEVEL BIT(4) +#define GPIO13_LEVEL BIT(5) +#define GPIO14_LEVEL BIT(6) +#define GPIO15_LEVEL BIT(7) + +/* Bit definitions for the GPIO0_TO_7_OUT register */ +#define GPIO0_DRIVE_LEVEL BIT(0) +#define GPIO1_DRIVE_LEVEL BIT(1) +#define GPIO2_DRIVE_LEVEL BIT(2) +#define GPIO3_DRIVE_LEVEL BIT(3) +#define GPIO4_DRIVE_LEVEL BIT(4) +#define GPIO5_DRIVE_LEVEL BIT(5) +#define GPIO6_DRIVE_LEVEL BIT(6) +#define GPIO7_DRIVE_LEVEL BIT(7) + +/* Bit definitions for the GPIO8_TO_15_OUT register */ +#define GPIO8_DRIVE_LEVEL BIT(0) +#define GPIO9_DRIVE_LEVEL BIT(1) +#define GPIO10_DRIVE_LEVEL BIT(2) +#define GPIO11_DRIVE_LEVEL BIT(3) +#define GPIO12_DRIVE_LEVEL BIT(4) +#define GPIO13_DRIVE_LEVEL BIT(5) +#define GPIO14_DRIVE_LEVEL BIT(6) +#define GPIO15_DRIVE_LEVEL BIT(7) + +/* Bit definitions for the DPLL_TOD_SYNC_CFG register */ +#define TOD_SYNC_SOURCE_SHIFT (1) +#define TOD_SYNC_SOURCE_MASK (0x3) +#define TOD_SYNC_EN BIT(0) + +/* Bit definitions for the DPLL_MODE register */ +#define WRITE_TIMER_MODE BIT(6) +#define PLL_MODE_SHIFT (3) +#define PLL_MODE_MASK (0x7) +#define STATE_MODE_SHIFT (0) +#define STATE_MODE_MASK (0x7) + +/* Bit definitions for the GPIO_CFG_GBL register */ +#define SUPPLY_MODE_SHIFT (0) +#define SUPPLY_MODE_MASK (0x3) + +/* Bit definitions for the GPIO_DCO_INC_DEC register */ +#define INCDEC_DPLL_INDEX_SHIFT (0) +#define INCDEC_DPLL_INDEX_MASK (0x7) + +/* Bit definitions for the GPIO_OUT_CTRL_0 register */ +#define CTRL_OUT_0 BIT(0) +#define CTRL_OUT_1 BIT(1) +#define CTRL_OUT_2 BIT(2) +#define CTRL_OUT_3 BIT(3) +#define CTRL_OUT_4 BIT(4) +#define CTRL_OUT_5 BIT(5) +#define CTRL_OUT_6 BIT(6) +#define CTRL_OUT_7 BIT(7) + +/* Bit definitions for the GPIO_OUT_CTRL_1 register */ +#define CTRL_OUT_8 BIT(0) +#define CTRL_OUT_9 BIT(1) +#define CTRL_OUT_10 BIT(2) +#define CTRL_OUT_11 BIT(3) +#define CTRL_OUT_12 BIT(4) +#define CTRL_OUT_13 BIT(5) +#define CTRL_OUT_14 BIT(6) +#define CTRL_OUT_15 BIT(7) + +/* Bit definitions for the GPIO_TOD_TRIG register */ +#define TOD_TRIG_0 BIT(0) +#define TOD_TRIG_1 BIT(1) +#define TOD_TRIG_2 BIT(2) +#define TOD_TRIG_3 BIT(3) + +/* Bit definitions for the GPIO_DPLL_INDICATOR register */ +#define IND_DPLL_INDEX_SHIFT (0) +#define IND_DPLL_INDEX_MASK (0x7) + +/* Bit definitions for the GPIO_LOS_INDICATOR register */ +#define REFMON_INDEX_SHIFT (0) +#define REFMON_INDEX_MASK (0xf) +/* Active level of LOS indicator, 0=low 1=high */ +#define ACTIVE_LEVEL BIT(4) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */ +#define DSQ_INP_0 BIT(0) +#define DSQ_INP_1 BIT(1) +#define DSQ_INP_2 BIT(2) +#define DSQ_INP_3 BIT(3) +#define DSQ_INP_4 BIT(4) +#define DSQ_INP_5 BIT(5) +#define DSQ_INP_6 BIT(6) +#define DSQ_INP_7 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */ +#define DSQ_INP_8 BIT(0) +#define DSQ_INP_9 BIT(1) +#define DSQ_INP_10 BIT(2) +#define DSQ_INP_11 BIT(3) +#define DSQ_INP_12 BIT(4) +#define DSQ_INP_13 BIT(5) +#define DSQ_INP_14 BIT(6) +#define DSQ_INP_15 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */ +#define DSQ_DPLL_0 BIT(0) +#define DSQ_DPLL_1 BIT(1) +#define DSQ_DPLL_2 BIT(2) +#define DSQ_DPLL_3 BIT(3) +#define DSQ_DPLL_4 BIT(4) +#define DSQ_DPLL_5 BIT(5) +#define DSQ_DPLL_6 BIT(6) +#define DSQ_DPLL_7 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */ +#define DSQ_DPLL_SYS BIT(0) +#define GPIO_DSQ_LEVEL BIT(1) + +/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */ 
+#define DPLL_TOD_SHIFT (0) +#define DPLL_TOD_MASK (0x3) +#define TOD_READ_SECONDARY BIT(2) +#define GPIO_ASSERT_LEVEL BIT(3) + +/* Bit definitions for the GPIO_CTRL register */ +#define GPIO_FUNCTION_EN BIT(0) +#define GPIO_CMOS_OD_MODE BIT(1) +#define GPIO_CONTROL_DIR BIT(2) +#define GPIO_PU_PD_MODE BIT(3) +#define GPIO_FUNCTION_SHIFT (4) +#define GPIO_FUNCTION_MASK (0xf) + +/* Bit definitions for the OUT_CTRL_1 register */ +#define OUT_SYNC_DISABLE BIT(7) +#define SQUELCH_VALUE BIT(6) +#define SQUELCH_DISABLE BIT(5) +#define PAD_VDDO_SHIFT (2) +#define PAD_VDDO_MASK (0x7) +#define PAD_CMOSDRV_SHIFT (0) +#define PAD_CMOSDRV_MASK (0x3) + +/* Bit definitions for the TOD_CFG register */ +#define TOD_EVEN_PPS_MODE BIT(2) +#define TOD_OUT_SYNC_ENABLE BIT(1) +#define TOD_ENABLE BIT(0) + +/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */ +#define WR_PWM_DECODER_INDEX_SHIFT (4) +#define WR_PWM_DECODER_INDEX_MASK (0xf) +#define WR_REF_INDEX_SHIFT (0) +#define WR_REF_INDEX_MASK (0xf) + +/* Bit definitions for the TOD_WRITE_CMD register */ +#define TOD_WRITE_SELECTION_SHIFT (0) +#define TOD_WRITE_SELECTION_MASK (0xf) +/* 4.8.7 */ +#define TOD_WRITE_TYPE_SHIFT (4) +#define TOD_WRITE_TYPE_MASK (0x3) + +/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */ +#define RD_PWM_DECODER_INDEX_SHIFT (4) +#define RD_PWM_DECODER_INDEX_MASK (0xf) +#define RD_REF_INDEX_SHIFT (0) +#define RD_REF_INDEX_MASK (0xf) + +/* Bit definitions for the TOD_READ_PRIMARY_CMD register */ +#define TOD_READ_TRIGGER_MODE BIT(4) +#define TOD_READ_TRIGGER_SHIFT (0) +#define TOD_READ_TRIGGER_MASK (0xf) + +/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */ +#define COMBO_MASTER_HOLD BIT(0) + +/* Bit definitions for DPLL_SYS_STATUS register */ +#define DPLL_SYS_STATE_MASK (0xf) + +/* Bit definitions for SYS_APLL_STATUS register */ +#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0) +#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0 +#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1 + +/* Bit definitions for the DPLL0_STATUS register */ +#define DPLL_STATE_MASK (0xf) +#define DPLL_STATE_SHIFT (0x0) + +/* Values of DPLL_N.DPLL_MODE.PLL_MODE */ enum pll_mode { + PLL_MODE_MIN = 0, + PLL_MODE_NORMAL = PLL_MODE_MIN, + PLL_MODE_WRITE_PHASE = 1, + PLL_MODE_WRITE_FREQUENCY = 2, + PLL_MODE_GPIO_INC_DEC = 3, + PLL_MODE_SYNTHESIS = 4, + PLL_MODE_PHASE_MEASUREMENT = 5, + PLL_MODE_DISABLED = 6, + PLL_MODE_MAX = PLL_MODE_DISABLED, +}; + +enum hw_tod_write_trig_sel { + HW_TOD_WR_TRIG_SEL_MIN = 0, + HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN, + HW_TOD_WR_TRIG_SEL_RESERVED = 1, + HW_TOD_WR_TRIG_SEL_TOD_PPS = 2, + HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3, + HW_TOD_WR_TRIG_SEL_PWM_PPS = 4, + HW_TOD_WR_TRIG_SEL_GPIO = 5, + HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6, + WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC, +}; + +enum scsr_read_trig_sel { + /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */ + SCSR_TOD_READ_TRIG_SEL_DISABLE = 0, + /* TRIGGER IMMEDIATELY */ + SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1, + /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */ + SCSR_TOD_READ_TRIG_SEL_TODPPS = 2, + /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */ + SCSR_TOD_READ_TRIG_SEL_REFCLK = 3, + /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */ + SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4, + SCSR_TOD_READ_TRIG_SEL_RESERVED = 5, + /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */ + SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6, + /* TRIGGER ON SELECTED GPIO */ + SCSR_TOD_READ_TRIG_SEL_GPIO = 7, + SCSR_TOD_READ_TRIG_SEL_MAX =
SCSR_TOD_READ_TRIG_SEL_GPIO, +}; + +/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */ +enum dpll_state { + DPLL_STATE_MIN = 0, + DPLL_STATE_FREERUN = DPLL_STATE_MIN, + DPLL_STATE_LOCKACQ = 1, + DPLL_STATE_LOCKREC = 2, + DPLL_STATE_LOCKED = 3, + DPLL_STATE_HOLDOVER = 4, + DPLL_STATE_OPEN_LOOP = 5, + DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP, +}; + +/* 4.8.7 only */ +enum scsr_tod_write_trig_sel { + SCSR_TOD_WR_TRIG_SEL_DISABLE = 0, + SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1, + SCSR_TOD_WR_TRIG_SEL_REFCLK = 2, + SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3, + SCSR_TOD_WR_TRIG_SEL_TODPPS = 4, + SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5, + SCSR_TOD_WR_TRIG_SEL_GPIO = 6, + SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO, +}; + +/* 4.8.7 only */ +enum scsr_tod_write_type_sel { + SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0, + SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1, + SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2, + SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS, +}; +#endif diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index 5640e6088fe6..4c895072d91b 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -222,31 +222,20 @@ enum lp87565_device_type { #define LP87565_GPIO2_SEL BIT(1) #define LP87565_GPIO1_SEL BIT(0) -#define LP87565_GOIO3_OD BIT(6) -#define LP87565_GOIO2_OD BIT(5) -#define LP87565_GOIO1_OD BIT(4) -#define LP87565_GOIO3_DIR BIT(2) -#define LP87565_GOIO2_DIR BIT(1) -#define LP87565_GOIO1_DIR BIT(0) - -#define LP87565_GOIO3_IN BIT(2) -#define LP87565_GOIO2_IN BIT(1) -#define LP87565_GOIO1_IN BIT(0) - -#define LP87565_GOIO3_OUT BIT(2) -#define LP87565_GOIO2_OUT BIT(1) -#define LP87565_GOIO1_OUT BIT(0) - -enum LP87565_regulator_id { - /* BUCK's */ - LP87565_BUCK_0, - LP87565_BUCK_1, - LP87565_BUCK_2, - LP87565_BUCK_3, - LP87565_BUCK_10, - LP87565_BUCK_23, - LP87565_BUCK_3210, -}; +#define LP87565_GPIO3_OD BIT(6) +#define LP87565_GPIO2_OD BIT(5) +#define LP87565_GPIO1_OD BIT(4) +#define LP87565_GPIO3_DIR BIT(2) +#define LP87565_GPIO2_DIR BIT(1) +#define LP87565_GPIO1_DIR BIT(0) + +#define LP87565_GPIO3_IN BIT(2) +#define LP87565_GPIO2_IN BIT(1) +#define LP87565_GPIO1_IN BIT(0) + +#define LP87565_GPIO3_OUT BIT(2) +#define LP87565_GPIO2_OUT BIT(1) +#define LP87565_GPIO1_OUT BIT(0) /** * struct LP87565 - state holder for the LP87565 driver @@ -263,5 +252,6 @@ struct lp87565 { u8 rev; u8 dev_type; struct regmap *regmap; + struct gpio_desc *reset_gpio; }; #endif /* __LINUX_MFD_LP87565_H */ diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 601cbbc10370..32e3470708ed 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -31,7 +31,7 @@ struct pinctrl_map; * @irq_flags: Mode for primary IRQ (defaults to active low) * @gpio_base: Base GPIO number * @gpio_configs: Array of GPIO configurations (See - * Documentation/driver-api/pinctl.rst) + * Documentation/driver-api/pin-control.rst) * @n_gpio_configs: Number of entries in gpio_configs * @gpsw: General purpose switch mode setting. Depends on the external * hardware connected to the switch. 
(See the SW1_MODE field diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h index c5a11b7458d4..68578e2019b0 100644 --- a/include/linux/mfd/mt6358/core.h +++ b/include/linux/mfd/mt6358/core.h @@ -6,12 +6,9 @@ #ifndef __MFD_MT6358_CORE_H__ #define __MFD_MT6358_CORE_H__ -#define MT6358_REG_WIDTH 16 - struct irq_top_t { int hwirq_base; unsigned int num_int_regs; - unsigned int num_int_bits; unsigned int en_reg; unsigned int en_reg_shift; unsigned int sta_reg; @@ -25,6 +22,7 @@ struct pmic_irq_data { unsigned short top_int_status_reg; bool *enable_hwirq; bool *cache_hwirq; + const struct irq_top_t *pmic_ints; }; enum mt6358_irq_top_status_shift { @@ -146,8 +144,8 @@ enum mt6358_irq_numbers { { \ .hwirq_base = MT6358_IRQ_##sp##_BASE, \ .num_int_regs = \ - ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \ - .num_int_bits = MT6358_IRQ_##sp##_BITS, \ + ((MT6358_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ .en_reg = MT6358_##sp##_TOP_INT_CON0, \ .en_reg_shift = 0x6, \ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \ diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 2ad0b312aa28..201139b12140 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -8,6 +8,8 @@ /* PMIC Registers */ #define MT6358_SWCID 0xa +#define MT6358_TOPSTATUS 0x28 +#define MT6358_TOP_RST_MISC 0x14c #define MT6358_MISC_TOP_INT_CON0 0x188 #define MT6358_MISC_TOP_INT_STATUS0 0x194 #define MT6358_TOP_INT_STATUS0 0x19e diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h new file mode 100644 index 000000000000..8d298868126d --- /dev/null +++ b/include/linux/mfd/mt6359/core.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __MFD_MT6359_CORE_H__ +#define __MFD_MT6359_CORE_H__ + +enum mt6359_irq_top_status_shift { + MT6359_BUCK_TOP = 0, + MT6359_LDO_TOP, + MT6359_PSC_TOP, + MT6359_SCK_TOP, + MT6359_BM_TOP, + MT6359_HK_TOP, + MT6359_AUD_TOP = 7, + MT6359_MISC_TOP, +}; + +enum mt6359_irq_numbers { + MT6359_IRQ_VCORE_OC = 1, + MT6359_IRQ_VGPU11_OC, + MT6359_IRQ_VGPU12_OC, + MT6359_IRQ_VMODEM_OC, + MT6359_IRQ_VPROC1_OC, + MT6359_IRQ_VPROC2_OC, + MT6359_IRQ_VS1_OC, + MT6359_IRQ_VS2_OC, + MT6359_IRQ_VPA_OC = 9, + MT6359_IRQ_VFE28_OC = 16, + MT6359_IRQ_VXO22_OC, + MT6359_IRQ_VRF18_OC, + MT6359_IRQ_VRF12_OC, + MT6359_IRQ_VEFUSE_OC, + MT6359_IRQ_VCN33_1_OC, + MT6359_IRQ_VCN33_2_OC, + MT6359_IRQ_VCN13_OC, + MT6359_IRQ_VCN18_OC, + MT6359_IRQ_VA09_OC, + MT6359_IRQ_VCAMIO_OC, + MT6359_IRQ_VA12_OC, + MT6359_IRQ_VAUX18_OC, + MT6359_IRQ_VAUD18_OC, + MT6359_IRQ_VIO18_OC, + MT6359_IRQ_VSRAM_PROC1_OC, + MT6359_IRQ_VSRAM_PROC2_OC, + MT6359_IRQ_VSRAM_OTHERS_OC, + MT6359_IRQ_VSRAM_MD_OC, + MT6359_IRQ_VEMC_OC, + MT6359_IRQ_VSIM1_OC, + MT6359_IRQ_VSIM2_OC, + MT6359_IRQ_VUSB_OC, + MT6359_IRQ_VRFCK_OC, + MT6359_IRQ_VBBCK_OC, + MT6359_IRQ_VBIF28_OC, + MT6359_IRQ_VIBR_OC, + MT6359_IRQ_VIO28_OC, + MT6359_IRQ_VM18_OC, + MT6359_IRQ_VUFS_OC = 45, + MT6359_IRQ_PWRKEY = 48, + MT6359_IRQ_HOMEKEY, + MT6359_IRQ_PWRKEY_R, + MT6359_IRQ_HOMEKEY_R, + MT6359_IRQ_NI_LBAT_INT, + MT6359_IRQ_CHRDET_EDGE = 53, + MT6359_IRQ_RTC = 64, + MT6359_IRQ_FG_BAT_H = 80, + MT6359_IRQ_FG_BAT_L, + MT6359_IRQ_FG_CUR_H, + MT6359_IRQ_FG_CUR_L, + MT6359_IRQ_FG_ZCV = 84, + MT6359_IRQ_FG_N_CHARGE_L = 87, + MT6359_IRQ_FG_IAVG_H, + MT6359_IRQ_FG_IAVG_L = 89, + MT6359_IRQ_FG_DISCHARGE = 91, + MT6359_IRQ_FG_CHARGE, + MT6359_IRQ_BATON_LV = 96, + MT6359_IRQ_BATON_BAT_IN = 98, + MT6359_IRQ_BATON_BAT_OU, + MT6359_IRQ_BIF = 100, + MT6359_IRQ_BAT_H = 112, + MT6359_IRQ_BAT_L, + MT6359_IRQ_BAT2_H, + MT6359_IRQ_BAT2_L, + MT6359_IRQ_BAT_TEMP_H, + MT6359_IRQ_BAT_TEMP_L, + MT6359_IRQ_THR_H, + MT6359_IRQ_THR_L, + MT6359_IRQ_AUXADC_IMP, + MT6359_IRQ_NAG_C_DLTV = 121, + MT6359_IRQ_AUDIO = 128, + MT6359_IRQ_ACCDET = 133, + MT6359_IRQ_ACCDET_EINT0, + MT6359_IRQ_ACCDET_EINT1, + MT6359_IRQ_SPI_CMD_ALERT = 144, + MT6359_IRQ_NR, +}; + +#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC +#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC +#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY +#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC +#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H +#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H +#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO +#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT + +#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1) +#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1) +#define MT6359_IRQ_PSC_BITS \ + (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1) +#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1) +#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1) +#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1) +#define MT6359_IRQ_AUD_BITS \ + (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1) +#define MT6359_IRQ_MISC_BITS \ + (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1) + +#define MT6359_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6359_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6359_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ + .en_reg = MT6359_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6359_##sp##_TOP, \ +} + +#endif /* 
__MFD_MT6359_CORE_H__ */ diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h new file mode 100644 index 000000000000..2135c9695918 --- /dev/null +++ b/include/linux/mfd/mt6359/registers.h @@ -0,0 +1,529 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#ifndef __MFD_MT6359_REGISTERS_H__ +#define __MFD_MT6359_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6359_SWCID 0xa +#define MT6359_MISC_TOP_INT_CON0 0x188 +#define MT6359_MISC_TOP_INT_STATUS0 0x194 +#define MT6359_TOP_INT_STATUS0 0x19e +#define MT6359_SCK_TOP_INT_CON0 0x528 +#define MT6359_SCK_TOP_INT_STATUS0 0x534 +#define MT6359_EOSC_CALI_CON0 0x53a +#define MT6359_EOSC_CALI_CON1 0x53c +#define MT6359_RTC_MIX_CON0 0x53e +#define MT6359_RTC_MIX_CON1 0x540 +#define MT6359_RTC_MIX_CON2 0x542 +#define MT6359_RTC_DSN_ID 0x580 +#define MT6359_RTC_DSN_REV0 0x582 +#define MT6359_RTC_DBI 0x584 +#define MT6359_RTC_DXI 0x586 +#define MT6359_RTC_BBPU 0x588 +#define MT6359_RTC_IRQ_STA 0x58a +#define MT6359_RTC_IRQ_EN 0x58c +#define MT6359_RTC_CII_EN 0x58e +#define MT6359_RTC_AL_MASK 0x590 +#define MT6359_RTC_TC_SEC 0x592 +#define MT6359_RTC_TC_MIN 0x594 +#define MT6359_RTC_TC_HOU 0x596 +#define MT6359_RTC_TC_DOM 0x598 +#define MT6359_RTC_TC_DOW 0x59a +#define MT6359_RTC_TC_MTH 0x59c +#define MT6359_RTC_TC_YEA 0x59e +#define MT6359_RTC_AL_SEC 0x5a0 +#define MT6359_RTC_AL_MIN 0x5a2 +#define MT6359_RTC_AL_HOU 0x5a4 +#define MT6359_RTC_AL_DOM 0x5a6 +#define MT6359_RTC_AL_DOW 0x5a8 +#define MT6359_RTC_AL_MTH 0x5aa +#define MT6359_RTC_AL_YEA 0x5ac +#define MT6359_RTC_OSC32CON 0x5ae +#define MT6359_RTC_POWERKEY1 0x5b0 +#define MT6359_RTC_POWERKEY2 0x5b2 +#define MT6359_RTC_PDN1 0x5b4 +#define MT6359_RTC_PDN2 0x5b6 +#define MT6359_RTC_SPAR0 0x5b8 +#define MT6359_RTC_SPAR1 0x5ba +#define MT6359_RTC_PROT 0x5bc +#define MT6359_RTC_DIFF 0x5be +#define MT6359_RTC_CALI 0x5c0 +#define MT6359_RTC_WRTGR 0x5c2 +#define MT6359_RTC_CON 0x5c4 +#define MT6359_RTC_SEC_CTRL 0x5c6 +#define MT6359_RTC_INT_CNT 0x5c8 +#define MT6359_RTC_SEC_DAT0 0x5ca +#define MT6359_RTC_SEC_DAT1 0x5cc +#define MT6359_RTC_SEC_DAT2 0x5ce +#define MT6359_RTC_SEC_DSN_ID 0x600 +#define MT6359_RTC_SEC_DSN_REV0 0x602 +#define MT6359_RTC_SEC_DBI 0x604 +#define MT6359_RTC_SEC_DXI 0x606 +#define MT6359_RTC_TC_SEC_SEC 0x608 +#define MT6359_RTC_TC_MIN_SEC 0x60a +#define MT6359_RTC_TC_HOU_SEC 0x60c +#define MT6359_RTC_TC_DOM_SEC 0x60e +#define MT6359_RTC_TC_DOW_SEC 0x610 +#define MT6359_RTC_TC_MTH_SEC 0x612 +#define MT6359_RTC_TC_YEA_SEC 0x614 +#define MT6359_RTC_SEC_CK_PDN 0x616 +#define MT6359_RTC_SEC_WRTGR 0x618 +#define MT6359_PSC_TOP_INT_CON0 0x910 +#define MT6359_PSC_TOP_INT_STATUS0 0x91c +#define MT6359_BM_TOP_INT_CON0 0xc32 +#define MT6359_BM_TOP_INT_CON1 0xc38 +#define MT6359_BM_TOP_INT_STATUS0 0xc4a +#define MT6359_BM_TOP_INT_STATUS1 0xc4c +#define MT6359_HK_TOP_INT_CON0 0xf92 +#define MT6359_HK_TOP_INT_STATUS0 0xf9e +#define MT6359_BUCK_TOP_INT_CON0 0x1418 +#define MT6359_BUCK_TOP_INT_STATUS0 0x1424 +#define MT6359_BUCK_VPU_CON0 0x1488 +#define MT6359_BUCK_VPU_DBG0 0x14a6 +#define MT6359_BUCK_VPU_DBG1 0x14a8 +#define MT6359_BUCK_VPU_ELR0 0x14ac +#define MT6359_BUCK_VCORE_CON0 0x1508 +#define MT6359_BUCK_VCORE_DBG0 0x1526 +#define MT6359_BUCK_VCORE_DBG1 0x1528 +#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a +#define MT6359_BUCK_VCORE_ELR0 0x1534 +#define MT6359_BUCK_VGPU11_CON0 0x1588 +#define MT6359_BUCK_VGPU11_DBG0 0x15a6 +#define MT6359_BUCK_VGPU11_DBG1 0x15a8 +#define MT6359_BUCK_VGPU11_ELR0 0x15ac +#define 
MT6359_BUCK_VMODEM_CON0 0x1688 +#define MT6359_BUCK_VMODEM_DBG0 0x16a6 +#define MT6359_BUCK_VMODEM_DBG1 0x16a8 +#define MT6359_BUCK_VMODEM_ELR0 0x16ae +#define MT6359_BUCK_VPROC1_CON0 0x1708 +#define MT6359_BUCK_VPROC1_DBG0 0x1726 +#define MT6359_BUCK_VPROC1_DBG1 0x1728 +#define MT6359_BUCK_VPROC1_ELR0 0x172e +#define MT6359_BUCK_VPROC2_CON0 0x1788 +#define MT6359_BUCK_VPROC2_DBG0 0x17a6 +#define MT6359_BUCK_VPROC2_DBG1 0x17a8 +#define MT6359_BUCK_VPROC2_ELR0 0x17b2 +#define MT6359_BUCK_VS1_CON0 0x1808 +#define MT6359_BUCK_VS1_DBG0 0x1826 +#define MT6359_BUCK_VS1_DBG1 0x1828 +#define MT6359_BUCK_VS1_ELR0 0x1834 +#define MT6359_BUCK_VS2_CON0 0x1888 +#define MT6359_BUCK_VS2_DBG0 0x18a6 +#define MT6359_BUCK_VS2_DBG1 0x18a8 +#define MT6359_BUCK_VS2_ELR0 0x18b4 +#define MT6359_BUCK_VPA_CON0 0x1908 +#define MT6359_BUCK_VPA_CON1 0x190e +#define MT6359_BUCK_VPA_CFG0 0x1910 +#define MT6359_BUCK_VPA_CFG1 0x1912 +#define MT6359_BUCK_VPA_DBG0 0x1914 +#define MT6359_BUCK_VPA_DBG1 0x1916 +#define MT6359_VGPUVCORE_ANA_CON2 0x198e +#define MT6359_VGPUVCORE_ANA_CON13 0x19a4 +#define MT6359_VPROC1_ANA_CON3 0x19b2 +#define MT6359_VPROC2_ANA_CON3 0x1a0e +#define MT6359_VMODEM_ANA_CON3 0x1a1a +#define MT6359_VPU_ANA_CON3 0x1a26 +#define MT6359_VS1_ANA_CON0 0x1a2c +#define MT6359_VS2_ANA_CON0 0x1a34 +#define MT6359_VPA_ANA_CON0 0x1a3c +#define MT6359_LDO_TOP_INT_CON0 0x1b14 +#define MT6359_LDO_TOP_INT_CON1 0x1b1a +#define MT6359_LDO_TOP_INT_STATUS0 0x1b28 +#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a +#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40 +#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42 +#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44 +#define MT6359_LDO_VSRAM_MD_ELR 0x1b46 +#define MT6359_LDO_VFE28_CON0 0x1b88 +#define MT6359_LDO_VFE28_MON 0x1b8a +#define MT6359_LDO_VXO22_CON0 0x1b98 +#define MT6359_LDO_VXO22_MON 0x1b9a +#define MT6359_LDO_VRF18_CON0 0x1ba8 +#define MT6359_LDO_VRF18_MON 0x1baa +#define MT6359_LDO_VRF12_CON0 0x1bb8 +#define MT6359_LDO_VRF12_MON 0x1bba +#define MT6359_LDO_VEFUSE_CON0 0x1bc8 +#define MT6359_LDO_VEFUSE_MON 0x1bca +#define MT6359_LDO_VCN33_1_CON0 0x1bd8 +#define MT6359_LDO_VCN33_1_MON 0x1bda +#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8 +#define MT6359_LDO_VCN33_2_CON0 0x1c08 +#define MT6359_LDO_VCN33_2_MON 0x1c0a +#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18 +#define MT6359_LDO_VCN13_CON0 0x1c1a +#define MT6359_LDO_VCN13_MON 0x1c1c +#define MT6359_LDO_VCN18_CON0 0x1c2a +#define MT6359_LDO_VCN18_MON 0x1c2c +#define MT6359_LDO_VA09_CON0 0x1c3a +#define MT6359_LDO_VA09_MON 0x1c3c +#define MT6359_LDO_VCAMIO_CON0 0x1c4a +#define MT6359_LDO_VCAMIO_MON 0x1c4c +#define MT6359_LDO_VA12_CON0 0x1c5a +#define MT6359_LDO_VA12_MON 0x1c5c +#define MT6359_LDO_VAUX18_CON0 0x1c88 +#define MT6359_LDO_VAUX18_MON 0x1c8a +#define MT6359_LDO_VAUD18_CON0 0x1c98 +#define MT6359_LDO_VAUD18_MON 0x1c9a +#define MT6359_LDO_VIO18_CON0 0x1ca8 +#define MT6359_LDO_VIO18_MON 0x1caa +#define MT6359_LDO_VEMC_CON0 0x1cb8 +#define MT6359_LDO_VEMC_MON 0x1cba +#define MT6359_LDO_VSIM1_CON0 0x1cc8 +#define MT6359_LDO_VSIM1_MON 0x1cca +#define MT6359_LDO_VSIM2_CON0 0x1cd8 +#define MT6359_LDO_VSIM2_MON 0x1cda +#define MT6359_LDO_VUSB_CON0 0x1d08 +#define MT6359_LDO_VUSB_MON 0x1d0a +#define MT6359_LDO_VUSB_MULTI_SW 0x1d18 +#define MT6359_LDO_VRFCK_CON0 0x1d1a +#define MT6359_LDO_VRFCK_MON 0x1d1c +#define MT6359_LDO_VBBCK_CON0 0x1d2a +#define MT6359_LDO_VBBCK_MON 0x1d2c +#define MT6359_LDO_VBIF28_CON0 0x1d3a +#define MT6359_LDO_VBIF28_MON 0x1d3c +#define MT6359_LDO_VIBR_CON0 0x1d4a +#define MT6359_LDO_VIBR_MON 0x1d4c +#define 
MT6359_LDO_VIO28_CON0 0x1d5a +#define MT6359_LDO_VIO28_MON 0x1d5c +#define MT6359_LDO_VM18_CON0 0x1d88 +#define MT6359_LDO_VM18_MON 0x1d8a +#define MT6359_LDO_VUFS_CON0 0x1d98 +#define MT6359_LDO_VUFS_MON 0x1d9a +#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88 +#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a +#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e +#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6 +#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8 +#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac +#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08 +#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a +#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e +#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26 +#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c +#define MT6359_LDO_VSRAM_MD_MON 0x1f2e +#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32 +#define MT6359_VFE28_ANA_CON0 0x1f88 +#define MT6359_VAUX18_ANA_CON0 0x1f8c +#define MT6359_VUSB_ANA_CON0 0x1f90 +#define MT6359_VBIF28_ANA_CON0 0x1f94 +#define MT6359_VCN33_1_ANA_CON0 0x1f98 +#define MT6359_VCN33_2_ANA_CON0 0x1f9c +#define MT6359_VEMC_ANA_CON0 0x1fa0 +#define MT6359_VSIM1_ANA_CON0 0x1fa4 +#define MT6359_VSIM2_ANA_CON0 0x1fa8 +#define MT6359_VIO28_ANA_CON0 0x1fac +#define MT6359_VIBR_ANA_CON0 0x1fb0 +#define MT6359_VRF18_ANA_CON0 0x2008 +#define MT6359_VEFUSE_ANA_CON0 0x200c +#define MT6359_VCN18_ANA_CON0 0x2010 +#define MT6359_VCAMIO_ANA_CON0 0x2014 +#define MT6359_VAUD18_ANA_CON0 0x2018 +#define MT6359_VIO18_ANA_CON0 0x201c +#define MT6359_VM18_ANA_CON0 0x2020 +#define MT6359_VUFS_ANA_CON0 0x2024 +#define MT6359_VRF12_ANA_CON0 0x202a +#define MT6359_VCN13_ANA_CON0 0x202e +#define MT6359_VA09_ANA_CON0 0x2032 +#define MT6359_VA12_ANA_CON0 0x2036 +#define MT6359_VXO22_ANA_CON0 0x2088 +#define MT6359_VRFCK_ANA_CON0 0x208c +#define MT6359_VBBCK_ANA_CON0 0x2094 +#define MT6359_AUD_TOP_INT_CON0 0x2328 +#define MT6359_AUD_TOP_INT_STATUS0 0x2334 + +#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0 +#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0 +#define MT6359_RG_BUCK_VPU_LP_SHIFT 1 +#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0 +#define MT6359_DA_VPU_VOSEL_MASK 0x7F +#define MT6359_DA_VPU_VOSEL_SHIFT 0 +#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1 +#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0 +#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0 +#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0 +#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1 +#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0 +#define MT6359_DA_VCORE_VOSEL_MASK 0x7F +#define MT6359_DA_VCORE_VOSEL_SHIFT 0 +#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1 +#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0 +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0 +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4 +#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0 +#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0 +#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0 +#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1 +#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0 +#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F +#define MT6359_DA_VGPU11_VOSEL_SHIFT 0 +#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1 +#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0 +#define 
MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0 +#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0 +#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1 +#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0 +#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F +#define MT6359_DA_VMODEM_VOSEL_SHIFT 0 +#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1 +#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0 +#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0 +#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0 +#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1 +#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0 +#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F +#define MT6359_DA_VPROC1_VOSEL_SHIFT 0 +#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1 +#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0 +#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0 +#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0 +#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1 +#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0 +#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F +#define MT6359_DA_VPROC2_VOSEL_SHIFT 0 +#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1 +#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0 +#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0 +#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0 +#define MT6359_RG_BUCK_VS1_LP_SHIFT 1 +#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0 +#define MT6359_DA_VS1_VOSEL_MASK 0x7F +#define MT6359_DA_VS1_VOSEL_SHIFT 0 +#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1 +#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0 +#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0 +#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0 +#define MT6359_RG_BUCK_VS2_LP_SHIFT 1 +#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0 +#define MT6359_DA_VS2_VOSEL_MASK 0x7F +#define MT6359_DA_VS2_VOSEL_SHIFT 0 +#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1 +#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0 +#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F +#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0 +#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0 +#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0 +#define MT6359_RG_BUCK_VPA_LP_SHIFT 1 +#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1 +#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F +#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0 +#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0 +#define MT6359_DA_VPA_VOSEL_MASK 0x3F +#define MT6359_DA_VPA_VOSEL_SHIFT 0 +#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1 +#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2 +#define MT6359_RG_VGPU11_FCCM_SHIFT 9 +#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13 +#define MT6359_RG_VCORE_FCCM_SHIFT 5 +#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3 +#define MT6359_RG_VPROC1_FCCM_SHIFT 1 +#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3 +#define MT6359_RG_VPROC2_FCCM_SHIFT 1 +#define MT6359_RG_VMODEM_FCCM_ADDR 
MT6359_VMODEM_ANA_CON3 +#define MT6359_RG_VMODEM_FCCM_SHIFT 1 +#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3 +#define MT6359_RG_VPU_FCCM_SHIFT 1 +#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0 +#define MT6359_RG_VS1_FPWM_SHIFT 3 +#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0 +#define MT6359_RG_VS2_FPWM_SHIFT 3 +#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0 +#define MT6359_RG_VPA_MODESET_SHIFT 1 +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0 +#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0 +#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON +#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0 +#define MT6359_RG_LDO_VXO22_EN_SHIFT 0 +#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON +#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0 +#define MT6359_RG_LDO_VRF18_EN_SHIFT 0 +#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON +#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0 +#define MT6359_RG_LDO_VRF12_EN_SHIFT 0 +#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON +#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0 +#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0 +#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON +#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0 +#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1 +#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0 +#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON +#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW +#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0 +#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0 +#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON +#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW +#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1 +#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0 +#define MT6359_RG_LDO_VCN13_EN_SHIFT 0 +#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON +#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0 +#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON +#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0 +#define MT6359_RG_LDO_VA09_EN_SHIFT 0 +#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON +#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0 +#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0 +#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON +#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0 +#define MT6359_RG_LDO_VA12_EN_SHIFT 0 +#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON +#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0 +#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON +#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0 +#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON +#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0 +#define 
MT6359_RG_LDO_VIO18_EN_SHIFT 0 +#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON +#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0 +#define MT6359_RG_LDO_VEMC_EN_SHIFT 0 +#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON +#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0 +#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0 +#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON +#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0 +#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0 +#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON +#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0 +#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1 +#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0 +#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON +#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW +#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1 +#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15 +#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0 +#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0 +#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON +#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0 +#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0 +#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON +#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0 +#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON +#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0 +#define MT6359_RG_LDO_VIBR_EN_SHIFT 0 +#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON +#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0 +#define MT6359_RG_LDO_VIO28_EN_SHIFT 0 +#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON +#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0 +#define MT6359_RG_LDO_VM18_EN_SHIFT 0 +#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON +#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0 +#define MT6359_RG_LDO_VUFS_EN_SHIFT 0 +#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON +#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0 +#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON +#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1 +#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0 +#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON +#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1 +#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0 +#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON +#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1 +#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8 +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F +#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1 +#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0 +#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON +#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1 +#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F +#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8 +#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0 +#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF +#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8 +#define 
MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0 +#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF +#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8 +#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0 +#define MT6359_RG_VEMC_VOSEL_MASK 0xF +#define MT6359_RG_VEMC_VOSEL_SHIFT 8 +#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0 +#define MT6359_RG_VSIM1_VOSEL_MASK 0xF +#define MT6359_RG_VSIM1_VOSEL_SHIFT 8 +#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0 +#define MT6359_RG_VSIM2_VOSEL_MASK 0xF +#define MT6359_RG_VSIM2_VOSEL_SHIFT 8 +#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0 +#define MT6359_RG_VIO28_VOSEL_MASK 0xF +#define MT6359_RG_VIO28_VOSEL_SHIFT 8 +#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0 +#define MT6359_RG_VIBR_VOSEL_MASK 0xF +#define MT6359_RG_VIBR_VOSEL_SHIFT 8 +#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0 +#define MT6359_RG_VRF18_VOSEL_MASK 0xF +#define MT6359_RG_VRF18_VOSEL_SHIFT 8 +#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0 +#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF +#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8 +#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0 +#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF +#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8 +#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0 +#define MT6359_RG_VIO18_VOSEL_MASK 0xF +#define MT6359_RG_VIO18_VOSEL_SHIFT 8 +#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0 +#define MT6359_RG_VM18_VOSEL_MASK 0xF +#define MT6359_RG_VM18_VOSEL_SHIFT 8 +#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0 +#define MT6359_RG_VUFS_VOSEL_MASK 0xF +#define MT6359_RG_VUFS_VOSEL_SHIFT 8 +#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0 +#define MT6359_RG_VRF12_VOSEL_MASK 0xF +#define MT6359_RG_VRF12_VOSEL_SHIFT 8 +#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0 +#define MT6359_RG_VCN13_VOSEL_MASK 0xF +#define MT6359_RG_VCN13_VOSEL_SHIFT 8 +#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0 +#define MT6359_RG_VA09_VOSEL_MASK 0xF +#define MT6359_RG_VA09_VOSEL_SHIFT 8 +#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0 +#define MT6359_RG_VA12_VOSEL_MASK 0xF +#define MT6359_RG_VA12_VOSEL_SHIFT 8 +#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0 +#define MT6359_RG_VXO22_VOSEL_MASK 0xF +#define MT6359_RG_VXO22_VOSEL_SHIFT 8 +#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0 +#define MT6359_RG_VRFCK_VOSEL_MASK 0xF +#define MT6359_RG_VRFCK_VOSEL_SHIFT 8 +#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0 +#define MT6359_RG_VBBCK_VOSEL_MASK 0xF +#define MT6359_RG_VBBCK_VOSEL_SHIFT 8 + +#endif /* __MFD_MT6359_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h new file mode 100644 index 000000000000..3d97c1885171 --- /dev/null +++ b/include/linux/mfd/mt6359p/registers.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __MFD_MT6359P_REGISTERS_H__ +#define __MFD_MT6359P_REGISTERS_H__ + +#define MT6359P_CHIP_VER 0x5930 + +/* PMIC Registers */ +#define MT6359P_HWCID 0x8 +#define MT6359P_TOP_TRAP 0x50 +#define MT6359P_TOP_TMA_KEY 0x3a8 +#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a +#define MT6359P_BUCK_VCORE_ELR0 0x152c +#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa +#define MT6359P_BUCK_VGPU11_ELR0 0x15b4 +#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44 +#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46 +#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48 +#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a +#define MT6359P_LDO_VEMC_ELR_0 0x1b4c +#define MT6359P_LDO_VFE28_CON0 0x1b88 +#define MT6359P_LDO_VFE28_MON 0x1b8c +#define MT6359P_LDO_VXO22_CON0 0x1b9a +#define MT6359P_LDO_VXO22_MON 0x1b9e +#define MT6359P_LDO_VRF18_CON0 0x1bac +#define MT6359P_LDO_VRF18_MON 0x1bb0 +#define MT6359P_LDO_VRF12_CON0 0x1bbe +#define MT6359P_LDO_VRF12_MON 0x1bc2 +#define MT6359P_LDO_VEFUSE_CON0 0x1bd0 +#define MT6359P_LDO_VEFUSE_MON 0x1bd4 +#define MT6359P_LDO_VCN33_1_CON0 0x1be2 +#define MT6359P_LDO_VCN33_1_MON 0x1be6 +#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4 +#define MT6359P_LDO_VCN33_2_CON0 0x1c08 +#define MT6359P_LDO_VCN33_2_MON 0x1c0c +#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a +#define MT6359P_LDO_VCN13_CON0 0x1c1c +#define MT6359P_LDO_VCN13_MON 0x1c20 +#define MT6359P_LDO_VCN18_CON0 0x1c2e +#define MT6359P_LDO_VCN18_MON 0x1c32 +#define MT6359P_LDO_VA09_CON0 0x1c40 +#define MT6359P_LDO_VA09_MON 0x1c44 +#define MT6359P_LDO_VCAMIO_CON0 0x1c52 +#define MT6359P_LDO_VCAMIO_MON 0x1c56 +#define MT6359P_LDO_VA12_CON0 0x1c64 +#define MT6359P_LDO_VA12_MON 0x1c68 +#define MT6359P_LDO_VAUX18_CON0 0x1c88 +#define MT6359P_LDO_VAUX18_MON 0x1c8c +#define MT6359P_LDO_VAUD18_CON0 0x1c9a +#define MT6359P_LDO_VAUD18_MON 0x1c9e +#define MT6359P_LDO_VIO18_CON0 0x1cac +#define MT6359P_LDO_VIO18_MON 0x1cb0 +#define MT6359P_LDO_VEMC_CON0 0x1cbe +#define MT6359P_LDO_VEMC_MON 0x1cc2 +#define MT6359P_LDO_VSIM1_CON0 0x1cd0 +#define MT6359P_LDO_VSIM1_MON 0x1cd4 +#define MT6359P_LDO_VSIM2_CON0 0x1ce2 +#define MT6359P_LDO_VSIM2_MON 0x1ce6 +#define MT6359P_LDO_VUSB_CON0 0x1d08 +#define MT6359P_LDO_VUSB_MON 0x1d0c +#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a +#define MT6359P_LDO_VRFCK_CON0 0x1d1c +#define MT6359P_LDO_VRFCK_MON 0x1d20 +#define MT6359P_LDO_VBBCK_CON0 0x1d2e +#define MT6359P_LDO_VBBCK_MON 0x1d32 +#define MT6359P_LDO_VBIF28_CON0 0x1d40 +#define MT6359P_LDO_VBIF28_MON 0x1d44 +#define MT6359P_LDO_VIBR_CON0 0x1d52 +#define MT6359P_LDO_VIBR_MON 0x1d56 +#define MT6359P_LDO_VIO28_CON0 0x1d64 +#define MT6359P_LDO_VIO28_MON 0x1d68 +#define MT6359P_LDO_VM18_CON0 0x1d88 +#define MT6359P_LDO_VM18_MON 0x1d8c +#define MT6359P_LDO_VUFS_CON0 0x1d9a +#define MT6359P_LDO_VUFS_MON 0x1d9e +#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88 +#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c +#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90 +#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8 +#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac +#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0 +#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08 +#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c +#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10 +#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28 +#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e +#define MT6359P_LDO_VSRAM_MD_MON 0x1f32 +#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36 +#define MT6359P_VFE28_ANA_CON0 0x1f88 +#define MT6359P_VAUX18_ANA_CON0 0x1f8c +#define MT6359P_VUSB_ANA_CON0 0x1f90 +#define MT6359P_VBIF28_ANA_CON0 0x1f94 +#define MT6359P_VCN33_1_ANA_CON0 0x1f98 +#define 
MT6359P_VCN33_2_ANA_CON0 0x1f9c +#define MT6359P_VEMC_ANA_CON0 0x1fa0 +#define MT6359P_VSIM1_ANA_CON0 0x1fa2 +#define MT6359P_VSIM2_ANA_CON0 0x1fa6 +#define MT6359P_VIO28_ANA_CON0 0x1faa +#define MT6359P_VIBR_ANA_CON0 0x1fae +#define MT6359P_VFE28_ELR_4 0x1fc0 +#define MT6359P_VRF18_ANA_CON0 0x2008 +#define MT6359P_VEFUSE_ANA_CON0 0x200c +#define MT6359P_VCN18_ANA_CON0 0x2010 +#define MT6359P_VCAMIO_ANA_CON0 0x2014 +#define MT6359P_VAUD18_ANA_CON0 0x2018 +#define MT6359P_VIO18_ANA_CON0 0x201c +#define MT6359P_VM18_ANA_CON0 0x2020 +#define MT6359P_VUFS_ANA_CON0 0x2024 +#define MT6359P_VRF12_ANA_CON0 0x202a +#define MT6359P_VCN13_ANA_CON0 0x202e +#define MT6359P_VA09_ANA_CON0 0x2032 +#define MT6359P_VRF18_ELR_3 0x204e +#define MT6359P_VXO22_ANA_CON0 0x2088 +#define MT6359P_VRFCK_ANA_CON0 0x208c +#define MT6359P_VBBCK_ANA_CON0 0x2096 + +#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0 +#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0 +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F +#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4 +#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR +#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR +#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR +#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR +#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0 +#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF +#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0 +#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0 +#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON +#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0 +#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0 +#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON +#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0 +#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0 +#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON +#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0 +#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0 +#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON +#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0 +#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0 +#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON +#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0 +#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON +#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW +#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15 +#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0 +#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0 +#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON +#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW +#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0 +#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0 +#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON +#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0 +#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON +#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0 +#define MT6359P_RG_LDO_VA09_EN_SHIFT 0 +#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON +#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0 +#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0 +#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON 
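The _ADDR/_MASK/_SHIFT triplets above follow the usual MediaTek PMIC convention: _ADDR names the register, _MASK gives the unshifted field width, and _SHIFT gives the field's bit offset. A minimal sketch of how a regulator driver might program such a field through the parent MFD's regmap (the helper name and the absence of range checking are illustrative, not part of the patch):

#include <linux/regmap.h>
#include <linux/mfd/mt6359p/registers.h>

/* Program the 7-bit VGPU11 SSHUB voltage selector located at bit 4. */
static int mt6359p_set_vgpu11_sshub_vosel(struct regmap *regmap,
					  unsigned int sel)
{
	return regmap_update_bits(regmap,
				  MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR,
				  MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK <<
				  MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT,
				  sel << MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT);
}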
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0 +#define MT6359P_RG_LDO_VA12_EN_SHIFT 0 +#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON +#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0 +#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON +#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0 +#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON +#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0 +#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0 +#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON +#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0 +#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0 +#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON +#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0 +#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0 +#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON +#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0 +#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0 +#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON +#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0 +#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON +#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW +#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0 +#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0 +#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON +#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0 +#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0 +#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON +#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0 +#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON +#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0 +#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0 +#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON +#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0 +#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0 +#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON +#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0 +#define MT6359P_RG_LDO_VM18_EN_SHIFT 0 +#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON +#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0 +#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0 +#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON +#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0 +#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON +#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0 +#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON +#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0 +#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON +#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1 +#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB +#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB +#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0 +#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON +#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1 +#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0 +#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0 +#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0 +#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0 
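VUSB, like VCN33_1 and VCN33_2, carries two enable bits: one in the LDO's own CON0 register and a second in a shared MULTI_SW register. A sketch of what honouring both could look like; the bit positions (0 and 15) are assumptions carried over from the MT6359 layout earlier in this diff, since the MT6359P header defines only the register addresses:

#include <linux/bits.h>
#include <linux/regmap.h>

static int mt6359p_vusb_enable(struct regmap *regmap)
{
	int ret;

	/* primary enable bit, assumed at bit 0 of CON0 as on MT6359 */
	ret = regmap_set_bits(regmap, MT6359P_RG_LDO_VUSB_EN_0_ADDR, BIT(0));
	if (ret)
		return ret;

	/* secondary enable bit, assumed at bit 15 of MULTI_SW */
	return regmap_set_bits(regmap, MT6359P_RG_LDO_VUSB_EN_1_ADDR,
			       BIT(15));
}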
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0 +#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0 +#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0 +#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0 +#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0 +#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0 +#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0 +#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0 +#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0 +#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0 +#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0 +#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3 +#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4 +#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0 +#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0 +#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0 +#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF +#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4 +#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP +#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY + +#define TMA_KEY 0x9CA6 + +#endif /* __MFD_MT6359P_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h deleted file mode 100644 index ea1304035d4d..000000000000 --- a/include/linux/mfd/mt6360.h +++ /dev/null @@ -1,240 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2020 MediaTek Inc. - */ - -#ifndef __MT6360_H__ -#define __MT6360_H__ - -#include <linux/regmap.h> - -enum { - MT6360_SLAVE_PMU = 0, - MT6360_SLAVE_PMIC, - MT6360_SLAVE_LDO, - MT6360_SLAVE_TCPC, - MT6360_SLAVE_MAX, -}; - -#define MT6360_PMU_SLAVEID (0x34) -#define MT6360_PMIC_SLAVEID (0x1A) -#define MT6360_LDO_SLAVEID (0x64) -#define MT6360_TCPC_SLAVEID (0x4E) - -struct mt6360_pmu_data { - struct i2c_client *i2c[MT6360_SLAVE_MAX]; - struct device *dev; - struct regmap *regmap; - struct regmap_irq_chip_data *irq_data; - unsigned int chip_rev; -}; - -/* PMU register defininition */ -#define MT6360_PMU_DEV_INFO (0x00) -#define MT6360_PMU_CORE_CTRL1 (0x01) -#define MT6360_PMU_RST1 (0x02) -#define MT6360_PMU_CRCEN (0x03) -#define MT6360_PMU_RST_PAS_CODE1 (0x04) -#define MT6360_PMU_RST_PAS_CODE2 (0x05) -#define MT6360_PMU_CORE_CTRL2 (0x06) -#define MT6360_PMU_TM_PAS_CODE1 (0x07) -#define MT6360_PMU_TM_PAS_CODE2 (0x08) -#define MT6360_PMU_TM_PAS_CODE3 (0x09) -#define MT6360_PMU_TM_PAS_CODE4 (0x0A) -#define MT6360_PMU_IRQ_IND (0x0B) -#define MT6360_PMU_IRQ_MASK (0x0C) -#define MT6360_PMU_IRQ_SET (0x0D) -#define MT6360_PMU_SHDN_CTRL (0x0E) -#define MT6360_PMU_TM_INF (0x0F) -#define MT6360_PMU_I2C_CTRL (0x10) -#define MT6360_PMU_CHG_CTRL1 (0x11) -#define MT6360_PMU_CHG_CTRL2 (0x12) -#define MT6360_PMU_CHG_CTRL3 (0x13) -#define MT6360_PMU_CHG_CTRL4 (0x14) -#define MT6360_PMU_CHG_CTRL5 (0x15) -#define MT6360_PMU_CHG_CTRL6 (0x16) -#define MT6360_PMU_CHG_CTRL7 (0x17) -#define MT6360_PMU_CHG_CTRL8 (0x18) -#define MT6360_PMU_CHG_CTRL9 (0x19) -#define MT6360_PMU_CHG_CTRL10 (0x1A) -#define MT6360_PMU_CHG_CTRL11 (0x1B) -#define MT6360_PMU_CHG_CTRL12 (0x1C) -#define MT6360_PMU_CHG_CTRL13 (0x1D) -#define MT6360_PMU_CHG_CTRL14 (0x1E) -#define MT6360_PMU_CHG_CTRL15 (0x1F) -#define MT6360_PMU_CHG_CTRL16 (0x20) -#define MT6360_PMU_CHG_AICC_RESULT (0x21) -#define MT6360_PMU_DEVICE_TYPE (0x22) -#define MT6360_PMU_QC_CONTROL1 (0x23) -#define MT6360_PMU_QC_CONTROL2 (0x24) -#define MT6360_PMU_QC30_CONTROL1 (0x25) -#define MT6360_PMU_QC30_CONTROL2 (0x26) -#define MT6360_PMU_USB_STATUS1 (0x27) 
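This deletion drops mt6360.h from the public MFD headers altogether. For orientation while reading the removed register list: the header's main job was to describe the 8-bit PMU register space for the core driver's regmap, roughly along these lines (an illustrative reconstruction, not the actual driver code):

#include <linux/regmap.h>

/* The removed header bounded the PMU register space with
 * MT6360_PMU_MAXREG (0xFF, the last LDO mask register). */
static const struct regmap_config mt6360_pmu_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MT6360_PMU_MAXREG,
};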
-#define MT6360_PMU_QC_STATUS1 (0x28) -#define MT6360_PMU_QC_STATUS2 (0x29) -#define MT6360_PMU_CHG_PUMP (0x2A) -#define MT6360_PMU_CHG_CTRL17 (0x2B) -#define MT6360_PMU_CHG_CTRL18 (0x2C) -#define MT6360_PMU_CHRDET_CTRL1 (0x2D) -#define MT6360_PMU_CHRDET_CTRL2 (0x2E) -#define MT6360_PMU_DPDN_CTRL (0x2F) -#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30) -#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31) -#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32) -#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33) -#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34) -#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35) -#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36) -#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37) -#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38) -#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39) -#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A) -#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B) -#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C) -#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D) -#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E) -#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F) -#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40) -#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41) -#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42) -#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43) -#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44) -#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45) -#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46) -#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47) -#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48) -#define MT6360_PMU_BC12_CTRL (0x49) -#define MT6360_PMU_CHG_STAT (0x4A) -#define MT6360_PMU_RESV1 (0x4B) -#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E) -#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F) -#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50) -#define MT6360_PMU_TYPEC_OTP_CTRL (0x51) -#define MT6360_PMU_ADC_BAT_DATA_H (0x52) -#define MT6360_PMU_ADC_BAT_DATA_L (0x53) -#define MT6360_PMU_IMID_BACKBST_ON (0x54) -#define MT6360_PMU_IMID_BACKBST_OFF (0x55) -#define MT6360_PMU_ADC_CONFIG (0x56) -#define MT6360_PMU_ADC_EN2 (0x57) -#define MT6360_PMU_ADC_IDLE_T (0x58) -#define MT6360_PMU_ADC_RPT_1 (0x5A) -#define MT6360_PMU_ADC_RPT_2 (0x5B) -#define MT6360_PMU_ADC_RPT_3 (0x5C) -#define MT6360_PMU_ADC_RPT_ORG1 (0x5D) -#define MT6360_PMU_ADC_RPT_ORG2 (0x5E) -#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F) -#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60) -#define MT6360_PMU_CHG_CTRL19 (0x61) -#define MT6360_PMU_VDDASUPPLY (0x62) -#define MT6360_PMU_BC12_MANUAL (0x63) -#define MT6360_PMU_CHGDET_FUNC (0x64) -#define MT6360_PMU_FOD_CTRL (0x65) -#define MT6360_PMU_CHG_CTRL20 (0x66) -#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67) -#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68) -#define MT6360_PMU_RESV2 (0x69) -#define MT6360_PMU_USBID_CTRL1 (0x6D) -#define MT6360_PMU_USBID_CTRL2 (0x6E) -#define MT6360_PMU_USBID_CTRL3 (0x6F) -#define MT6360_PMU_FLED_CFG (0x70) -#define MT6360_PMU_RESV3 (0x71) -#define MT6360_PMU_FLED1_CTRL (0x72) -#define MT6360_PMU_FLED_STRB_CTRL (0x73) -#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74) -#define MT6360_PMU_FLED1_TOR_CTRL (0x75) -#define MT6360_PMU_FLED2_CTRL (0x76) -#define MT6360_PMU_RESV4 (0x77) -#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78) -#define MT6360_PMU_FLED2_TOR_CTRL (0x79) -#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A) -#define MT6360_PMU_FLED_VMID_RTM (0x7B) -#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C) -#define MT6360_PMU_FLED_PWSEL (0x7D) -#define MT6360_PMU_FLED_EN (0x7E) -#define MT6360_PMU_FLED_Hidden1 (0x7F) -#define MT6360_PMU_RGB_EN (0x80) -#define MT6360_PMU_RGB1_ISNK (0x81) -#define MT6360_PMU_RGB2_ISNK (0x82) -#define MT6360_PMU_RGB3_ISNK (0x83) -#define 
MT6360_PMU_RGB_ML_ISNK (0x84) -#define MT6360_PMU_RGB1_DIM (0x85) -#define MT6360_PMU_RGB2_DIM (0x86) -#define MT6360_PMU_RGB3_DIM (0x87) -#define MT6360_PMU_RESV5 (0x88) -#define MT6360_PMU_RGB12_Freq (0x89) -#define MT6360_PMU_RGB34_Freq (0x8A) -#define MT6360_PMU_RGB1_Tr (0x8B) -#define MT6360_PMU_RGB1_Tf (0x8C) -#define MT6360_PMU_RGB1_TON_TOFF (0x8D) -#define MT6360_PMU_RGB2_Tr (0x8E) -#define MT6360_PMU_RGB2_Tf (0x8F) -#define MT6360_PMU_RGB2_TON_TOFF (0x90) -#define MT6360_PMU_RGB3_Tr (0x91) -#define MT6360_PMU_RGB3_Tf (0x92) -#define MT6360_PMU_RGB3_TON_TOFF (0x93) -#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94) -#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95) -#define MT6360_PMU_RESV6 (0x97) -#define MT6360_PMU_SPARE1 (0x9A) -#define MT6360_PMU_SPARE2 (0xA0) -#define MT6360_PMU_SPARE3 (0xB0) -#define MT6360_PMU_SPARE4 (0xC0) -#define MT6360_PMU_CHG_IRQ1 (0xD0) -#define MT6360_PMU_CHG_IRQ2 (0xD1) -#define MT6360_PMU_CHG_IRQ3 (0xD2) -#define MT6360_PMU_CHG_IRQ4 (0xD3) -#define MT6360_PMU_CHG_IRQ5 (0xD4) -#define MT6360_PMU_CHG_IRQ6 (0xD5) -#define MT6360_PMU_QC_IRQ (0xD6) -#define MT6360_PMU_FOD_IRQ (0xD7) -#define MT6360_PMU_BASE_IRQ (0xD8) -#define MT6360_PMU_FLED_IRQ1 (0xD9) -#define MT6360_PMU_FLED_IRQ2 (0xDA) -#define MT6360_PMU_RGB_IRQ (0xDB) -#define MT6360_PMU_BUCK1_IRQ (0xDC) -#define MT6360_PMU_BUCK2_IRQ (0xDD) -#define MT6360_PMU_LDO_IRQ1 (0xDE) -#define MT6360_PMU_LDO_IRQ2 (0xDF) -#define MT6360_PMU_CHG_STAT1 (0xE0) -#define MT6360_PMU_CHG_STAT2 (0xE1) -#define MT6360_PMU_CHG_STAT3 (0xE2) -#define MT6360_PMU_CHG_STAT4 (0xE3) -#define MT6360_PMU_CHG_STAT5 (0xE4) -#define MT6360_PMU_CHG_STAT6 (0xE5) -#define MT6360_PMU_QC_STAT (0xE6) -#define MT6360_PMU_FOD_STAT (0xE7) -#define MT6360_PMU_BASE_STAT (0xE8) -#define MT6360_PMU_FLED_STAT1 (0xE9) -#define MT6360_PMU_FLED_STAT2 (0xEA) -#define MT6360_PMU_RGB_STAT (0xEB) -#define MT6360_PMU_BUCK1_STAT (0xEC) -#define MT6360_PMU_BUCK2_STAT (0xED) -#define MT6360_PMU_LDO_STAT1 (0xEE) -#define MT6360_PMU_LDO_STAT2 (0xEF) -#define MT6360_PMU_CHG_MASK1 (0xF0) -#define MT6360_PMU_CHG_MASK2 (0xF1) -#define MT6360_PMU_CHG_MASK3 (0xF2) -#define MT6360_PMU_CHG_MASK4 (0xF3) -#define MT6360_PMU_CHG_MASK5 (0xF4) -#define MT6360_PMU_CHG_MASK6 (0xF5) -#define MT6360_PMU_QC_MASK (0xF6) -#define MT6360_PMU_FOD_MASK (0xF7) -#define MT6360_PMU_BASE_MASK (0xF8) -#define MT6360_PMU_FLED_MASK1 (0xF9) -#define MT6360_PMU_FLED_MASK2 (0xFA) -#define MT6360_PMU_FAULTB_MASK (0xFB) -#define MT6360_PMU_BUCK1_MASK (0xFC) -#define MT6360_PMU_BUCK2_MASK (0xFD) -#define MT6360_PMU_LDO_MASK1 (0xFE) -#define MT6360_PMU_LDO_MASK2 (0xFF) -#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2) - -/* MT6360_PMU_IRQ_SET */ -#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1) -#define MT6360_IRQ_RETRIG BIT(2) - -#define CHIP_VEN_MASK (0xF0) -#define CHIP_VEN_MT6360 (0x50) -#define CHIP_REV_MASK (0x0F) - -#endif /* __MT6360_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 949268581b36..56f210eebc54 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -13,6 +13,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, MT6358_CHIP_ID = 0x58, + MT6359_CHIP_ID = 0x59, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index c3748b53bf7d..068ae1c0f0e8 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -36,6 +36,7 @@ #define RTC_AL_MASK_DOW BIT(4) #define RTC_TC_SEC 0x000a +#define RTC_TC_MTH_MASK 
0x000f /* Min, Hour, Dom... register offset to RTC_TC_SEC */ #define RTC_OFFSET_SEC 0 #define RTC_OFFSET_MIN 1 diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index e07f6e61cd38..a96e6d43ca06 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -437,6 +437,87 @@ enum rk809_reg_id { #define RK817_RTC_COMP_LSB_REG 0x10 #define RK817_RTC_COMP_MSB_REG 0x11 +/* RK817 Codec Registers */ +#define RK817_CODEC_DTOP_VUCTL 0x12 +#define RK817_CODEC_DTOP_VUCTIME 0x13 +#define RK817_CODEC_DTOP_LPT_SRST 0x14 +#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15 +#define RK817_CODEC_AREF_RTCFG0 0x16 +#define RK817_CODEC_AREF_RTCFG1 0x17 +#define RK817_CODEC_AADC_CFG0 0x18 +#define RK817_CODEC_AADC_CFG1 0x19 +#define RK817_CODEC_DADC_VOLL 0x1a +#define RK817_CODEC_DADC_VOLR 0x1b +#define RK817_CODEC_DADC_SR_ACL0 0x1e +#define RK817_CODEC_DADC_ALC1 0x1f +#define RK817_CODEC_DADC_ALC2 0x20 +#define RK817_CODEC_DADC_NG 0x21 +#define RK817_CODEC_DADC_HPF 0x22 +#define RK817_CODEC_DADC_RVOLL 0x23 +#define RK817_CODEC_DADC_RVOLR 0x24 +#define RK817_CODEC_AMIC_CFG0 0x27 +#define RK817_CODEC_AMIC_CFG1 0x28 +#define RK817_CODEC_DMIC_PGA_GAIN 0x29 +#define RK817_CODEC_DMIC_LMT1 0x2a +#define RK817_CODEC_DMIC_LMT2 0x2b +#define RK817_CODEC_DMIC_NG1 0x2c +#define RK817_CODEC_DMIC_NG2 0x2d +#define RK817_CODEC_ADAC_CFG0 0x2e +#define RK817_CODEC_ADAC_CFG1 0x2f +#define RK817_CODEC_DDAC_POPD_DACST 0x30 +#define RK817_CODEC_DDAC_VOLL 0x31 +#define RK817_CODEC_DDAC_VOLR 0x32 +#define RK817_CODEC_DDAC_SR_LMT0 0x35 +#define RK817_CODEC_DDAC_LMT1 0x36 +#define RK817_CODEC_DDAC_LMT2 0x37 +#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38 +#define RK817_CODEC_DDAC_RVOLL 0x39 +#define RK817_CODEC_DDAC_RVOLR 0x3a +#define RK817_CODEC_AHP_ANTI0 0x3b +#define RK817_CODEC_AHP_ANTI1 0x3c +#define RK817_CODEC_AHP_CFG0 0x3d +#define RK817_CODEC_AHP_CFG1 0x3e +#define RK817_CODEC_AHP_CP 0x3f +#define RK817_CODEC_ACLASSD_CFG1 0x40 +#define RK817_CODEC_ACLASSD_CFG2 0x41 +#define RK817_CODEC_APLL_CFG0 0x42 +#define RK817_CODEC_APLL_CFG1 0x43 +#define RK817_CODEC_APLL_CFG2 0x44 +#define RK817_CODEC_APLL_CFG3 0x45 +#define RK817_CODEC_APLL_CFG4 0x46 +#define RK817_CODEC_APLL_CFG5 0x47 +#define RK817_CODEC_DI2S_CKM 0x48 +#define RK817_CODEC_DI2S_RSD 0x49 +#define RK817_CODEC_DI2S_RXCR1 0x4a +#define RK817_CODEC_DI2S_RXCR2 0x4b +#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c +#define RK817_CODEC_DI2S_TXCR1 0x4d +#define RK817_CODEC_DI2S_TXCR2 0x4e +#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f + +/* RK817_CODEC_DI2S_CKM */ +#define RK817_I2S_MODE_MASK (0x1 << 0) +#define RK817_I2S_MODE_MST (0x1 << 0) +#define RK817_I2S_MODE_SLV (0x0 << 0) + +/* RK817_CODEC_DDAC_MUTE_MIXCTL */ +#define DACMT_MASK (0x1 << 0) +#define DACMT_ENABLE (0x1 << 0) +#define DACMT_DISABLE (0x0 << 0) + +/* RK817_CODEC_DI2S_RXCR2 */ +#define VDW_RX_24BITS (0x17) +#define VDW_RX_16BITS (0x0f) + +/* RK817_CODEC_DI2S_TXCR2 */ +#define VDW_TX_24BITS (0x17) +#define VDW_TX_16BITS (0x0f) + +/* RK817_CODEC_AMIC_CFG0 */ +#define MIC_DIFF_MASK (0x1 << 7) +#define MIC_DIFF_DIS (0x0 << 7) +#define MIC_DIFF_EN (0x1 << 7) + #define RK817_POWER_EN_REG(i) (0xb1 + (i)) #define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i)) diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h index a57af878fd0c..4a5966475a35 100644 --- a/include/linux/mfd/rohm-bd70528.h +++ b/include/linux/mfd/rohm-bd70528.h @@ -26,9 +26,7 @@ struct bd70528_data { struct mutex rtc_timer_lock; }; -#define BD70528_BUCK_VOLTS 17 -#define BD70528_BUCK_VOLTS 17 -#define BD70528_BUCK_VOLTS 17 
+#define BD70528_BUCK_VOLTS 0x10 #define BD70528_LDO_VOLTS 0x20 #define BD70528_REG_BUCK1_EN 0x0F diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h index c7ab69c87ee8..3b5f3a7db4bd 100644 --- a/include/linux/mfd/rohm-bd71828.h +++ b/include/linux/mfd/rohm-bd71828.h @@ -26,11 +26,11 @@ enum { BD71828_REGULATOR_AMOUNT, }; -#define BD71828_BUCK1267_VOLTS 0xEF -#define BD71828_BUCK3_VOLTS 0x10 -#define BD71828_BUCK4_VOLTS 0x20 -#define BD71828_BUCK5_VOLTS 0x10 -#define BD71828_LDO_VOLTS 0x32 +#define BD71828_BUCK1267_VOLTS 0x100 +#define BD71828_BUCK3_VOLTS 0x20 +#define BD71828_BUCK4_VOLTS 0x40 +#define BD71828_BUCK5_VOLTS 0x20 +#define BD71828_LDO_VOLTS 0x40 /* LDO6 is fixed 1.8V voltage */ #define BD71828_LDO_6_VOLTAGE 1800000 diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h new file mode 100644 index 000000000000..6870de608233 --- /dev/null +++ b/include/linux/mfd/rsmu.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Core interface for Renesas Synchronization Management Unit (SMU) devices. + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company. + */ + +#ifndef __LINUX_MFD_RSMU_H +#define __LINUX_MFD_RSMU_H + +/* The supported devices are ClockMatrix, Sabre and SnowLotus */ +enum rsmu_type { + RSMU_CM = 0x34000, + RSMU_SABRE = 0x33810, + RSMU_SL = 0x19850, +}; + +/** + * + * struct rsmu_ddata - device data structure for sub devices. + * + * @dev: i2c/spi device. + * @regmap: i2c/spi bus access. + * @lock: mutex used by sub devices to make sure a series of + * bus access requests are not interrupted. + * @type: RSMU device type. + * @page: i2c/spi bus driver internal use only. + */ +struct rsmu_ddata { + struct device *dev; + struct regmap *regmap; + struct mutex lock; + enum rsmu_type type; + u16 page; +}; +#endif /* __LINUX_MFD_RSMU_H */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index f1631a39acfc..f92fe090473d 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -67,11 +67,8 @@ struct sec_pmic_dev { struct i2c_client *i2c; unsigned long device_type; - int irq_base; int irq; struct regmap_irq_chip_data *irq_data; - - bool wakeup; }; int sec_irq_init(struct sec_pmic_dev *sec_pmic); @@ -81,15 +78,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic); struct sec_platform_data { struct sec_regulator_data *regulators; struct sec_opmode_data *opmode; - int device_type; int num_regulators; - int irq_base; - int (*cfg_pmic_irq)(void); - - bool wakeup; - bool buck_voltage_lock; - int buck_gpios[3]; int buck_ds[3]; unsigned int buck2_voltage[8]; @@ -99,35 +89,12 @@ struct sec_platform_data { unsigned int buck4_voltage[8]; bool buck4_gpiodvs; - int buck_set1; - int buck_set2; - int buck_set3; - int buck2_enable; - int buck3_enable; - int buck4_enable; int buck_default_idx; - int buck2_default_idx; - int buck3_default_idx; - int buck4_default_idx; - int buck_ramp_delay; - int buck2_ramp_delay; - int buck34_ramp_delay; - int buck5_ramp_delay; - int buck16_ramp_delay; - int buck7810_ramp_delay; - int buck9_ramp_delay; - int buck24_ramp_delay; - int buck3_ramp_delay; - int buck7_ramp_delay; - int buck8910_ramp_delay; - - bool buck1_ramp_enable; bool buck2_ramp_enable; bool buck3_ramp_enable; bool buck4_ramp_enable; - bool buck6_ramp_enable; int buck2_init; int buck3_init; diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h index bb8d2e276668..76a943c83c63 100644 --- 
a/include/linux/mfd/wcd934x/registers.h +++ b/include/linux/mfd/wcd934x/registers.h @@ -18,6 +18,8 @@ #define WCD934X_EFUSE_SENSE_STATE_DEF 0x10 #define WCD934X_EFUSE_SENSE_EN_MASK BIT(0) #define WCD934X_EFUSE_SENSE_ENABLE BIT(0) +#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a +#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b #define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037 #define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038 #define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039 @@ -103,21 +105,58 @@ #define WCD934X_ANA_AMIC3 0x0610 #define WCD934X_ANA_AMIC4 0x0611 #define WCD934X_ANA_MBHC_MECH 0x0614 +#define WCD934X_MBHC_L_DET_EN_MASK BIT(7) +#define WCD934X_MBHC_L_DET_EN BIT(7) +#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6) +#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5) +#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1 +#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4) +#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1 +#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3) +#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1 +#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2) +#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1) +#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0) #define WCD934X_ANA_MBHC_ELECT 0x0615 +#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0) +#define WCD934X_ANA_MBHC_BIAS_EN BIT(0) #define WCD934X_ANA_MBHC_ZDET 0x0616 #define WCD934X_ANA_MBHC_RESULT_1 0x0617 #define WCD934X_ANA_MBHC_RESULT_2 0x0618 #define WCD934X_ANA_MBHC_RESULT_3 0x0619 +#define WCD934X_ANA_MBHC_BTN0 0x061a +#define WCD934X_VTH_MASK GENMASK(7, 2) +#define WCD934X_ANA_MBHC_BTN1 0x061b +#define WCD934X_ANA_MBHC_BTN2 0x061c +#define WCD934X_ANA_MBHC_BTN3 0x061d +#define WCD934X_ANA_MBHC_BTN4 0x061e +#define WCD934X_ANA_MBHC_BTN5 0x061f +#define WCD934X_ANA_MBHC_BTN6 0x0620 +#define WCD934X_ANA_MBHC_BTN7 0x0621 +#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2) #define WCD934X_ANA_MICB1 0x0622 #define WCD934X_MICB_VAL_MASK GENMASK(5, 0) #define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6) +#define WCD934X_MICB_DISABLE 0 +#define WCD934X_MICB_ENABLE 1 +#define WCD934X_MICB_PULL_UP 2 +#define WCD934X_MICB_PULL_DOWN 3 #define WCD934X_ANA_MICB_PULL_UP 0x80 #define WCD934X_ANA_MICB_ENABLE 0x40 #define WCD934X_ANA_MICB_DISABLE 0x0 #define WCD934X_ANA_MICB2 0x0623 +#define WCD934X_ANA_MICB2_ENABLE BIT(6) +#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6) +#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0) +#define WCD934X_ANA_MICB2_RAMP 0x0624 +#define WCD934X_RAMP_EN_MASK BIT(7) +#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2) #define WCD934X_ANA_MICB3 0x0625 #define WCD934X_ANA_MICB4 0x0626 #define WCD934X_BIAS_VBG_FINE_ADJ 0x0629 +#define WCD934X_MBHC_CTL_CLK 0x0656 +#define WCD934X_MBHC_CTL_BCS 0x065a +#define WCD934X_MBHC_STATUS_SPARE_1 0x065b #define WCD934X_MICB1_TEST_CTL_1 0x066b #define WCD934X_MICB1_TEST_CTL_2 0x066c #define WCD934X_MICB2_TEST_CTL_1 0x066e @@ -141,7 +180,11 @@ #define WCD934X_HPH_CNP_WG_CTL 0x06cc #define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7) #define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7) +#define WCD934X_HPH_CNP_WG_TIME 0x06cd #define WCD934X_HPH_OCP_CTL 0x06ce +#define WCD934X_HPH_PA_CTL2 0x06d2 +#define WCD934X_HPHPA_GND_R_MASK BIT(6) +#define WCD934X_HPHPA_GND_L_MASK BIT(4) #define WCD934X_HPH_L_EN 0x06d3 #define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5) #define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0 @@ -152,6 +195,8 @@ #define WCD934X_HPH_OCP_DET_MASK BIT(0) #define WCD934X_HPH_OCP_DET_ENABLE BIT(0) #define WCD934X_HPH_OCP_DET_DISABLE 0 +#define WCD934X_HPH_R_ATEST 0x06d8 +#define WCD934X_HPHPA_GND_OVR_MASK BIT(1) #define 
WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea #define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb #define WCD934X_CLK_SYS_MCLK_PRG 0x0711 @@ -172,7 +217,19 @@ #define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e #define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0) #define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0) +#define WCD934X_MBHC_NEW_CTL_1 0x0720 +#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7) +#define WCD935X_MBHC_CTL_RCO_EN BIT(7) #define WCD934X_MBHC_NEW_CTL_2 0x0721 +#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2) +#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722 +#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6) +#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723 +#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0) +#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4) +#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724 +#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725 +#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726 #define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727 #define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733 #define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735 diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 944aa3aa3035..723985879035 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -303,6 +303,7 @@ struct mhi_controller_config { * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @reg_len: Length of the MHI MMIO region (required) * @fbc_image: Points to firmware image buffer * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table @@ -356,6 +357,7 @@ struct mhi_controller_config { * @fbc_download: MHI host needs to do complete image transfer (optional) * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) + * @mru: the default MRU for the MHI device * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). 
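Of the two fields added to this structure, @reg_len is marked required alongside @regs, while @mru stays optional. A sketch of bus-glue code populating them before registration; mmio, mmio_len and cfg are assumed to come from the caller, and the MRU value is only illustrative:

#include <linux/mhi.h>

static int example_mhi_register(struct mhi_controller *mhi_cntrl,
				void __iomem *mmio, size_t mmio_len,
				const struct mhi_controller_config *cfg)
{
	mhi_cntrl->regs = mmio;		/* MHI MMIO base */
	mhi_cntrl->reg_len = mmio_len;	/* new: length of the MMIO region */
	mhi_cntrl->mru = 3500;		/* new: default MRU, value illustrative */

	return mhi_register_controller(mhi_cntrl, cfg);
}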
For the fields marked as (optional) @@ -386,6 +388,7 @@ struct mhi_controller { size_t rddm_size; size_t sbl_size; size_t seg_len; + size_t reg_len; struct image_info *fbc_image; struct image_info *rddm_image; struct mhi_chan *mhi_chan; @@ -448,6 +451,7 @@ struct mhi_controller { bool fbc_download; bool wake_set; unsigned long irq_flags; + u32 mru; }; /** diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 416ee6dd2574..3d43c60b49fa 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -39,10 +39,26 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 +#define MICREL_KSZ8_P1_ERRATA 0x00000003 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC #define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104 #define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105 +/* Device specific MII_BMCR (Reg 0) bits */ +/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */ +#define KSZ886X_BMCR_HP_MDIX BIT(5) +/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation + * (transmit on TXP/TXM pins) + */ +#define KSZ886X_BMCR_FORCE_MDI BIT(4) +/* 1 = Disable auto MDI-X */ +#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3) +#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2) +#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1) +#define KSZ886X_BMCR_DISABLE_LED BIT(0) + +#define KSZ886X_CTRL_MDIX_STAT BIT(4) + #endif /* _MICREL_PHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 4bb4e519e3f5..326250996b4e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -28,6 +28,7 @@ enum migrate_reason { MR_NUMA_MISPLACED, MR_CONTIG_RANGE, MR_LONGTERM_PIN, + MR_DEMOTION, MR_TYPES }; @@ -41,7 +42,8 @@ extern int migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, - unsigned long private, enum migrate_mode mode, int reason); + unsigned long private, enum migrate_mode mode, int reason, + unsigned int *ret_succeeded); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); @@ -56,7 +58,7 @@ extern int migrate_page_move_mapping(struct address_space *mapping, static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, - int reason) + int reason, unsigned int *ret_succeeded) { return -ENOSYS; } static inline struct page *alloc_migration_target(struct page *page, unsigned long private) @@ -76,7 +78,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION @@ -95,14 +96,9 @@ static inline void __ClearPageMovable(struct page *page) #endif #ifdef CONFIG_NUMA_BALANCING -extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); #else -static inline bool pmd_trans_migrating(pmd_t pmd) -{ - return false; -} static inline int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { @@ -110,24 +106,6 @@ static inline int migrate_misplaced_page(struct page *page, } #endif /* CONFIG_NUMA_BALANCING */ -#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 
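migrate_pages() grows an out-parameter so callers can learn exactly how many pages moved even when the call returns a positive count of pages left behind; MR_DEMOTION and next_demotion_node(), also added here, support demoting reclaimed pages to a slower memory node. A sketch under the new signature; the function name and the private cookie are illustrative, and callers that do not care about the count are expected to pass NULL:

#include <linux/migrate.h>

static int demote_example(struct list_head *pagelist, unsigned long private)
{
	unsigned int nr_succeeded = 0;
	int err;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    private, MIGRATE_SYNC, MR_DEMOTION,
			    &nr_succeeded);

	/* err > 0 means that many pages were left behind; nr_succeeded
	 * still reports the precise number that did migrate. */
	return err;
}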
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm, - struct vm_area_struct *vma, - pmd_t *pmd, pmd_t entry, - unsigned long address, - struct page *page, int node); -#else -static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, - struct vm_area_struct *vma, - pmd_t *pmd, pmd_t entry, - unsigned long address, - struct page *page, int node) -{ - return -EAGAIN; -} -#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/ - - #ifdef CONFIG_MIGRATION /* @@ -190,6 +168,14 @@ struct migrate_vma { int migrate_vma_setup(struct migrate_vma *args); void migrate_vma_pages(struct migrate_vma *migrate); void migrate_vma_finalize(struct migrate_vma *migrate); +int next_demotion_node(int node); + +#else /* CONFIG_MIGRATION disabled: */ + +static inline int next_demotion_node(int node) +{ + return NUMA_NO_NODE; +} #endif /* CONFIG_MIGRATION */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 219b93cad1dd..12ea29e04293 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -32,7 +32,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); -extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 236a7d04f891..30bb59fe970c 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -630,6 +630,7 @@ struct mlx4_caps { bool wol_port[MLX4_MAX_PORTS + 1]; struct mlx4_rate_limit_caps rl_caps; u32 health_buffer_addrs; + bool map_clock_to_user; }; struct mlx4_buf_list { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 578c4ccae91c..66eaf0aa7f69 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1038,7 +1038,7 @@ enum { struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, - * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation */ u8 status; u8 pcie_control; @@ -1157,6 +1157,9 @@ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_CUR = 1, }; +/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate + * capability memory. 
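The new comment anticipates a layout change applied below: the per-type capability arrays hca_cur[] and hca_max[] become individually allocated struct mlx5_hca_cap entries, and every MLX5_CAP_* accessor is rewritten from caps.hca_cur[type] to caps.hca[type]->cur. Consumers keep the same macro interface; a sketch of probing the new MLX5_CAP_GENERAL_2 block, gated on the hca_cap_2 bit this series also adds:

#include <linux/mlx5/driver.h>

static bool example_has_reformat_insert(struct mlx5_core_dev *mdev)
{
	/* the second capability page only exists when advertised */
	if (!MLX5_CAP_GEN(mdev, hca_cap_2))
		return false;

	return MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) > 0;
}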
+ */ enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, @@ -1179,6 +1182,7 @@ enum mlx5_cap_type { MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, + MLX5_CAP_GENERAL_2 = 0x20, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1212,46 +1216,55 @@ enum mlx5_qcam_feature_groups { /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap) + +#define MLX5_CAP_GEN_2(mdev, cap) \ + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) + +#define MLX5_CAP_GEN_2_64(mdev, cap) \ + MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) + +#define MLX5_CAP_GEN_2_MAX(mdev, cap) \ + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ - MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -1291,11 +1304,11 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - 
mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1317,31 +1330,31 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_eswitch_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) + mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) + MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) #define MLX5_CAP_DEBUG(mdev, cap)\ - MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) @@ -1377,27 +1390,27 @@ enum mlx5_qcam_feature_groups { MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP_DEV_MEM(mdev, cap)\ - MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP64_DEV_MEM(mdev, cap)\ - MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP_TLS(mdev, cap) \ - MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) + MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap) #define MLX5_CAP_DEV_EVENT(mdev, cap)\ - MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap) #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET64(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP_IPSEC(mdev, cap)\ - MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) enum { MLX5_CMD_STAT_OK = 0x0, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 020a8f7fdbdd..e23417424373 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -542,6 +542,10 @@ struct mlx5_core_roce { enum { MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, + /* Set during device detach to block any further devices + * creation/deletion on drivers rescan. Unset during device attach. 
+ */ + MLX5_PRIV_FLAGS_DETACH = 1 << 2, }; struct mlx5_adev { @@ -550,6 +554,7 @@ struct mlx5_adev { int idx; }; +struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ struct mlx5_irq_table *irq_table; @@ -576,7 +581,7 @@ struct mlx5_priv { /* end: qp staff */ /* start: alloc staff */ - /* protect buffer alocation according to numa node */ + /* protect buffer allocation according to numa node */ struct mutex alloc_mutex; int numa_node; @@ -602,6 +607,7 @@ struct mlx5_priv { struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; + struct mlx5_ft_pool *ft_pool; struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; @@ -617,8 +623,7 @@ struct mlx5_priv { }; enum mlx5_device_state { - MLX5_DEVICE_STATE_UNINITIALIZED, - MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_UP = 1, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; @@ -724,6 +729,11 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +struct mlx5_hca_cap { + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -735,8 +745,7 @@ struct mlx5_core_dev { char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct { - u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; @@ -1038,8 +1047,7 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1105,7 +1113,7 @@ static inline u8 mlx5_mkey_variant(u32 mkey) } /* Async-atomic event notifier used by mlx5 core to forward FW - * evetns recived from event queue to mlx5 consumers. + * evetns received from event queue to mlx5 consumers. * Optimise event queue dipatching. 
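One signature change in the hunk above deserves a note: mlx5_vector2eqn() drops its irqn out-parameter, so callers now receive only the EQ number. An illustrative wrapper under the new prototype:

static int example_get_eqn(struct mlx5_core_dev *mdev, int vector)
{
	int eqn, err;

	err = mlx5_vector2eqn(mdev, vector, &eqn);

	return err ? err : eqn;
}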
*/ int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); @@ -1132,6 +1140,8 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev); +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, struct net_device *slave); @@ -1139,6 +1149,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, size_t *offsets); +struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h index e49d8c0d4f26..cea6ecb4b73e 100644 --- a/include/linux/mlx5/eq.h +++ b/include/linux/mlx5/eq.h @@ -16,6 +16,7 @@ struct mlx5_eq_param { u8 irq_index; int nent; u64 mask[4]; + cpumask_var_t affinity; }; struct mlx5_eq * diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 17109b65c1ac..4ab5c1fc1270 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -29,11 +29,20 @@ enum { REP_LOADED, }; +enum mlx5_switchdev_event { + MLX5_SWITCHDEV_EVENT_PAIR, + MLX5_SWITCHDEV_EVENT_UNPAIR, +}; + struct mlx5_eswitch_rep; struct mlx5_eswitch_rep_ops { int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); void (*unload)(struct mlx5_eswitch_rep *rep); void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + int (*event)(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + enum mlx5_switchdev_event event, + void *data); }; struct mlx5_eswitch_rep_data { @@ -63,6 +72,7 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch *from_esw, struct mlx5_eswitch_rep *rep, u32 sqn); #ifdef CONFIG_MLX5_ESWITCH @@ -98,10 +108,11 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, u16 vport_num); /* Reg C1 usage: - * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) > + * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) > * - * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is - * encapsulation tunnel options, and the lowest 8 bits are used for zone id. + * Highest bit is reserved for other offloads as marker bit, next 12 bits of reg c1 + * is the encapsulation tunnel id, next 11 bits is encapsulation tunnel options, + * and the lowest 8 bits are used for zone id. * * Zone id is used to restore CT flow when packet misses on chain. * @@ -109,22 +120,25 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, * on miss and to support inner header rewrite by means of implicit chain 0 * flows. 
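Spelled out numerically, the revised Reg C1 layout puts the marker at bit 31, the tunnel id in bits 30:19, the tunnel options in bits 18:8 and the zone id in bits 7:0. The masks defined just below then work out as in this sketch (the static_asserts are a verification aid, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/mlx5/eswitch.h>

static_assert(ESW_TUN_OPTS_MASK == GENMASK(18, 8));
static_assert(ESW_TUN_MASK == GENMASK(30, 8));
static_assert(ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT == 0x7FF);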
*/ +#define ESW_RESERVED_BITS 1 #define ESW_ZONE_ID_BITS 8 -#define ESW_TUN_OPTS_BITS 12 +#define ESW_TUN_OPTS_BITS 11 #define ESW_TUN_ID_BITS 12 #define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS #define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET #define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0) -#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET) -#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET) +#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET) +#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET) #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */ -#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */ +/* 0x7FF is a reserved mapping */ +#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT) #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); +struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ @@ -168,6 +182,11 @@ static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) return 0; } +static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) +{ + return NULL; +} + #endif /* CONFIG_MLX5_ESWITCH */ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 1f51f4c3b1af..0106c67e8ccb 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -38,6 +38,8 @@ #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, @@ -87,6 +89,8 @@ enum { FDB_BYPASS_PATH, FDB_TC_OFFLOAD, FDB_FT_OFFLOAD, + FDB_TC_MISS, + FDB_BR_OFFLOAD, FDB_SLOW_PATH, FDB_PER_VPORT, }; @@ -254,10 +258,16 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); +struct mlx5_pkt_reformat_params { + int type; + u8 param_0; + u8 param_1; + size_t size; + void *data; +}; + struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - int reformat_type, - size_t size, - void *reformat_data, + struct mlx5_pkt_reformat_params *params, enum mlx5_flow_namespace_type ns_type); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index eb86e80e4643..f3638d09ba77 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -435,7 +435,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_40[0x20]; - u8 reserved_at_60[0x18]; + u8 reserved_at_60[0x2]; + u8 reformat_insert[0x1]; + u8 reformat_remove[0x1]; + u8 reserver_at_64[0x14]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; @@ -862,7 +865,8 @@ struct mlx5_ifc_qos_cap_bits { u8 nic_bw_share[0x1]; u8 nic_rate_limit[0x1]; u8 packet_pacing_uid[0x1]; - u8 reserved_at_c[0x14]; + u8 log_esw_max_sched_depth[0x4]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0xb]; u8 log_max_qos_nic_queue_group[0x5]; @@ -918,7 +922,8 @@ struct 
mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 scatter_fcs[0x1]; u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; - u8 reserved_at_1c[0x2]; + u8 tunnel_lro_gre[0x1]; + u8 tunnel_lro_vxlan[0x1]; u8 tunnel_stateless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; @@ -953,9 +958,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { }; enum { - MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, + MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, + MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, + MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, }; struct mlx5_ifc_roce_cap_bits { @@ -1296,23 +1301,12 @@ enum { MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, }; -enum { - MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, -}; - -enum { - MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0, - MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1, - MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2, -}; - struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x1f]; u8 vhca_resource_manager[0x1]; - u8 reserved_at_20[0x3]; + u8 hca_cap_2[0x1]; + u8 reserved_at_21[0x2]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; u8 event_on_vhca_state_active[0x1]; @@ -1520,7 +1514,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; - u8 reserved_at_250[0x8]; + u8 reserved_at_248[0x2]; + u8 umem_uid_0[0x1]; + u8 reserved_at_250[0x5]; u8 log_pg_sz[0x8]; u8 bf[0x1]; @@ -1657,7 +1653,13 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x33]; + u8 reserved_at_580[0xb]; + u8 log_max_dci_stream_channels[0x5]; + u8 reserved_at_590[0x3]; + u8 log_max_dci_errored_streams[0x5]; + u8 reserved_at_598[0x8]; + + u8 reserved_at_5a0[0x13]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; @@ -1732,6 +1734,17 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_7c0[0x40]; }; +struct mlx5_ifc_cmd_hca_cap_2_bits { + u8 reserved_at_0[0xa0]; + + u8 max_reformat_insert_size[0x8]; + u8 max_reformat_insert_offset[0x8]; + u8 max_reformat_remove_size[0x8]; + u8 max_reformat_remove_offset[0x8]; + + u8 reserved_at_c0[0x740]; +}; + enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, @@ -2944,9 +2957,9 @@ enum { }; enum { - MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, + MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1, + MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2, }; struct mlx5_ifc_qpc_bits { @@ -3015,10 +3028,12 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; - u8 reserved_at_3e0[0x8]; + u8 reserved_at_3e0[0x3]; + u8 log_num_dci_stream_channels[0x5]; u8 cqn_snd[0x18]; - u8 reserved_at_400[0x8]; + u8 reserved_at_400[0x3]; + u8 log_num_dci_errored_streams[0x5]; u8 deth_sqpn[0x18]; u8 reserved_at_420[0x20]; @@ -3105,6 +3120,7 @@ struct mlx5_ifc_roce_addr_layout_bits { union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; + struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct 
mlx5_ifc_roce_cap_bits roce_cap; @@ -3396,12 +3412,6 @@ enum { MLX5_SQC_STATE_ERR = 0x3, }; -enum { - MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, -}; - struct mlx5_ifc_sqc_bits { u8 rlky[0x1]; u8 cd_master[0x1]; @@ -3507,12 +3517,6 @@ enum { MLX5_RQC_STATE_ERR = 0x3, }; -enum { - MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0, - MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1, - MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2, -}; - struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; @@ -3790,8 +3794,8 @@ struct mlx5_ifc_eqc_bits { u8 reserved_at_80[0x20]; - u8 reserved_at_a0[0x18]; - u8 intr[0x8]; + u8 reserved_at_a0[0x14]; + u8 intr[0xc]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; @@ -3917,7 +3921,7 @@ struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x2]; u8 dbr_umem_valid[0x1]; - u8 apu_thread_cq[0x1]; + u8 apu_cq[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -3943,8 +3947,7 @@ struct mlx5_ifc_cqc_bits { u8 cq_period[0xc]; u8 cq_max_count[0x10]; - u8 reserved_at_a0[0x18]; - u8 c_eqn[0x8]; + u8 c_eqn_or_apu_element[0x20]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; @@ -5785,12 +5788,14 @@ struct mlx5_ifc_query_eq_in_bits { }; struct mlx5_ifc_packet_reformat_context_in_bits { - u8 reserved_at_0[0x5]; - u8 reformat_type[0x3]; - u8 reserved_at_8[0xe]; + u8 reformat_type[0x8]; + u8 reserved_at_8[0x4]; + u8 reformat_param_0[0x4]; + u8 reserved_at_10[0x6]; u8 reformat_data_size[0xa]; - u8 reserved_at_20[0x10]; + u8 reformat_param_1[0x8]; + u8 reserved_at_28[0x8]; u8 reformat_data[2][0x8]; u8 more_reformat_data[][0x8]; @@ -5830,12 +5835,20 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 reserved_at_60[0x20]; }; +enum { + MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1, + MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7, + MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9, +}; + enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3, MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, + MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf, + MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10, }; struct mlx5_ifc_alloc_packet_reformat_context_in_bits { @@ -5956,6 +5969,8 @@ enum { MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, + MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F, + MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -11055,6 +11070,11 @@ struct mlx5_ifc_create_sampler_obj_in_bits { struct mlx5_ifc_sampler_obj_bits sampler_object; }; +struct mlx5_ifc_query_sampler_obj_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + struct mlx5_ifc_sampler_obj_bits sampler_object; +}; + enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 98b56b75c625..1a9c9d94cb59 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -11,13 +11,15 @@ enum { }; enum { - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1,
- MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, }; enum { - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT), + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED), }; struct mlx5_ifc_virtio_q_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b7deb790f257..61e48d459b23 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -550,8 +550,8 @@ static inline const char *mlx5_qp_state_str(int state) static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : + MLX5_TIMESTAMP_FORMAT_DEFAULT; } #endif /* MLX5_QP_H */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 028f442530cf..60ffeb6b67ae 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, struct mlx5_hairpin_params *params); void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); +void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index c274f75efcf9..73a52aba448f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -46,7 +46,7 @@ extern int sysctl_page_lock_unfairness; void init_mm_internals(void); -#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ +#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; static inline void set_max_mapnr(unsigned long limit) @@ -125,16 +125,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif /* - * With CONFIG_CFI_CLANG, the compiler replaces function addresses in - * instrumented C code with jump table addresses. Architectures that - * support CFI can define this macro to return the actual function address - * when needed. - */ -#ifndef function_nocfi -#define function_nocfi(x) (x) -#endif - -/* * To prevent common memory management code establishing * a zero page mapping on a read fault. * This macro should be defined within <asm/pgtable.h>. @@ -155,7 +145,7 @@ extern int mmap_rnd_compat_bits __read_mostly; /* This function must be updated when the size of struct page grows above 80 * or reduces below 56. The idea that compiler optimizes out switch() * statement, and only leaves move/store instructions. Also the compiler can - * combine write statments if they are both assignments and can be reordered, + * combine write statements if they are both assignments and can be reordered, * this can result in several of the writes here being dropped. 
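The mlx5_ifc_vdpa.h change above derives the two capability flags from the queue-type indices with BIT() instead of repeating 0x1/0x2 as magic numbers, which keeps the cap bit and the type value from drifting apart. A standalone check that the derivation preserves the old values, with BIT() re-declared locally:

#include <assert.h>

#define BIT(n) (1u << (n))   /* local stand-in for the kernel macro */

enum { QUEUE_TYPE_SPLIT = 0, QUEUE_TYPE_PACKED = 1 };

enum {
        CAP_QUEUE_TYPE_SPLIT  = BIT(QUEUE_TYPE_SPLIT),
        CAP_QUEUE_TYPE_PACKED = BIT(QUEUE_TYPE_PACKED),
};

/* A capability word can now be tested directly by type index. */
static int type_supported(unsigned int caps, int type)
{
        return !!(caps & BIT(type));
}

int main(void)
{
        assert(CAP_QUEUE_TYPE_SPLIT == 0x1 && CAP_QUEUE_TYPE_PACKED == 0x2);
        assert(type_supported(CAP_QUEUE_TYPE_SPLIT, QUEUE_TYPE_SPLIT));
        assert(!type_supported(CAP_QUEUE_TYPE_SPLIT, QUEUE_TYPE_PACKED));
        return 0;
}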
*/ #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) @@ -234,7 +224,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp, void **shadowp); +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +#else +#define nth_page(page,n) ((page) + (n)) +#endif /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) @@ -244,6 +238,9 @@ int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) +void setup_initial_init_mm(void *start_code, void *end_code, + void *end_data, void *brk); + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -284,7 +281,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ -#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 @@ -546,7 +542,12 @@ struct vm_fault { pud_t *pud; /* Pointer to pud entry matching * the 'address' */ - pte_t orig_pte; /* Value of PTE at the time of fault */ + union { + pte_t orig_pte; /* Value of PTE at the time of fault */ + pmd_t orig_pmd; /* Value of PMD at the time of fault, + * used by PMD fault only. + */ + }; struct page *cow_page; /* Page handler may use for COW fault */ struct page *page; /* ->fault handlers should return a @@ -827,6 +828,8 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) return kvmalloc_array(n, size, flags | __GFP_ZERO); } +extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, + gfp_t flags); extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); @@ -904,6 +907,7 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); +void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. Provide a @@ -1211,8 +1215,8 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); -__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs, - unsigned int flags); +struct page *try_grab_compound_head(struct page *page, int refs, + unsigned int flags); static inline __must_check bool try_get_page(struct page *page) @@ -1341,7 +1345,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, if (!is_cow_mapping(vma->vm_flags)) return false; - if (!atomic_read(&vma->vm_mm->has_pinned)) + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) return false; return page_maybe_dma_pinned(page); @@ -1668,10 +1672,11 @@ struct address_space *page_mapping(struct page *page); static inline bool page_is_pfmemalloc(const struct page *page) { /* - * Page index cannot be this large so this must be - * a pfmemalloc page. + * lru.next has bit 1 set if the page is allocated from the + * pfmemalloc reserves. Callers may simply overwrite it if + * they do not need to preserve that information. 
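Bit 1 of page->lru.next can carry the pfmemalloc flag described above because list pointers are at least word-aligned, so their low two bits are never set by a real address; the hunk that follows tests, sets and clears exactly that bit. A userspace sketch of the same low-bit tagging idiom (fake_page is illustrative, not struct page):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BIT(n) (1ul << (n))

struct fake_page {
        void *next;     /* stands in for page->lru.next */
};

static void set_flag(struct fake_page *p)   { p->next = (void *)BIT(1); }
static void clear_flag(struct fake_page *p) { p->next = NULL; }
static int test_flag(const struct fake_page *p)
{
        return !!((uintptr_t)p->next & BIT(1));
}

int main(void)
{
        struct fake_page p = { NULL };

        assert(!test_flag(&p));
        set_flag(&p);
        assert(test_flag(&p));
        clear_flag(&p);          /* callers may simply overwrite the field */
        assert(!test_flag(&p));
        return 0;
}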
*/ - return page->index == -1UL; + return (uintptr_t)page->lru.next & BIT(1); } /* @@ -1680,12 +1685,12 @@ static inline bool page_is_pfmemalloc(const struct page *page) */ static inline void set_page_pfmemalloc(struct page *page) { - page->index = -1UL; + page->lru.next = (void *)BIT(1); } static inline void clear_page_pfmemalloc(struct page *page) { - page->index = 0; + page->lru.next = NULL; } /* @@ -1709,8 +1714,8 @@ extern bool can_do_mlock(void); #else static inline bool can_do_mlock(void) { return false; } #endif -extern int user_shm_lock(size_t, struct user_struct *); -extern void user_shm_unlock(size_t, struct user_struct *); +extern int user_shm_lock(size_t, struct ucounts *); +extern void user_shm_unlock(size_t, struct ucounts *); /* * Parameter block passed down to zap_pte_range in exceptional cases. @@ -1719,6 +1724,7 @@ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; /* Highest page->index to unmap */ + struct page *single_page; /* Locked page to be unmapped */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, @@ -1766,6 +1772,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); +void unmap_mapping_page(struct page *page); void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows); void unmap_mapping_range(struct address_space *mapping, @@ -1786,6 +1793,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, BUG(); return -EFAULT; } +static inline void unmap_mapping_page(struct page *page) { } static inline void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows) { } static inline void unmap_mapping_range(struct address_space *mapping, @@ -1840,19 +1848,14 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct kvec; int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); -int get_kernel_page(unsigned long start, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); -void __set_page_dirty(struct page *, struct address_space *, int warn); -int __set_page_dirty_nobuffers(struct page *page); -int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); -void account_page_dirtied(struct page *page, struct address_space *mapping); void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb); int set_page_dirty(struct page *page); @@ -2417,7 +2420,7 @@ static inline unsigned long free_initmem_default(int poison) extern char __init_begin[], __init_end[]; return free_reserved_area(&__init_begin, &__init_end, - poison, "unused kernel"); + poison, "unused kernel image (initmem)"); } static inline unsigned long get_num_physpages(void) @@ -2457,7 +2460,7 @@ extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA static inline int early_pfn_to_nid(unsigned long pfn) { return 0; @@ 
-2471,7 +2474,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); -extern void memmap_init_zone(struct zone *zone); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2578,7 +2580,8 @@ static inline int check_data_rlimit(unsigned long rlim, extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); -extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); extern struct file *get_task_exe_file(struct task_struct *task); @@ -2678,17 +2681,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/* Look up the first VMA which intersects the interval start_addr..end_addr-1, - NULL if none. Assume start_addr < end_addr. */ -static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. + * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +static inline +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) { - struct vm_area_struct * vma = find_vma(mm,start_addr); + struct vm_area_struct *vma = find_vma(mm, start_addr); if (vma && end_addr <= vma->vm_start) vma = NULL; return vma; } +/** + * vma_lookup() - Find a VMA at a specific address + * @mm: The process address space. + * @addr: The user address. + * + * Return: The vm_area_struct at the given address, %NULL otherwise. 
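The body below differs from plain find_vma() by a single check: find_vma() returns the first VMA that ends above the address even when it also starts above it, while vma_lookup() rejects that case and only reports a VMA that actually contains the address. A userspace sketch over a sorted interval array (simplified types, not the mm implementation):

#include <assert.h>
#include <stddef.h>

struct area { unsigned long start, end; };      /* [start, end) */

/* First area with end > addr, find_vma()-style; array is sorted. */
static const struct area *area_find(const struct area *v, size_t n,
                                    unsigned long addr)
{
        for (size_t i = 0; i < n; i++)
                if (addr < v[i].end)
                        return &v[i];
        return NULL;
}

/* Hit only if the area actually contains addr, vma_lookup()-style. */
static const struct area *area_lookup(const struct area *v, size_t n,
                                      unsigned long addr)
{
        const struct area *a = area_find(v, n, addr);

        return (a && addr < a->start) ? NULL : a;
}

int main(void)
{
        const struct area map[] = { { 0x1000, 0x2000 }, { 0x4000, 0x5000 } };

        assert(area_find(map, 2, 0x3000) == &map[1]);   /* next area above */
        assert(area_lookup(map, 2, 0x3000) == NULL);    /* hole: no hit    */
        assert(area_lookup(map, 2, 0x4800) == &map[1]); /* containing hit  */
        return 0;
}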
+ */ +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = find_vma(mm, addr); + + if (vma && addr < vma->vm_start) + vma = NULL; + + return vma; +} + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) { unsigned long vm_start = vma->vm_start; @@ -3046,6 +3077,11 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif +int vmemmap_remap_free(unsigned long start, unsigned long end, + unsigned long reuse); +int vmemmap_remap_alloc(unsigned long start, unsigned long end, + unsigned long reuse, gfp_t gfp_mask); + void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap); @@ -3084,7 +3120,7 @@ extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; -extern void shake_page(struct page *p, int access); +extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5aacc1c10a45..7f8ee09c711f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -97,10 +97,25 @@ struct page { }; struct { /* page_pool used by netstack */ /** - * @dma_addr: might require a 64-bit value on - * 32-bit architectures. + * @pp_magic: magic value to avoid recycling non + * page_pool allocated pages. */ - unsigned long dma_addr[2]; + unsigned long pp_magic; + struct page_pool *pp; + unsigned long _pp_mapping_pad; + unsigned long dma_addr; + union { + /** + * dma_addr_upper: might require a 64-bit + * value on 32-bit architectures. + */ + unsigned long dma_addr_upper; + /** + * For frag page support, not supported in + * 32-bit architectures with 64-bit DMA. + */ + atomic_long_t pp_frag_count; + }; }; struct { /* slab, slob and slub */ union { @@ -397,7 +412,7 @@ struct mm_struct { unsigned long mmap_base; /* base of mmap area */ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES - /* Base adresses for compatible mmap() */ + /* Base addresses for compatible mmap() */ unsigned long mmap_compat_base; unsigned long mmap_compat_legacy_base; #endif @@ -435,23 +450,6 @@ struct mm_struct { */ atomic_t mm_count; - /** - * @has_pinned: Whether this mm has pinned any pages. This can - * be either replaced in the future by @pinned_vm when it - * becomes stable, or grow into a counter on its own. We're - * aggresive on this bit now - even if the pinned pages were - * unpinned later on, we'll still keep this bit set for the - * lifecycle of this mm just for simplicity. - */ - atomic_t has_pinned; - - /** - * @write_protect_seq: Locked when any thread is write - * protecting pages mapped by this mm to enforce a later COW, - * for instance during page table copying for fork(). 
- */ - seqcount_t write_protect_seq; - #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif @@ -460,6 +458,18 @@ struct mm_struct { spinlock_t page_table_lock; /* Protects page tables and some * counters */ + /* + * With some kernel config, the current mmap_lock's offset + * inside 'mm_struct' is at 0x120, which is very optimal, as + * its two hot fields 'count' and 'owner' sit in 2 different + * cachelines, and when mmap_lock is highly contended, both + * of the 2 fields will be accessed frequently, current layout + * will help to reduce cache bouncing. + * + * So please be careful with adding new fields before + * mmap_lock, which can easily push the 2 fields into one + * cacheline. + */ struct rw_semaphore mmap_lock; struct list_head mmlist; /* List of maybe swapped mm's. These @@ -480,7 +490,15 @@ struct mm_struct { unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; + /** + * @write_protect_seq: Locked when any thread is write + * protecting pages mapped by this mm to enforce a later COW, + * for instance during page table copying for fork(). + */ + seqcount_t write_protect_seq; + spinlock_t arg_lock; /* protect the below fields */ + unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; diff --git a/include/linux/mman.h b/include/linux/mman.h index 629cefc4ecba..b66e91b8176c 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -31,6 +31,9 @@ /* * The historical set of flags that all mmap implementations implicitly * support when a ->mmap_validate() op is not provided in file_operations. + * + * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the + * kernel. */ #define LEGACY_MAP_MASK (MAP_SHARED \ | MAP_PRIVATE \ @@ -151,7 +154,6 @@ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | arch_calc_vm_flag_bits(flags); diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 0540f0156f58..b179f1e3541a 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -101,14 +101,14 @@ static inline bool mmap_write_trylock(struct mm_struct *mm) static inline void mmap_write_unlock(struct mm_struct *mm) { - up_write(&mm->mmap_lock); __mmap_lock_trace_released(mm, true); + up_write(&mm->mmap_lock); } static inline void mmap_write_downgrade(struct mm_struct *mm) { - downgrade_write(&mm->mmap_lock); __mmap_lock_trace_acquire_returned(mm, false, true); + downgrade_write(&mm->mmap_lock); } static inline void mmap_read_lock(struct mm_struct *mm) @@ -140,8 +140,8 @@ static inline bool mmap_read_trylock(struct mm_struct *mm) static inline void mmap_read_unlock(struct mm_struct *mm) { - up_read(&mm->mmap_lock); __mmap_lock_trace_released(mm, false); + up_read(&mm->mmap_lock); } static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) @@ -155,8 +155,8 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { - up_read_non_owner(&mm->mmap_lock); __mmap_lock_trace_released(mm, false); + up_read_non_owner(&mm->mmap_lock); } static inline void mmap_assert_locked(struct mm_struct *mm) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index f9ad35dd6012..37f975875102 100644 --- 
a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -109,6 +109,7 @@ struct mmc_ext_csd { u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_boot_mult; /* 226 */ u8 raw_sec_trim_mult; /* 229 */ u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ @@ -139,6 +140,8 @@ struct sd_scr { unsigned char cmds; #define SD_SCR_CMD20_SUPPORT (1<<0) #define SD_SCR_CMD23_SUPPORT (1<<1) +#define SD_SCR_CMD48_SUPPORT (1<<2) +#define SD_SCR_CMD58_SUPPORT (1<<3) }; struct sd_ssr { @@ -189,6 +192,25 @@ struct sd_switch_caps { #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) }; +struct sd_ext_reg { + u8 fno; + u8 page; + u16 offset; + u8 rev; + u8 feature_enabled; + u8 feature_support; +/* Power Management Function. */ +#define SD_EXT_POWER_OFF_NOTIFY (1<<0) +#define SD_EXT_POWER_SUSTENANCE (1<<1) +#define SD_EXT_POWER_DOWN_MODE (1<<2) +/* Performance Enhancement Function. */ +#define SD_EXT_PERF_FX_EVENT (1<<0) +#define SD_EXT_PERF_CARD_MAINT (1<<1) +#define SD_EXT_PERF_HOST_MAINT (1<<2) +#define SD_EXT_PERF_CACHE (1<<3) +#define SD_EXT_PERF_CMD_QUEUE (1<<4) +}; + struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; @@ -290,6 +312,8 @@ struct mmc_card { struct sd_scr scr; /* extra SD information */ struct sd_ssr ssr; /* yet more SD information */ struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ + struct sd_ext_reg ext_power; /* SD extension reg for PM */ + struct sd_ext_reg ext_perf; /* SD extension reg for PERF */ unsigned int sdio_funcs; /* number of SDIO functions */ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index ab19245e9945..71101d1ec825 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -164,9 +164,8 @@ struct mmc_request { int tag; #ifdef CONFIG_MMC_CRYPTO - bool crypto_enabled; + const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; - u32 data_unit_num; #endif }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c7e7b43600e9..0c0c9a0fdf57 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -153,7 +153,7 @@ struct mmc_host_ops { int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); - /* Check if the card is pulling dat[0:3] low */ + /* Check if the card is pulling dat[0] low */ int (*card_busy)(struct mmc_host *host); /* The tuning command opcode value is different for SD and eMMC cards */ @@ -398,6 +398,7 @@ struct mmc_host { #else #define MMC_CAP2_CRYPTO 0 #endif +#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ int fixed_drv_type; /* fixed driver type for non-removable media */ @@ -632,6 +633,6 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) } int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); -int mmc_abort_tuning(struct mmc_host *host, u32 opcode); +int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); #endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 2236aa540faa..6727576a8755 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -29,6 +29,10 @@ #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ #define SD_APP_SEND_SCR 51 /* adtc R1 */ + /* class 11 */ +#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */ +#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */ + /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V 
switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 12036619346c..a85c9f0bd470 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -75,6 +75,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 5d0767cb424a..1935d4c72d10 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -9,8 +9,7 @@ struct page; struct vm_area_struct; struct mm_struct; -extern void dump_page(struct page *page, const char *reason); -extern void __dump_page(struct page *page, const char *reason); +void dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index 03dee12d2b61..b9b970f7ab45 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -14,4 +14,18 @@ static inline void leave_mm(int cpu) { } #endif +/* + * CPUs that are capable of running user task @p. Must contain at least one + * active CPU. It is assumed that the kernel can run on all CPUs, so calling + * this for a kernel thread is pointless. + * + * By default, we assume a sane, homogeneous system. + */ +#ifndef task_cpu_possible_mask +# define task_cpu_possible_mask(p) cpu_possible_mask +# define task_cpu_possible(cpu, p) true +#else +# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p)) +#endif + #endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1a6a9eb6d3fa..45fc2c81e370 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -33,7 +33,7 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same * access flags). User should soft dirty the page in the end callback to make - * sure that anyone relying on soft dirtyness catch pages that might be written + * sure that anyone relying on soft dirtiness catch pages that might be written * through non CPU mappings. * * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal @@ -41,7 +41,12 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal * a device driver to possibly ignore the invalidation if the - * migrate_pgmap_owner field matches the driver's device private pgmap owner. + * owner field matches the driver's device private pgmap owner. + * + * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no + * longer have exclusive access to the page. When sent during creation of an + * exclusive range the owner will be initialised to the value provided by the + * caller of make_device_exclusive_range(), otherwise the owner will be NULL. */ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, @@ -51,6 +56,7 @@ enum mmu_notifier_event { MMU_NOTIFY_SOFT_DIRTY, MMU_NOTIFY_RELEASE, MMU_NOTIFY_MIGRATE, + MMU_NOTIFY_EXCLUSIVE, }; #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) @@ -161,7 +167,7 @@ struct mmu_notifier_ops { * decrease the refcount. If the refcount is decreased on * invalidate_range_start() then the VM can free pages as page * table entries are removed. 
If the refcount is only - * droppped on invalidate_range_end() then the driver itself + * dropped on invalidate_range_end() then the driver itself * will drop the last refcount but it must take care to flush * any secondary tlb before doing the final free on the * page. Pages will no longer be referenced by the linux @@ -190,7 +196,7 @@ struct mmu_notifier_ops { * If invalidate_range() is used to manage a non-CPU TLB with * shared page-tables, it is not necessary to implement the * invalidate_range_start()/end() notifiers, as - * invalidate_range() alread catches the points in time when an + * invalidate_range() already catches the points in time when an * external TLB range needs to be flushed. For more in depth * discussion on this see Documentation/vm/mmu_notifier.rst * @@ -269,7 +275,7 @@ struct mmu_notifier_range { unsigned long end; unsigned flags; enum mmu_notifier_event event; - void *migrate_pgmap_owner; + void *owner; }; static inline int mm_has_notifiers(struct mm_struct *mm) @@ -363,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub, * mmu_interval_read_retry() will return true. * * False is not reliable and only suggests a collision may not have - * occured. It can be called many times and does not have to hold the user + * occurred. It can be called many times and does not have to hold the user * provided lock. * * This call can be used as part of loops and other expensive operations to @@ -521,14 +527,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, range->flags = flags; } -static inline void mmu_notifier_range_init_migrate( - struct mmu_notifier_range *range, unsigned int flags, +static inline void mmu_notifier_range_init_owner( + struct mmu_notifier_range *range, + enum mmu_notifier_event event, unsigned int flags, struct vm_area_struct *vma, struct mm_struct *mm, - unsigned long start, unsigned long end, void *pgmap) + unsigned long start, unsigned long end, void *owner) { - mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm, - start, end); - range->migrate_pgmap_owner = pgmap; + mmu_notifier_range_init(range, event, flags, vma, mm, start, end); + range->owner = owner; } #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ @@ -655,8 +661,8 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ _mmu_notifier_range_init(range, start, end) -#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \ - pgmap) \ +#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \ end, owner) \ _mmu_notifier_range_init(range, start, end) static inline bool diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 0d53eba1c383..6a1d79d84675 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -20,6 +20,7 @@ #include <linux/atomic.h> #include <linux/mm_types.h> #include <linux/page-flags.h> +#include <linux/local_lock.h> #include <asm/page.h> /* Free memory management - zoned buddy allocator. */ @@ -113,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype) struct pglist_data; /* - * Add a wild amount of padding here to ensure datas fall into separate + * Add a wild amount of padding here to ensure data fall into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here.
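The padding comment above is the same false-sharing concern addressed by ZONE_PADDING and ____cacheline_internodealigned_in_smp: fields written from different CPUs should not share a cacheline. A minimal C11 illustration, assuming a 64-byte line size:

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>

#define CACHELINE 64    /* assumed L1 cacheline size */

struct hot_pair {
        /* bumped constantly on the allocation path */
        alignas(CACHELINE) unsigned long alloc_count;
        /* bumped constantly on the reclaim path */
        alignas(CACHELINE) unsigned long reclaim_count;
};

int main(void)
{
        /* the alignment guarantees the two fields never share a line */
        assert(offsetof(struct hot_pair, reclaim_count) -
               offsetof(struct hot_pair, alloc_count) >= CACHELINE);
        return 0;
}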
*/ @@ -134,10 +135,10 @@ enum numa_stat_item { NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation from other node */ - NR_VM_NUMA_STAT_ITEMS + NR_VM_NUMA_EVENT_ITEMS }; #else -#define NR_VM_NUMA_STAT_ITEMS 0 +#define NR_VM_NUMA_EVENT_ITEMS 0 #endif enum zone_stat_item { @@ -332,29 +333,55 @@ enum zone_watermarks { NR_WMARK }; +/* + * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional + * for pageblock size for THP if configured. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define NR_PCP_THP 1 +#else +#define NR_PCP_THP 0 +#endif +#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP)) + +/* + * Shift to encode migratetype and order in the same integer, with order + * in the least significant bits. + */ +#define NR_PCP_ORDER_WIDTH 8 +#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1) + #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) +/* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ + short free_factor; /* batch scaling factor during free */ +#ifdef CONFIG_NUMA + short expire; /* When 0, remote pagesets are drained */ +#endif /* Lists of pages, one per migrate type stored on the pcp-lists */ - struct list_head lists[MIGRATE_PCPTYPES]; + struct list_head lists[NR_PCP_LISTS]; }; -struct per_cpu_pageset { - struct per_cpu_pages pcp; -#ifdef CONFIG_NUMA - s8 expire; - u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS]; -#endif +struct per_cpu_zonestat { #ifdef CONFIG_SMP - s8 stat_threshold; s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; + s8 stat_threshold; +#endif +#ifdef CONFIG_NUMA + /* + * Low priority inaccurate counters that are only folded + * on demand. Use a large type to avoid the overhead of + * folding during refresh_cpu_vm_stats. + */ + unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #endif }; @@ -484,7 +511,8 @@ struct zone { int node; #endif struct pglist_data *zone_pgdat; - struct per_cpu_pageset __percpu *pageset; + struct per_cpu_pages __percpu *per_cpu_pageset; + struct per_cpu_zonestat __percpu *per_cpu_zonestats; /* * the high and batch values are copied to individual pagesets for * faster access @@ -512,6 +540,10 @@ struct zone { * is calculated as: * present_pages = spanned_pages - absent_pages(pages in holes); * + * present_early_pages is present pages existing within the zone + * located on memory available since early boot, excluding hotplugged + * memory. 
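The NR_PCP_ORDER_WIDTH/NR_PCP_ORDER_MASK pair introduced above reserves the low 8 bits of an integer for an allocation order so another quantity can ride in the upper bits through existing interfaces. A standalone round-trip of that encoding (the carrier value here is illustrative):

#include <assert.h>

#define ORDER_WIDTH 8
#define ORDER_MASK  ((1u << ORDER_WIDTH) - 1)

static unsigned int pack(unsigned int carrier, unsigned int order)
{
        return (carrier << ORDER_WIDTH) | (order & ORDER_MASK);
}

int main(void)
{
        unsigned int v = pack(42, 3);   /* e.g. a count plus an order */

        assert((v & ORDER_MASK) == 3);      /* order: least significant bits */
        assert((v >> ORDER_WIDTH) == 42);   /* carrier: upper bits */
        return 0;
}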
+ * * managed_pages is present pages managed by the buddy system, which * is calculated as (reserved_pages includes pages allocated by the * bootmem allocator): @@ -544,6 +576,9 @@ struct zone { atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; +#if defined(CONFIG_MEMORY_HOTPLUG) + unsigned long present_early_pages; +#endif #ifdef CONFIG_CMA unsigned long cma_pages; #endif @@ -619,7 +654,7 @@ struct zone { ZONE_PADDING(_pad3_) /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; - atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; + atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; } ____cacheline_internodealigned_in_smp; enum pgdat_flags { @@ -637,6 +672,7 @@ enum zone_flags { ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. * Cleared when kswapd is woken. */ + ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ }; static inline unsigned long zone_managed_pages(struct zone *zone) @@ -738,10 +774,12 @@ struct zonelist { struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; }; -#ifndef CONFIG_DISCONTIGMEM -/* The array of struct pages - for discontigmem use pgdat->lmem_map */ +/* + * The array of struct pages for flatmem. + * It must be declared for SPARSEMEM as well because there are configurations + * that rely on that. + */ extern struct page *mem_map; -#endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct deferred_split { @@ -775,7 +813,7 @@ typedef struct pglist_data { struct zonelist node_zonelists[MAX_ZONELISTS]; int nr_zones; /* number of populated zones in this node */ -#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ +#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION struct page_ext *node_page_ext; @@ -815,6 +853,7 @@ typedef struct pglist_data { enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; + bool proactive_compact_trigger; #endif /* * This is a per-node reserve of pages that are not available @@ -865,7 +904,7 @@ typedef struct pglist_data { #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_FLAT_NODE_MEM_MAP +#ifdef CONFIG_FLATMEM #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) #else #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) @@ -982,22 +1021,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {} extern int movable_zone; -#ifdef CONFIG_HIGHMEM -static inline int zone_movable_is_highmem(void) -{ -#ifdef CONFIG_NEED_MULTIPLE_NODES - return movable_zone == ZONE_HIGHMEM; -#else - return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; -#endif -} -#endif - static inline int is_highmem_idx(enum zone_type idx) { #ifdef CONFIG_HIGHMEM return (idx == ZONE_HIGHMEM || - (idx == ZONE_MOVABLE && zone_movable_is_highmem())); + (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); #else return 0; #endif @@ -1029,7 +1057,7 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); -int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, +int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); @@ -1037,21 +1065,24 @@ int 
sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int numa_zonelist_order_handler(struct ctl_table *, int, void *, size_t *, loff_t *); -extern int percpu_pagelist_fraction; +extern int percpu_pagelist_high_fraction; extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA extern struct pglist_data contig_page_data; -#define NODE_DATA(nid) (&contig_page_data) +static inline struct pglist_data *NODE_DATA(int nid) +{ + return &contig_page_data; +} #define NODE_MEM_MAP(nid) mem_map -#else /* CONFIG_NEED_MULTIPLE_NODES */ +#else /* CONFIG_NUMA */ #include <asm/mmzone.h> -#endif /* !CONFIG_NEED_MULTIPLE_NODES */ +#endif /* !CONFIG_NUMA */ extern struct pglist_data *first_online_pgdat(void); extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); @@ -1200,8 +1231,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #ifdef CONFIG_SPARSEMEM /* - * SECTION_SHIFT #bits space required to store a section # - * * PA_SECTION_SHIFT physical address to/from section number * PFN_SECTION_SHIFT pfn to/from section number */ @@ -1321,7 +1350,6 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } -extern unsigned long __section_nr(struct mem_section *ms); extern size_t mem_section_usage_size(void); /* @@ -1344,7 +1372,7 @@ extern size_t mem_section_usage_size(void); #define SECTION_TAINT_ZONE_DEVICE (1UL<<4) #define SECTION_MAP_LAST_BIT (1UL<<5) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 3 +#define SECTION_NID_SHIFT 6 static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1427,10 +1455,30 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) #endif #ifndef CONFIG_HAVE_ARCH_PFN_VALID +/** + * pfn_valid - check if there is a valid memory map entry for a PFN + * @pfn: the page frame number to check + * + * Check if there is a valid memory map entry aka struct page for the @pfn. + * Note, that availability of the memory map entry does not imply that + * there is actual usable memory at that @pfn. The struct page may + * represent a hole or an unusable page frame. + * + * Return: 1 for PFNs that have memory map entries and 0 otherwise + */ static inline int pfn_valid(unsigned long pfn) { struct mem_section *ms; + /* + * Ensure the upper PAGE_SHIFT bits are clear in the + * pfn. Else it might lead to false positives when + * some of the upper bits are set, but the lower bits + * match a valid pfn. + */ + if (PHYS_PFN(PFN_PHYS(pfn)) != pfn) + return 0; + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; ms = __nr_to_section(pfn_to_section_nr(pfn)); @@ -1484,18 +1532,6 @@ void sparse_init(void); #define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ -/* - * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we - * need to check pfn validity within that MAX_ORDER_NR_PAGES block. - * pfn_valid_within() should be used in this case; we optimise this away - * when we have no holes within a MAX_ORDER_NR_PAGES block. 
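The guard added to pfn_valid() above catches PFNs whose upper PAGE_SHIFT bits are set by round-tripping through a physical address: the left shift drops those bits, so shifting back cannot reproduce the original value. A standalone demonstration, assuming PAGE_SHIFT of 12 and a 64-bit unsigned long/phys_addr_t:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

#define PFN_PHYS(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(x)   ((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
        unsigned long good = 0x12345;
        /* upper PAGE_SHIFT bits set: can never be a real PFN */
        unsigned long bad = (1ul << (64 - PAGE_SHIFT)) | 0x12345;

        assert(PHYS_PFN(PFN_PHYS(good)) == good);   /* round trip holds  */
        assert(PHYS_PFN(PFN_PHYS(bad)) != bad);     /* high bits were lost */
        return 0;
}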
- */ -#ifdef CONFIG_HOLES_IN_ZONE -#define pfn_valid_within(pfn) pfn_valid(pfn) -#else -#define pfn_valid_within(pfn) (1) -#endif - #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 7d45b5f989b0..ae2e75d15b21 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -16,6 +16,10 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) +enum { + PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1, +}; + /** * struct pci_device_id - PCI device ID structure * @vendor: Vendor ID to match (or PCI_ANY_ID) @@ -34,12 +38,14 @@ typedef unsigned long kernel_ulong_t; * Best practice is to use driver_data as an index * into a static list of equivalent device types, * instead of using it as a pointer. + * @override_only: Match only when dev->driver_override is this driver. */ struct pci_device_id { __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ kernel_ulong_t driver_data; /* Data private to the driver */ + __u32 override_only; }; @@ -447,6 +453,7 @@ struct hv_vmbus_device_id { struct rpmsg_device_id { char name[RPMSG_NAME_SIZE]; + kernel_ulong_t driver_data; }; /* i2c */ diff --git a/include/linux/module.h b/include/linux/module.h index 8100bb477d86..c9f1200b2312 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -11,6 +11,7 @@ #include <linux/list.h> #include <linux/stat.h> +#include <linux/buildid.h> #include <linux/compiler.h> #include <linux/cache.h> #include <linux/kmod.h> @@ -369,6 +370,11 @@ struct module { /* Unique handle for this module */ char name[MODULE_NAME_LEN]; +#ifdef CONFIG_STACKTRACE_BUILD_ID + /* Module build ID */ + unsigned char build_id[BUILD_ID_SIZE_MAX]; +#endif + /* Sysfs stuff. */ struct module_kobject mkobj; struct module_attribute *modinfo_attrs; @@ -505,6 +511,11 @@ struct module { struct klp_modinfo *klp_info; #endif +#ifdef CONFIG_PRINTK_INDEX + unsigned int printk_index_size; + struct pi_entry **printk_index_start; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? 
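The override_only field added to struct pci_device_id above lets an ID-table entry match only when userspace has pointed the device's driver_override at this driver, which is how a VFIO variant driver can list real device IDs without grabbing them by default. A simplified sketch of that match rule (types and names here are hypothetical, not the PCI core):

#include <assert.h>
#include <string.h>

struct id  { unsigned int vendor, device, override_only; };
struct dev { unsigned int vendor, device; const char *driver_override; };

static int id_match(const struct dev *dev, const struct id *id,
                    const char *drv_name)
{
        if (dev->vendor != id->vendor || dev->device != id->device)
                return 0;
        if (!id->override_only)
                return 1;       /* ordinary entry: match as usual */
        /* override-only entry: bind only on explicit user request */
        return dev->driver_override &&
               strcmp(dev->driver_override, drv_name) == 0;
}

int main(void)
{
        const struct id vfio_id = { 0x15b3, 0x101d, 1 };
        struct dev d = { 0x15b3, 0x101d, NULL };

        assert(!id_match(&d, &vfio_id, "demo-vfio-pci")); /* no override set */
        d.driver_override = "demo-vfio-pci";
        assert(id_match(&d, &vfio_id, "demo-vfio-pci"));
        return 0;
}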
*/ struct list_head source_list; @@ -636,7 +647,7 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr); const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, - char **modname, + char **modname, const unsigned char **modbuildid, char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -740,6 +751,7 @@ static inline const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, + const unsigned char **modbuildid, char *namebuf) { return NULL; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index eed280fae433..962cd41a2cb5 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_uint; extern int param_set_uint(const char *val, const struct kernel_param *kp); extern int param_get_uint(char *buffer, const struct kernel_param *kp); +int param_set_uint_minmax(const char *val, const struct kernel_param *kp, + unsigned int min, unsigned int max); #define param_check_uint(name, p) __param_check(name, p, unsigned int) extern const struct kernel_param_ops param_ops_long; diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3e5358f4de2f..eb0d1c1db208 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -200,7 +200,7 @@ struct mpi_ec_ctx { unsigned int nbits; /* Number of bits. */ /* Domain parameters. Note that they may not all be set and if set - * the MPIs may be flaged as constant. + * the MPIs may be flagged as constant. */ MPI p; /* Prime specifying the field GF(p). */ MPI a; /* First coefficient of the Weierstrass equation. */ @@ -267,7 +267,7 @@ int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx); /** * mpi_get_size() - returns max size required to store the number * - * @a: A multi precision integer for which we want to allocate a bufer + * @a: A multi precision integer for which we want to allocate a buffer * * Return: size required to store the number */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 6aff469e511d..49cf6eb222e7 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -107,7 +107,8 @@ struct ti_sci_inta_msi_desc { * address or data changes * @write_msi_msg_data: Data parameter for the callback. 
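param_set_uint_minmax(), declared in the moduleparam.h hunk above, is the range-checked variant of param_set_uint(): out-of-range input is rejected rather than clamped. A userspace approximation of that parse-and-validate behaviour (error handling simplified):

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

/* Parse an unsigned int, rejecting values outside [min, max]. */
static int set_uint_minmax(const char *val, unsigned int *out,
                           unsigned int min, unsigned int max)
{
        char *end;
        unsigned long v;

        if (!val || !*val)
                return -EINVAL;
        v = strtoul(val, &end, 0);
        if (*end || v < min || v > max)
                return -EINVAL;
        *out = (unsigned int)v;
        return 0;
}

int main(void)
{
        unsigned int port = 0;

        assert(set_uint_minmax("1024", &port, 1, 65535) == 0 && port == 1024);
        assert(set_uint_minmax("0", &port, 1, 65535) == -EINVAL);
        assert(set_uint_minmax("junk", &port, 1, 65535) == -EINVAL);
        return 0;
}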
* - * @masked: [PCI MSI/X] Mask bits + * @msi_mask: [PCI MSI] MSI cached mask bits + * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated * @multi_cap: [PCI MSI/X] log2 num of messages supported @@ -139,7 +140,10 @@ struct msi_desc { union { /* PCI MSI/X specific data */ struct { - u32 masked; + union { + u32 msi_mask; + u32 msix_ctrl; + }; struct { u8 is_msix : 1; u8 multiple : 3; @@ -232,11 +236,13 @@ void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); +const struct attribute_group **msi_populate_sysfs(struct device *dev); +void msi_destroy_sysfs(struct device *dev, + const struct attribute_group **msi_irq_groups); + /* * The arch hooks to set up msi irqs. Default functions are implemented * as weak symbols so that they /can/ be overridden by architecture specific diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index a89955f3cbc8..88227044fc86 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -380,6 +380,8 @@ struct mtd_info { int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; + struct nvmem_device *otp_user_nvmem; + struct nvmem_device *otp_factory_nvmem; /* * Parent device from the MTD partition point of view. diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h index 339ac798568e..a7376f9beddf 100644 --- a/include/linux/mtd/onfi.h +++ b/include/linux/mtd/onfi.h @@ -11,6 +11,7 @@ #define __LINUX_MTD_ONFI_H #include <linux/types.h> +#include <linux/bitfield.h> /* ONFI version bits */ #define ONFI_VERSION_1_0 BIT(1) @@ -24,17 +25,22 @@ #define ONFI_VERSION_4_0 BIT(9) /* ONFI features */ -#define ONFI_FEATURE_16_BIT_BUS (1 << 0) -#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) +#define ONFI_FEATURE_16_BIT_BUS BIT(0) +#define ONFI_FEATURE_NV_DDR BIT(5) +#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7) /* ONFI timing mode, used in both asynchronous and synchronous mode */ -#define ONFI_TIMING_MODE_0 (1 << 0) -#define ONFI_TIMING_MODE_1 (1 << 1) -#define ONFI_TIMING_MODE_2 (1 << 2) -#define ONFI_TIMING_MODE_3 (1 << 3) -#define ONFI_TIMING_MODE_4 (1 << 4) -#define ONFI_TIMING_MODE_5 (1 << 5) -#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) +#define ONFI_DATA_INTERFACE_SDR 0 +#define ONFI_DATA_INTERFACE_NVDDR BIT(4) +#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5) +#define ONFI_TIMING_MODE_0 BIT(0) +#define ONFI_TIMING_MODE_1 BIT(1) +#define ONFI_TIMING_MODE_2 BIT(2) +#define ONFI_TIMING_MODE_3 BIT(3) +#define ONFI_TIMING_MODE_4 BIT(4) +#define ONFI_TIMING_MODE_5 BIT(5) +#define ONFI_TIMING_MODE_UNKNOWN BIT(6) +#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x)) /* ONFI feature number/address */ #define ONFI_FEATURE_NUMBER 256 @@ -49,7 +55,7 @@ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported?
*/ -#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) +#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2) struct nand_onfi_params { /* rev info and features block */ @@ -93,14 +99,15 @@ struct nand_onfi_params { /* electrical parameter block */ u8 io_pin_capacitance_max; - __le16 async_timing_mode; + __le16 sdr_timing_modes; __le16 program_cache_timing_mode; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_ccs; - __le16 src_sync_timing_mode; - u8 src_ssync_features; + u8 nvddr_timing_modes; + u8 nvddr2_timing_modes; + u8 nvddr_nvddr2_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; @@ -160,7 +167,9 @@ struct onfi_ext_param_page { * @tBERS: Block erase time * @tR: Page read time * @tCCS: Change column setup time - * @async_timing_mode: Supported asynchronous timing mode + * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only) + * @sdr_timing_modes: Supported asynchronous/SDR timing modes + * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes * @vendor_revision: Vendor specific revision number * @vendor: Vendor specific data */ @@ -170,7 +179,9 @@ struct onfi_params { u16 tBERS; u16 tR; u16 tCCS; - u16 async_timing_mode; + bool fast_tCAD; + u16 sdr_timing_modes; + u16 nvddr_timing_modes; u16 vendor_revision; u8 vendor[88]; }; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 29df2f43dcb5..b2f9dd3cbd69 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -24,6 +24,7 @@ #include <linux/types.h> struct nand_chip; +struct gpio_desc; /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 @@ -385,8 +386,8 @@ struct nand_ecc_ctrl { * This struct defines the timing requirements of a SDR NAND chip. * These information can be found in every NAND datasheets and the timings * meaning are described in the ONFI specifications: - * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing - * Parameters) + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf + * (chapter 4.15 Timing Parameters) * * All these timings are expressed in picoseconds. * @@ -472,11 +473,127 @@ struct nand_sdr_timings { }; /** + * struct nand_nvddr_timings - NV-DDR NAND chip timings + * + * This struct defines the timing requirements of a NV-DDR NAND data interface. + * This information can be found in every NAND datasheet, and the meaning of + * the timings is described in the ONFI specification: + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf + * (chapter 4.18.2 NV-DDR) + * + * All these timings are expressed in picoseconds. 
+ * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tAC_min: Access window of DQ[7:0] from CLK + * @tAC_max: Access window of DQ[7:0] from CLK + * @tADL_min: ALE to data loading time + * @tCAD_min: Command, Address, Data delay + * @tCAH_min: Command/Address DQ hold time + * @tCALH_min: W/R_n, CLE and ALE hold time + * @tCALS_min: W/R_n, CLE and ALE setup time + * @tCAS_min: Command/address DQ setup time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCK_min: Average clock cycle time + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDQSCK_min: Start of the access window of DQS from CLK + * @tDQSCK_max: End of the access window of DQS from CLK + * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device + * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device + * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device + * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access + * @tDS_min: Data setup time + * @tDSC_min: DQS cycle time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tITC_max: Interface and Timing Mode Change time + * @tQHS_max: Data hold skew factor + * @tRHW_min: Data output cycle to command, address, or data input cycle + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWHR_min: WE# high to RE# low + * @tWRCK_min: W/R_n low to data output cycle + * @tWW_min: WP# transition to WE# low + */ +struct nand_nvddr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tAC_min; + u32 tAC_max; + u32 tADL_min; + u32 tCAD_min; + u32 tCAH_min; + u32 tCALH_min; + u32 tCALS_min; + u32 tCAS_min; + u32 tCEH_min; + u32 tCH_min; + u32 tCK_min; + u32 tCS_min; + u32 tDH_min; + u32 tDQSCK_min; + u32 tDQSCK_max; + u32 tDQSD_min; + u32 tDQSD_max; + u32 tDQSHZ_max; + u32 tDQSQ_max; + u32 tDS_min; + u32 tDSC_min; + u32 tFEAT_max; + u32 tITC_max; + u32 tQHS_max; + u32 tRHW_min; + u32 tRR_min; + u32 tRST_max; + u32 tWB_max; + u32 tWHR_min; + u32 tWRCK_min; + u32 tWW_min; +}; + +/* + * While timings related to the data interface itself are mostly different + * between SDR and NV-DDR, timings related to the internal chip behavior are + * common. IOW, the following entries which describe the internal delays have + * the same definition and are shared in both SDR and NV-DDR timing structures: + * - tADL_min + * - tBERS_max + * - tCCS_min + * - tFEAT_max + * - tPROG_max + * - tR_max + * - tRR_min + * - tRST_max + * - tWB_max + * + * The below macros return the value of a given timing, no matter the interface. + */ +#define NAND_COMMON_TIMING_PS(conf, timing_name) \ + nand_interface_is_sdr(conf) ? 
\ + nand_get_sdr_timings(conf)->timing_name : \ + nand_get_nvddr_timings(conf)->timing_name + +#define NAND_COMMON_TIMING_MS(conf, timing_name) \ + PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + +#define NAND_COMMON_TIMING_NS(conf, timing_name) \ + PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + +/** * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface + * @NAND_NVDDR_IFACE: Double Data Rate interface */ enum nand_interface_type { NAND_SDR_IFACE, + NAND_NVDDR_IFACE, }; /** @@ -485,6 +602,7 @@ enum nand_interface_type { * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. + * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE. */ struct nand_interface_config { enum nand_interface_type type; @@ -492,24 +610,56 @@ struct nand_interface_config { unsigned int mode; union { struct nand_sdr_timings sdr; + struct nand_nvddr_timings nvddr; }; } timings; }; /** + * nand_interface_is_sdr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_sdr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_SDR_IFACE; +} + +/** + * nand_interface_is_nvddr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_nvddr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_NVDDR_IFACE; +} + +/** * nand_get_sdr_timings - get SDR timing from data interface * @conf: The data interface */ static inline const struct nand_sdr_timings * nand_get_sdr_timings(const struct nand_interface_config *conf) { - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return ERR_PTR(-EINVAL); return &conf->timings.sdr; } /** + * nand_get_nvddr_timings - get NV-DDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_nvddr_timings * +nand_get_nvddr_timings(const struct nand_interface_config *conf) +{ + if (!nand_interface_is_nvddr(conf)) + return ERR_PTR(-EINVAL); + + return &conf->timings.nvddr; +} + +/** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle */ @@ -1413,7 +1563,6 @@ void nand_cleanup(struct nand_chip *chip); * instruction and have no physical pin to check it. */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); -struct gpio_desc; int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms); @@ -1446,4 +1595,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip) return chip->data_buf; } +/* Parse the gpio-cs property */ +int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array, + unsigned int *ncs_array); + #endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 98ed91b529ea..f67457748ed8 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -383,6 +383,7 @@ struct spi_nor_flash_parameter; * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations + * @sfdp: the SFDP data of the flash * @controller_ops: SPI NOR controller driver specific operations. * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. 
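[Editor's note] Taken together, the nand_interface_is_*() helpers and NAND_COMMON_TIMING_*() macros above let interface-agnostic code read a shared timing without checking whether the chip runs SDR or NV-DDR. A minimal sketch; foo_wait_prog() is hypothetical, while nand_get_interface_config() and nand_soft_waitrdy() are existing rawnand.h APIs:

/* Wait out a page program using the interface-independent tPROG_max. */
static int foo_wait_prog(struct nand_chip *chip)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);

	return nand_soft_waitrdy(chip, NAND_COMMON_TIMING_MS(conf, tPROG_max));
}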
* The structure includes legacy flash parameters and @@ -412,6 +413,7 @@ struct spi_nor { bool sst_write_second; u32 flags; enum spi_nor_cmd_ext cmd_ext_type; + struct sfdp *sfdp; const struct spi_nor_controller_ops *controller_ops; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 6bb92f26833e..6988956b8492 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -170,6 +170,28 @@ struct spinand_op; struct spinand_device; #define SPINAND_MAX_ID_LEN 4 +/* + * For erase, write and read operations, we have the following timings: + * tBERS (erase) 1ms to 4ms + * tPROG 300us to 400us + * tREAD 25us to 100us + * In order to minimize latency, the min value is divided by 4 for the + * initial delay and by 20 for the poll delay. + * For reset, 5us/10us/500us if the device is respectively + * reading/programming/erasing when the RESET occurs. Since we always + * issue a RESET when the device is IDLE, 5us is selected for both initial + * and poll delay. + */ +#define SPINAND_READ_INITIAL_DELAY_US 6 +#define SPINAND_READ_POLL_DELAY_US 5 +#define SPINAND_RESET_INITIAL_DELAY_US 5 +#define SPINAND_RESET_POLL_DELAY_US 5 +#define SPINAND_WRITE_INITIAL_DELAY_US 75 +#define SPINAND_WRITE_POLL_DELAY_US 15 +#define SPINAND_ERASE_INITIAL_DELAY_US 250 +#define SPINAND_ERASE_POLL_DELAY_US 50 + +#define SPINAND_WAITRDY_TIMEOUT_MS 400 /** * struct spinand_id - SPI NAND id structure diff --git a/include/linux/mutex.h b/include/linux/mutex.h index e19323521f9c..8f226d460f51 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,8 +20,17 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> -struct ww_class; -struct ww_acquire_ctx; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* * Simple, straightforward mutexes with strict semantics: @@ -53,7 +62,7 @@ struct ww_acquire_ctx; */ struct mutex { atomic_long_t owner; - spinlock_t wait_lock; + raw_spinlock_t wait_lock; #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif @@ -66,27 +75,6 @@ struct mutex { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - -/* - * This is the control structure for tasks blocked on mutex, - * which resides on the blocked task's kernel stack: - */ -struct mutex_waiter { - struct list_head list; - struct task_struct *task; - struct ww_acquire_ctx *ww_ctx; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -117,19 +105,9 @@ do { \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } @@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ extern bool 
mutex_is_locked(struct mutex *lock); +#else /* !CONFIG_PREEMPT_RT */ +/* + * Preempt-RT variant based on rtmutexes. + */ +#include <linux/rtmutex.h> + +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ +} + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_rt_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +extern int mutex_trylock(struct mutex *lock); + +static inline void mutex_destroy(struct mutex *lock) { } + +#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex) + +#define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_base_init(&(mutex)->rtmutex); \ + __mutex_rt_init((mutex), name, key); \ +} while (0) + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) +#endif /* CONFIG_PREEMPT_RT */ + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 47e5679b48e1..000b126acfb6 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -918,12 +918,4 @@ extern void mv64340_irq_init(unsigned int base); -/* Watchdog Platform Device, Driver Data */ -#define MV64x60_WDT_NAME "mv64x60_wdt" - -struct mv64x60_wdt_pdata { - int timeout; /* watchdog expiry in seconds, default 10 */ - int bus_clk; /* bus clock in MHz, default 133 */ -}; - #endif /* __ASM_MV643XX_H */ diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h deleted file mode 100644 index 90a803aa42e8..000000000000 --- a/include/linux/n_r3964.h +++ /dev/null @@ -1,175 +0,0 @@ -/* r3964 linediscipline for linux - * - * ----------------------------------------------------------- - * Copyright by - * Philips Automation Projects - * Kassel (Germany) - * ----------------------------------------------------------- - * This software may be used and distributed according to the terms of - * the GNU General Public License, incorporated herein by reference. - * - * Author: - * L. Haag - * - * $Log: r3964.h,v $ - * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de> - * Fixed HZ usage on 2.6 kernels - * Removed unnecessary include - * - * Revision 1.3 2001/03/18 13:02:24 dwmw2 - * Fix timer usage, use spinlocks properly. - * - * Revision 1.2 2001/03/18 12:53:15 dwmw2 - * Merge changes in 2.4.2 - * - * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2 - * This'll screw the version control - * - * Revision 1.6 1998/09/30 00:40:38 dwmw2 - * Updated to use kernel's N_R3964 if available - * - * Revision 1.4 1998/04/02 20:29:44 lhaag - * select, blocking, ... 
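[Editor's note] One point worth making explicit about the PREEMPT_RT mutex variant above: the storage type changes (an rtmutex core instead of the owner/wait_lock layout), but the API surface is identical, so existing users compile unchanged under either configuration. A trivial sketch:

static DEFINE_MUTEX(foo_lock);	/* same definition with or without PREEMPT_RT */

static void foo_update(void)
{
	mutex_lock(&foo_lock);
	/* ... sleepable critical section ... */
	mutex_unlock(&foo_lock);
}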
- * - * Revision 1.3 1998/02/12 18:58:43 root - * fixed some memory leaks - * calculation of checksum characters - * - * Revision 1.2 1998/02/07 13:03:17 root - * ioctl read_telegram - * - * Revision 1.1 1998/02/06 19:19:43 root - * Initial revision - * - * - */ -#ifndef __LINUX_N_R3964_H__ -#define __LINUX_N_R3964_H__ - - -#include <linux/param.h> -#include <uapi/linux/n_r3964.h> - -/* - * Common ascii handshake characters: - */ - -#define STX 0x02 -#define ETX 0x03 -#define DLE 0x10 -#define NAK 0x15 - -/* - * Timeouts (from milliseconds to jiffies) - */ - -#define R3964_TO_QVZ ((550)*HZ/1000) -#define R3964_TO_ZVZ ((220)*HZ/1000) -#define R3964_TO_NO_BUF ((400)*HZ/1000) -#define R3964_NO_TX_ROOM ((100)*HZ/1000) -#define R3964_TO_RX_PANIC ((4000)*HZ/1000) -#define R3964_MAX_RETRIES 5 - - -enum { R3964_IDLE, - R3964_TX_REQUEST, R3964_TRANSMITTING, - R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK, - R3964_WAIT_FOR_RX_BUF, - R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT - }; - -/* - * All open file-handles are 'clients' and are stored in a linked list: - */ - -struct r3964_message; - -struct r3964_client_info { - spinlock_t lock; - struct pid *pid; - unsigned int sig_flags; - - struct r3964_client_info *next; - - struct r3964_message *first_msg; - struct r3964_message *last_msg; - struct r3964_block_header *next_block_to_read; - int msg_count; -}; - - - -struct r3964_block_header; - -/* internal version of client_message: */ -struct r3964_message { - int msg_id; - int arg; - int error_code; - struct r3964_block_header *block; - struct r3964_message *next; -}; - -/* - * Header of received block in rx_buf/tx_buf: - */ - -struct r3964_block_header -{ - unsigned int length; /* length in chars without header */ - unsigned char *data; /* usually data is located - immediately behind this struct */ - unsigned int locks; /* only used in rx_buffer */ - - struct r3964_block_header *next; - struct r3964_client_info *owner; /* =NULL in rx_buffer */ -}; - -/* - * If rx_buf hasn't enough space to store R3964_MTU chars, - * we will reject all incoming STX-requests by sending NAK. - */ - -#define RX_BUF_SIZE 4000 -#define TX_BUF_SIZE 4000 -#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100 - -#define R3964_PARITY 0x0001 -#define R3964_FRAME 0x0002 -#define R3964_OVERRUN 0x0004 -#define R3964_UNKNOWN 0x0008 -#define R3964_BREAK 0x0010 -#define R3964_CHECKSUM 0x0020 -#define R3964_ERROR 0x003f -#define R3964_BCC 0x4000 -#define R3964_DEBUG 0x8000 - - -struct r3964_info { - spinlock_t lock; - struct tty_struct *tty; - unsigned char priority; - unsigned char *rx_buf; /* ring buffer */ - unsigned char *tx_buf; - - struct r3964_block_header *rx_first; - struct r3964_block_header *rx_last; - struct r3964_block_header *tx_first; - struct r3964_block_header *tx_last; - unsigned int tx_position; - unsigned int rx_position; - unsigned char last_rx; - unsigned char bcc; - unsigned int blocks_in_rx_queue; - - struct mutex read_lock; /* serialize r3964_read */ - - struct r3964_client_info *firstClient; - unsigned int state; - unsigned int flags; - - struct timer_list tmr; - int nRetry; -}; - -#endif diff --git a/include/linux/namei.h b/include/linux/namei.h index b9605b2b46e7..e89329bb3134 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -36,9 +36,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; /* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_JUMPED 0x1000 -#define LOOKUP_ROOT 0x2000 -#define LOOKUP_ROOT_GRABBED 0x0008 /* Scoping flags for lookup. 
*/ #define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */ @@ -71,6 +68,7 @@ extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); +struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index f41387a8969f..6b3267b49755 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -4,6 +4,8 @@ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ +#include <linux/auxiliary_bus.h> + #define I40E_CLIENT_STR_LENGTH 10 /* Client interface version should be updated anytime there is a change in the @@ -48,7 +50,7 @@ struct i40e_qv_info { struct i40e_qvlist_info { u32 num_vectors; - struct i40e_qv_info qv_info[1]; + struct i40e_qv_info qv_info[]; }; @@ -78,6 +80,7 @@ struct i40e_info { u8 lanmac[6]; struct net_device *netdev; struct pci_dev *pcidev; + struct auxiliary_device *aux_dev; u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ #define I40E_CLIENT_FTYPE_PF 0 @@ -100,6 +103,11 @@ struct i40e_info { u32 fw_build; /* firmware build number */ }; +struct i40e_auxiliary_device { + struct auxiliary_device aux_dev; + struct i40e_info *ldev; +}; + #define I40E_CLIENT_RESET_LEVEL_PF 1 #define I40E_CLIENT_RESET_LEVEL_CORE 2 #define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) @@ -187,8 +195,7 @@ static inline bool i40e_client_is_registered(struct i40e_client *client) return test_bit(__I40E_CLIENT_REGISTERED, &client->state); } -/* used by clients */ -int i40e_register_client(struct i40e_client *client); -int i40e_unregister_client(struct i40e_client *client); +void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client); +void i40e_client_device_unregister(struct i40e_info *ldev); #endif /* _I40E_CLIENT_H_ */ diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h new file mode 100644 index 000000000000..e32f6712aee0 --- /dev/null +++ b/include/linux/net/intel/iidc.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation. 
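[Editor's note] The i40e_qvlist_info change above converts qv_info from a one-element array to a C99 flexible array member, so allocations should now be sized with struct_size() from linux/overflow.h. A hedged sketch of the allocation pattern (num_vectors is assumed in scope):

struct i40e_qvlist_info *qvlist;

/* struct_size() accounts for the header plus num_vectors trailing entries. */
qvlist = kzalloc(struct_size(qvlist, qv_info, num_vectors), GFP_KERNEL);
if (!qvlist)
	return -ENOMEM;
qvlist->num_vectors = num_vectors;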
*/ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/dcbnl.h> +#include <linux/device.h> +#include <linux/if_ether.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +#define IIDC_MAX_USER_PRIORITY 8 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + /* Qset TEID returned to the RDMA driver in + * ice_add_rdma_qset and used by RDMA driver + * for calls to ice_del_rdma_qset + */ + u32 teid; /* Qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the Qset should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to pass QoS info */ +struct iidc_qos_params { + struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[IIDC_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u8 num_tc; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + u32 reg; +}; + +struct ice_pf; + +int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type); +int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable); +void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos); + +#define IIDC_RDMA_ROCE_NAME "roce" + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev, each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ + +struct iidc_auxiliary_dev { + struct auxiliary_device adev; + struct ice_pf *pf; +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. + */ +struct iidc_auxiliary_drv { + struct auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. + */ + void (*event_handler)(struct ice_pf *pf, struct iidc_event *event); +}; + +#endif /* _IIDC_H_*/ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 3de38d6a0aea..2c6b9e416225 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -93,7 +93,7 @@ enum { /* * Add your fresh new feature above and remember to update - * netdev_features_strings[] in net/core/ethtool.c and maybe + * netdev_features_strings[] in net/ethtool/common.c and maybe * some feature mask #defines below. Please also describe it * in Documentation/networking/netdev-features.rst. 
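[Editor's note] To make the iidc.h comment about container_of() concrete, here is a sketch of the lookup chain the core PCI driver is expected to perform to reach the auxiliary driver's ops; adev, pf and event are assumed to be in scope and error handling is elided:

struct auxiliary_driver *adrv;
struct iidc_auxiliary_drv *iadrv;

/* device_driver -> auxiliary_driver -> iidc_auxiliary_drv */
adrv = container_of(adev->dev.driver, struct auxiliary_driver, driver);
iadrv = container_of(adrv, struct iidc_auxiliary_drv, adrv);
if (iadrv->event_handler)
	iadrv->event_handler(pf, event);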
*/ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5cbc950b34df..d79163208dfd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -47,6 +47,7 @@ #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> +#include <linux/rbtree.h> struct netpoll_info; struct device; @@ -208,6 +209,7 @@ struct sk_buff; struct netdev_hw_addr { struct list_head list; + struct rb_node node; unsigned char addr[MAX_ADDR_LEN]; unsigned char type; #define NETDEV_HW_ADDR_T_LAN 1 @@ -224,6 +226,9 @@ struct netdev_hw_addr { struct netdev_hw_addr_list { struct list_head list; int count; + + /* Auxiliary tree for faster lookup on addition and deletion */ + struct rb_root tree; }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -295,18 +300,6 @@ enum netdev_state_t { }; -/* - * This structure holds boot-time configured netdevice settings. They - * are then used in the device probing. - */ -struct netdev_boot_setup { - char name[IFNAMSIZ]; - struct ifmap map; -}; -#define NETDEV_BOOT_SETUP_MAX 8 - -int __init netdev_boot_setup(char *str); - struct gro_list { struct list_head list; int count; @@ -734,13 +727,13 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; struct net_device *dev; - struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif @@ -1086,9 +1079,18 @@ struct netdev_net_notifier { * Test if Media Access Control address is valid for the device. * * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Called when a user requests an ioctl which can't be handled by - * the generic interface code. If not defined ioctls return - * not supported error code. + * Old-style ioctl entry point. This is used internally by the + * appletalk and ieee802154 subsystems but is no longer called by + * the device ioctl handler. + * + * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Used by the bonding driver for its device specific ioctls: + * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, + * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY + * + * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, + * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. * * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); * Used to set network devices bus interface parameters. This interface @@ -1321,6 +1323,9 @@ struct netdev_net_notifier { * that got dropped are freed/returned via xdp_return_frame(). * Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. + * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, + * struct xdp_buff *xdp); + * Get the xmit slave of master device based on the xdp_buff. 
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); * This function is used to wake up the softirq, ksoftirqd or kthread * responsible for sending and/or receiving packets on a specific @@ -1361,6 +1366,15 @@ struct net_device_ops { int (*ndo_validate_addr)(struct net_device *dev); int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + int (*ndo_eth_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocbond)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocwandev)(struct net_device *dev, + struct if_settings *ifs); + int (*ndo_siocdevprivate)(struct net_device *dev, + struct ifreq *ifr, + void __user *data, int cmd); int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); int (*ndo_change_mtu)(struct net_device *dev, @@ -1539,6 +1553,8 @@ struct net_device_ops { int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, + struct xdp_buff *xdp); int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); @@ -1805,6 +1821,7 @@ enum netdev_ml_priv_type { * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network * device struct * @mpls_ptr: mpls_dev struct pointer + * @mctp_ptr: MCTP specific data * * @dev_addr: Hw address (before bcast, * because most packets are unicast) @@ -2092,6 +2109,9 @@ struct net_device { #if IS_ENABLED(CONFIG_MPLS_ROUTING) struct mpls_dev __rcu *mpls_ptr; #endif +#if IS_ENABLED(CONFIG_MCTP) + struct mctp_dev __rcu *mctp_ptr; +#endif /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -2917,7 +2937,6 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); -unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); @@ -3289,14 +3308,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { @@ -3915,6 +3926,8 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, return 0; } #endif +int netif_set_real_num_queues(struct net_device *dev, + unsigned int txq, unsigned int rxq); static inline struct netdev_rx_queue * __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) @@ -3948,7 +3961,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware interrupts being disabled. 
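[Editor's note] With the net_device_ops split above, MII and hardware-timestamping ioctls move from ndo_do_ioctl to the dedicated ndo_eth_ioctl hook. A sketch of what a PHY-backed ethernet driver's implementation might look like; the foo_* names are hypothetical, phy_mii_ioctl() is the existing helper:

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENODEV;
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... other ops elided ... */
	.ndo_eth_ioctl	= foo_eth_ioctl,
};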
- * (in_irq() || irqs_disabled()) + * (in_hardirq() || irqs_disabled()) * * We provide four helpers that can be used in following contexts : * @@ -3984,6 +3997,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } +u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog); void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); @@ -4012,10 +4027,16 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); +static inline bool is_socket_ioctl_cmd(unsigned int cmd) +{ + return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; +} +int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); +int put_user_ifreq(struct ifreq *ifr, void __user *arg); int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, - bool *need_copyout); -int dev_ifconf(struct net *net, struct ifconf *, int); -int dev_ethtool(struct net *net, struct ifreq *); + void __user *data, bool *need_copyout); +int dev_ifconf(struct net *net, struct ifconf __user *ifc); +int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); @@ -4069,6 +4090,7 @@ typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +u8 dev_xdp_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -4114,7 +4136,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, return NET_RX_DROP; } - skb_scrub_packet(skb, true); + skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); skb->priority = 0; return 0; } @@ -4136,11 +4158,13 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_dec(*dev->pcpu_refcnt); + this_cpu_dec(*dev->pcpu_refcnt); #else - refcount_dec(&dev->dev_refcnt); + refcount_dec(&dev->dev_refcnt); #endif + } } /** @@ -4151,11 +4175,13 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_inc(*dev->pcpu_refcnt); + this_cpu_inc(*dev->pcpu_refcnt); #else - refcount_inc(&dev->dev_refcnt); + refcount_inc(&dev->dev_refcnt); #endif + } } /* Carrier loss detection, dial on demand. The functions netif_carrier_on @@ -4187,8 +4213,8 @@ unsigned long dev_trans_start(struct net_device *dev); void __netdev_watchdog_up(struct net_device *dev); void netif_carrier_on(struct net_device *dev); - void netif_carrier_off(struct net_device *dev); +void netif_carrier_event(struct net_device *dev); /** * netif_dormant_on - mark device as dormant. 
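[Editor's note] Since dev_hold() and dev_put() above now tolerate a NULL device, callers can drop their own NULL checks when swapping a tracked reference. A hedged sketch (struct foo is hypothetical):

struct foo {
	struct net_device *dev;
};

static void foo_set_dev(struct foo *priv, struct net_device *new_dev)
{
	dev_hold(new_dev);	/* no-op when new_dev == NULL */
	dev_put(priv->dev);	/* no-op when priv->dev == NULL */
	priv->dev = new_dev;
}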
@@ -4615,6 +4641,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ +static inline void +__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len) +{ + memcpy(dev->dev_addr, addr, len); +} + +static inline void dev_addr_set(struct net_device *dev, const u8 *addr) +{ + __dev_addr_set(dev, addr, dev->addr_len); +} + +static inline void +dev_addr_mod(struct net_device *dev, unsigned int offset, + const u8 *addr, size_t len) +{ + memcpy(&dev->dev_addr[offset], addr, len); +} + int dev_addr_add(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); int dev_addr_del(struct net_device *dev, const unsigned char *addr, diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index f0f3a8354c3c..3fda1a508733 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -65,8 +65,8 @@ struct nf_hook_ops; struct sock; struct nf_hook_state { - unsigned int hook; - u_int8_t pf; + u8 hook; + u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; @@ -77,12 +77,18 @@ struct nf_hook_state { typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); +enum nf_hook_ops_type { + NF_HOOK_OP_UNDEFINED, + NF_HOOK_OP_NF_TABLES, +}; + struct nf_hook_ops { /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; - u_int8_t pf; + u8 pf; + enum nf_hook_ops_type hook_ops_type:8; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 10279c4830ac..ada1296c87d5 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -196,6 +196,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<20) + /* The max revision number supported by any set type + 1 */ #define IPSET_REVISION_MAX 9 diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 0c7d8d1e945d..700ea077ce2d 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -18,6 +18,7 @@ struct ip_conntrack_stat { unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; + unsigned int chaintoolong; }; #define NFCT_INFOMASK 7UL diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 515ce53aa20d..241e005f290a 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -11,6 +11,7 @@ struct nfnl_info { struct net *net; struct sock *sk; const struct nlmsghdr *nlh; + const struct nfgenmsg *nfmsg; struct netlink_ext_ack *extack; }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 07c6ad8f2a02..5897f3dbaf7c 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -36,8 +36,8 @@ struct xt_action_param { const void *matchinfo, *targinfo; }; const struct nf_hook_state *state; - int fragoff; unsigned int thoff; + u16 fragoff; bool hotdrop; }; @@ -238,9 +238,6 @@ struct xt_table { u_int8_t af; /* address/protocol family */ int priority; /* hook order */ - /* called when table is needed in the given netns */ - int (*table_init)(struct net *net); - /* A unique 
name... */ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -452,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); +int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net)); +void xt_unregister_template(const struct xt_table *t); + #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a8178253ce53..10a01978bc0d 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -127,4 +127,6 @@ static inline bool ebt_invalid_target(int target) return (target < -NUM_STANDARD_TARGETS || target >= 0); } +int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net)); +void ebt_unregister_template(const struct ebt_table *t); #endif diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 9062adfa2fb9..5d6a4158a9a6 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -102,6 +102,7 @@ struct netfs_cache_resources { const struct netfs_cache_ops *ops; void *cache_priv; void *cache_priv2; + unsigned int debug_id; /* Cookie debug ID */ }; /* @@ -137,7 +138,6 @@ struct netfs_read_request { struct list_head subrequests; /* Requests to fetch I/O from disk or net */ void *netfs_priv; /* Private data for the netfs */ unsigned int debug_id; - unsigned int cookie_debug_id; atomic_t nr_rd_ops; /* Number of read ops in progress */ atomic_t nr_wr_ops; /* Number of write ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ffba254d2098..b9a8b925db43 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -41,6 +41,11 @@ #include <linux/mempool.h> /* + * These are the default for number of transports to different server IPs + */ +#define NFS_MAX_TRANSPORTS 16 + +/* * These are the default flags for swap requests */ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) @@ -84,6 +89,7 @@ struct nfs_open_context { #define NFS_CONTEXT_RESEND_WRITES (1) #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) +#define NFS_CONTEXT_FILE_OPEN (4) int error; struct list_head list; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d71a0e90faeb..2a9acbfe00f0 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -62,6 +62,7 @@ struct nfs_client { u32 cl_minorversion;/* NFSv4 minorversion */ unsigned int cl_nconnect; /* Number of connections */ + unsigned int cl_max_connect; /* max number of xprts allowed */ const char * cl_principal; /* used for machine cred */ #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h index f5ba0fbff72f..222ae8883e85 100644 --- a/include/linux/nfs_ssc.h +++ b/include/linux/nfs_ssc.h @@ -8,6 +8,7 @@ */ #include <linux/nfs_fs.h> +#include <linux/sunrpc/svc.h> extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; @@ -52,6 +53,19 @@ static inline void nfs42_ssc_close(struct file *filep) if (nfs_ssc_client_tbl.ssc_nfs4_ops) (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep); } + +struct nfsd4_ssc_umount_item { + struct list_head nsui_list; + bool nsui_busy; + /* + * nsui_refcnt inited to 2, 1 on list and 1 for consumer. Entry + * is removed when refcnt drops to 1 and nsui_expire expires. 
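[Editor's note] The xt_register_template()/xt_unregister_template() pair above (mirrored for ebtables) replaces the removed per-table table_init pointer. A sketch of how a table might adopt it; the foo_* names are invented and the per-netns init body is elided:

static int __net_init foo_table_init(struct net *net)
{
	/* allocate hook ops and register the per-netns table here */
	return 0;
}

static const struct xt_table foo_table = {
	.name		= "foo",
	.valid_hooks	= 1 << NF_INET_LOCAL_OUT,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_FILTER,
};

static int __init foo_init(void)
{
	return xt_register_template(&foo_table, foo_table_init);
}

static void __exit foo_exit(void)
{
	xt_unregister_template(&foo_table);
}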
+ */ + refcount_t nsui_refcnt; + unsigned long nsui_expire; + struct vfsmount *nsui_vfsmount; + char nsui_ipaddr[RPC_MAX_ADDRBUFLEN]; +}; #endif /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 717ecc87c9e7..e9698b6278a5 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -277,6 +277,7 @@ struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; const struct cred *cred; + struct pnfs_layout_hdr *lo; gfp_t gfp_flags; }; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index ac398e143c9a..567c3ddba2c4 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -119,7 +119,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) * The inline keyword gives the compiler room to decide to inline, or * not inline a function as it sees best. However, as these functions * are called in both __init and non-__init functions, if they are not - * inlined we will end up with a section mis-match error (of the type of + * inlined we will end up with a section mismatch error (of the type of * freeable items not being freed). So we must use __always_inline here * to fix the problem. If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline @@ -515,7 +515,7 @@ static inline int node_random(const nodemask_t *mask) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) /* - * For nodemask scrach area. + * For nodemask scratch area. * NODEMASK_ALLOC(type, name) allocates an object with a specified type and * name. */ @@ -528,7 +528,7 @@ static inline int node_random(const nodemask_t *mask) #define NODEMASK_FREE(m) do {} while (0) #endif -/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ +/* Example structure for using NODEMASK_ALLOC, used in mempolicy. 
*/ struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 2fb373a5c1ed..87069b8459af 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -168,8 +168,6 @@ extern int raw_notifier_call_chain(struct raw_notifier_head *nh, extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); -extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, - unsigned long val_up, unsigned long val_down, void *v); extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, unsigned long val_up, unsigned long val_down, void *v); extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, diff --git a/include/linux/nubus.h b/include/linux/nubus.h index eba50b057f6f..392fc6c53e96 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -86,7 +86,7 @@ extern struct list_head nubus_func_rsrcs; struct nubus_driver { struct device_driver driver; int (*probe)(struct nubus_board *board); - int (*remove)(struct nubus_board *board); + void (*remove)(struct nubus_board *board); }; extern struct bus_type nubus_bus_type; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index edcbd60b88b9..b7c4c4130b65 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -636,8 +636,8 @@ struct nvme_lba_range_type { __u8 type; __u8 attributes; __u8 rsvd2[14]; - __u64 slba; - __u64 nlb; + __le64 slba; + __le64 nlb; __u8 guid[16]; __u8 rsvd48[16]; }; @@ -944,6 +944,13 @@ struct nvme_zone_mgmt_recv_cmd { enum { NVME_ZRA_ZONE_REPORT = 0, NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_ZRASF_ZONE_STATE_EMPTY = 0x01, + NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02, + NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03, + NVME_ZRASF_ZONE_STATE_CLOSED = 0x04, + NVME_ZRASF_ZONE_STATE_READONLY = 0x05, + NVME_ZRASF_ZONE_STATE_FULL = 0x06, + NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07, NVME_REPORT_ZONE_PARTIAL = 1, }; @@ -1504,6 +1511,7 @@ enum { NVME_SC_NS_WRITE_PROTECTED = 0x20, NVME_SC_CMD_INTERRUPTED = 0x21, NVME_SC_TRANSIENT_TR_ERR = 0x22, + NVME_SC_INVALID_IO_CMD_SET = 0x2C, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index e162b757b6d5..104505e9028f 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -25,6 +25,7 @@ enum nvmem_type { NVMEM_TYPE_EEPROM, NVMEM_TYPE_OTP, NVMEM_TYPE_BATTERY_BACKED, + NVMEM_TYPE_FRAM, }; #define NVMEM_DEVID_NONE (-1) @@ -57,6 +58,7 @@ struct nvmem_keepout { * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. + * @of_node: If given, this will be used instead of the parent's of_node. * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. 
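[Editor's note] The new NVME_ZRASF_ZONE_STATE_* values above let a Zone Management Receive command ask the controller to report only zones in a given state. A hedged sketch of filling such a command (namespace, LBA and data-pointer fields elided):

struct nvme_zone_mgmt_recv_cmd c = {
	.opcode	= nvme_cmd_zone_mgmt_recv,
	.zra	= NVME_ZRA_ZONE_REPORT,
	.zrasf	= NVME_ZRASF_ZONE_STATE_EMPTY,	/* report empty zones only */
};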
@@ -86,6 +88,7 @@ struct nvmem_config { enum nvmem_type type; bool read_only; bool root_only; + struct device_node *of_node; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; diff --git a/include/linux/of.h b/include/linux/of.h index d8db8d3592fd..6f1c41f109bb 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -896,7 +896,7 @@ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, return -ENOSYS; } -static inline int of_count_phandle_with_args(struct device_node *np, +static inline int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name) { @@ -946,6 +946,11 @@ static inline int of_machine_is_compatible(const char *compat) return 0; } +static inline int of_add_property(struct device_node *np, struct property *prop) +{ + return 0; +} + static inline int of_remove_property(struct device_node *np, struct property *prop) { return 0; @@ -1329,6 +1334,12 @@ static inline int of_get_available_child_count(const struct device_node *np) return num; } +#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __attribute__((unused)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } + #if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ @@ -1338,10 +1349,7 @@ static inline int of_get_available_child_count(const struct device_node *np) .data = (fn == (fn_type)NULL) ? fn : fn } #else #define _OF_DECLARE(table, name, compat, fn, fn_type) \ - static const struct of_device_id __of_table_##name \ - __attribute__((unused)) \ - = { .compatible = compat, \ - .data = (fn == (fn_type)NULL) ? fn : fn } + _OF_DECLARE_STUB(table, name, compat, fn, fn_type) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 88bc943405cd..45598dbec269 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -51,8 +51,8 @@ void __iomem *of_io_request_and_map(struct device_node *device, * the address space flags too. 
The PCI version uses a BAR number * instead of an absolute index */ -extern const __be32 *of_get_address(struct device_node *dev, int index, - u64 *size, unsigned int *flags); +extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, + u64 *size, unsigned int *flags); extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); @@ -61,6 +61,11 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range_parser *parser, struct of_pci_range *range); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ static inline void __iomem *of_io_request_and_map(struct device_node *device, @@ -75,8 +80,8 @@ static inline u64 of_translate_address(struct device_node *np, return OF_BAD_ADDR; } -static inline const __be32 *of_get_address(struct device_node *dev, int index, - u64 *size, unsigned int *flags) +static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, + u64 *size, unsigned int *flags) { return NULL; } @@ -100,6 +105,19 @@ static inline struct of_pci_range *of_pci_range_parser_one( return NULL; } +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + return -ENOSYS; +} + +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res) +{ + return -ENOSYS; +} + static inline bool of_dma_is_coherent(struct device_node *np) { return false; @@ -124,32 +142,16 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) #endif #define of_range_parser_init of_pci_range_parser_init -#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) -extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, - u64 *size, unsigned int *flags); -extern int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r); -extern int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res); -#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ -static inline int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r) +static inline const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags) { - return -ENOSYS; + return __of_get_address(dev, index, -1, size, flags); } -static inline const __be32 *of_get_pci_address(struct device_node *dev, - int bar_no, u64 *size, unsigned int *flags) -{ - return NULL; -} -static inline int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res) +static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags) { - return -ENOSYS; + return __of_get_address(dev, -1, bar_no, size, flags); } -#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index acf820e88952..cf6a65b94d40 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -67,9 +67,6 @@ extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); extern void __init early_init_dt_scan_chosen_arch(unsigned long 
node); extern void early_init_dt_add_memory_arch(u64 base, u64 size); -extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); -extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, - bool no_map); extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f821095218b0..8bf2ea859653 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -49,7 +49,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern int of_get_named_gpio_flags(struct device_node *np, +extern int of_get_named_gpio_flags(const struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); extern int of_mm_gpiochip_add_data(struct device_node *np, @@ -67,7 +67,7 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); #include <linux/errno.h> /* Drivers may not strictly depend on the GPIO support, so let them link. */ -static inline int of_get_named_gpio_flags(struct device_node *np, +static inline int of_get_named_gpio_flags(const struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { if (flags) @@ -98,7 +98,8 @@ static inline int of_get_named_gpio_flags(struct device_node *np, * The above example defines four GPIOs, two of which are not specified. * This function will return '4' */ -static inline int of_gpio_named_count(struct device_node *np, const char* propname) +static inline int of_gpio_named_count(const struct device_node *np, + const char *propname) { return of_count_phandle_with_args(np, propname, "#gpio-cells"); } @@ -109,12 +110,12 @@ static inline int of_gpio_named_count(struct device_node *np, const char* propna * * Same as of_gpio_named_count, but hard coded to use the 'gpios' property */ -static inline int of_gpio_count(struct device_node *np) +static inline int of_gpio_count(const struct device_node *np) { return of_gpio_named_count(np, "gpios"); } -static inline int of_get_gpio_flags(struct device_node *np, int index, +static inline int of_get_gpio_flags(const struct device_node *np, int index, enum of_gpio_flags *flags) { return of_get_named_gpio_flags(np, "gpios", index, flags); @@ -129,7 +130,7 @@ static inline int of_get_gpio_flags(struct device_node *np, int index, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. */ -static inline int of_get_named_gpio(struct device_node *np, +static inline int of_get_named_gpio(const struct device_node *np, const char *propname, int index) { return of_get_named_gpio_flags(np, propname, index, NULL); @@ -143,7 +144,7 @@ static inline int of_get_named_gpio(struct device_node *np, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. 
*/ -static inline int of_get_gpio(struct device_node *np, int index) +static inline int of_get_gpio(const struct device_node *np, int index) { return of_get_gpio_flags(np, index, NULL); } diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 16f4b3e87f20..55c1eb300a86 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -2,29 +2,18 @@ #ifndef __OF_IOMMU_H #define __OF_IOMMU_H -#include <linux/device.h> -#include <linux/iommu.h> -#include <linux/of.h> +struct device; +struct device_node; +struct iommu_ops; #ifdef CONFIG_OF_IOMMU -extern int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size); - extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id); #else -static inline int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size) -{ - return -EINVAL; -} - static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id) diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 2b05e7f7c238..da633d34ab86 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -72,6 +72,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node * return mdiobus_register(mdio); } +static inline int devm_of_mdiobus_register(struct device *dev, + struct mii_bus *mdio, + struct device_node *np) +{ + return devm_mdiobus_register(dev, mdio); +} + static inline struct mdio_device *of_mdio_find_device(struct device_node *np) { return NULL; diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 8216a4156263..4de2a24cadc9 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -27,11 +27,11 @@ struct reserved_mem_ops { typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); +#ifdef CONFIG_OF_RESERVED_MEM + #define RESERVEDMEM_OF_DECLARE(name, compat, init) \ _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) -#ifdef CONFIG_OF_RESERVED_MEM - int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx); int of_reserved_mem_device_init_by_name(struct device *dev, @@ -39,11 +39,12 @@ int of_reserved_mem_device_init_by_name(struct device *dev, const char *name); void of_reserved_mem_device_release(struct device *dev); -void fdt_init_reserved_mem(void); -void fdt_reserved_mem_save_node(unsigned long node, const char *uname, - phys_addr_t base, phys_addr_t size); struct reserved_mem *of_reserved_mem_lookup(struct device_node *np); #else + +#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ + _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn) + static inline int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx) { @@ -59,9 +60,6 @@ static inline int of_reserved_mem_device_init_by_name(struct device *dev, static inline void of_reserved_mem_device_release(struct device *pdev) { } -static inline void fdt_init_reserved_mem(void) { } -static inline void fdt_reserved_mem_save_node(unsigned long node, - const char *uname, phys_addr_t base, phys_addr_t size) { } static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) { return NULL; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 461b7aa587ba..0f4a8903922a 100644 --- a/include/linux/oid_registry.h +++ 
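[Editor's note] With the RESERVEDMEM_OF_DECLARE stub added above, a reserved-memory initializer declaration now compiles cleanly whether or not CONFIG_OF_RESERVED_MEM is set. A minimal sketch with invented names; %pa is the kernel printk format for a phys_addr_t pointer:

static int __init foo_rmem_init(struct reserved_mem *rmem)
{
	pr_info("foo: %pa bytes reserved at %pa\n", &rmem->size, &rmem->base);
	return 0;
}
RESERVEDMEM_OF_DECLARE(foo_rmem, "vendor,foo-reserved", foo_rmem_init);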
b/include/linux/oid_registry.h @@ -54,6 +54,10 @@ enum OID { OID_md4, /* 1.2.840.113549.2.4 */ OID_md5, /* 1.2.840.113549.2.5 */ + OID_mskrb5, /* 1.2.840.48018.1.2.2 */ + OID_krb5, /* 1.2.840.113554.1.2.2 */ + OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ + /* Microsoft Authenticode & Software Publishing */ OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ @@ -62,6 +66,13 @@ enum OID { OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ + OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */ + + OID_spnego, /* 1.3.6.1.5.5.2 */ + + OID_IAKerb, /* 1.3.6.1.5.2.5 */ + OID_PKU2U, /* 1.3.5.1.5.2.7 */ + OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ @@ -96,6 +107,10 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ + /* Heimdal mechanisms */ + OID_NetlogonMechanism, /* 1.2.752.43.14.2 */ + OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */ + /* EC-RDSA */ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index b7bf735960c2..082841908fe7 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -81,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval); extern void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p); -extern void omap3_gpmc_save_context(void); -extern void omap3_gpmc_restore_context(void); - struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; diff --git a/include/linux/once.h b/include/linux/once.h index 9225ee6d96c7..d361fb14ac3a 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -7,7 +7,7 @@ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags); + unsigned long *flags, struct module *mod); /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only @@ -16,7 +16,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, * out the condition into a nop. DO_ONCE() guarantees type safety of * arguments! * - * Not that the following is not equivalent ... + * Note that the following is not equivalent ... * * DO_ONCE(func, arg); * DO_ONCE(func, arg); @@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ - &___flags); \ + &___flags, THIS_MODULE); \ } \ } \ ___ret; \ diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h new file mode 100644 index 000000000000..861e606b820f --- /dev/null +++ b/include/linux/once_lite.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ONCE_LITE_H +#define _LINUX_ONCE_LITE_H + +#include <linux/types.h> + +/* Call a function once. Similar to DO_ONCE(), but does not use jump label + * patching via static keys. + */ +#define DO_ONCE_LITE(func, ...) \ + DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__) +#define DO_ONCE_LITE_IF(condition, func, ...) 
\ + ({ \ + static bool __section(".data.once") __already_done; \ + bool __ret_do_once = !!(condition); \ + \ + if (unlikely(__ret_do_once && !__already_done)) { \ + __already_done = true; \ + func(__VA_ARGS__); \ + } \ + unlikely(__ret_do_once); \ + }) + +#endif /* _LINUX_ONCE_LITE_H */ diff --git a/include/linux/padata.h b/include/linux/padata.h index a433f13fc4bf..495b16b6b4d7 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -12,6 +12,7 @@ #ifndef PADATA_H #define PADATA_H +#include <linux/refcount.h> #include <linux/compiler_types.h> #include <linux/workqueue.h> #include <linux/spinlock.h> @@ -96,7 +97,7 @@ struct parallel_data { struct padata_shell *ps; struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; - atomic_t refcnt; + refcount_t refcnt; unsigned int seq_nr; unsigned int processed; int cpu; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 04a34c08e0a6..a558d67ee86f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -131,13 +131,16 @@ enum pageflags { #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif -#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) PG_young, PG_idle, #endif #ifdef CONFIG_64BIT PG_arch_2, #endif +#ifdef CONFIG_KASAN_HW_TAGS + PG_skip_kasan_poison, +#endif __NR_PAGEFLAGS, /* Filesystems */ @@ -175,19 +178,21 @@ enum pageflags { PG_reported = PG_uptodate, }; -#ifndef __GENERATING_BOUNDS_H +#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) -struct page; /* forward declaration */ +#ifndef __GENERATING_BOUNDS_H -static inline struct page *compound_head(struct page *page) +static inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) - return (struct page *) (head - 1); - return page; + return head - 1; + return (unsigned long)page; } +#define compound_head(page) ((typeof(page))_compound_head(page)) + static __always_inline int PageTail(struct page *page) { return READ_ONCE(page->compound_head) & 1; @@ -436,13 +441,19 @@ PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif -#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) TESTPAGEFLAG(Young, young, PF_ANY) SETPAGEFLAG(Young, young, PF_ANY) TESTCLEARFLAG(Young, young, PF_ANY) PAGEFLAG(Idle, idle, PF_ANY) #endif +#ifdef CONFIG_KASAN_HW_TAGS +PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD) +#else +PAGEFLAG_FALSE(SkipKASanPoison) +#endif + /* * PageReported() is used to track reported free pages within the Buddy * allocator. We can use the non-atomic version of the test and set @@ -624,43 +635,6 @@ static inline int PageTransCompound(struct page *page) } /* - * PageTransCompoundMap is the same as PageTransCompound, but it also - * guarantees the primary MMU has the entire compound page mapped - * through pmd_trans_huge, which in turn guarantees the secondary MMUs - * can also map the entire compound page. This allows the secondary - * MMUs to call get_user_pages() only once for each compound page and - * to immediately map the entire compound page with a single secondary - * MMU fault. If there will be a pmd split later, the secondary MMUs - * will get an update through the MMU notifier invalidation through - * split_huge_pmd(). 
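[Example] The new once_lite.h trades the static-key patching of DO_ONCE() for a plain boolean guard, so it needs no jump-label infrastructure. A minimal userspace rendering of the same pattern (the __section(".data.once") placement and unlikely() hints are dropped; warn_once() is an illustrative caller, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Same shape as DO_ONCE_LITE_IF(): a per-expansion static guard,
 * tested and set without any static-key patching. */
#define DO_ONCE_LITE_IF(condition, func, ...)		\
	({						\
		static bool __already_done;		\
		bool __ret_do_once = !!(condition);	\
							\
		if (__ret_do_once && !__already_done) {	\
			__already_done = true;		\
			func(__VA_ARGS__);		\
		}					\
		__ret_do_once;				\
	})

static void warn_once(const char *what)
{
	fprintf(stderr, "deprecated: %s\n", what);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		DO_ONCE_LITE_IF(i >= 0, warn_once, "old_api()");
	/* warn_once() ran exactly once despite three evaluations */
	return 0;
}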
- * - * Unlike PageTransCompound, this is safe to be called only while - * split_huge_pmd() cannot run from under us, like if protected by the - * MMU notifier, otherwise it may result in page->_mapcount check false - * positives. - * - * We have to treat page cache THP differently since every subpage of it - * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE - * mapped in the current process so comparing subpage's _mapcount to - * compound_mapcount to filter out PTE mapped case. - */ -static inline int PageTransCompoundMap(struct page *page) -{ - struct page *head; - - if (!PageTransCompound(page)) - return 0; - - if (PageAnon(page)) - return atomic_read(&page->_mapcount) < 0; - - head = compound_head(page); - /* File THP is PMD mapped and not PTE mapped */ - return atomic_read(&page->_mapcount) == - atomic_read(compound_mapcount_ptr(head)); -} - -/* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. @@ -695,6 +669,18 @@ PAGEFLAG_FALSE(DoubleMap) #endif /* + * Check if a page is currently marked HWPoisoned. Note that this check is + * best effort only and inherently racy: there is no way to synchronize with + * failing hardware. + */ +static inline bool is_page_hwpoison(struct page *page) +{ + if (PageHWPoison(page)) + return true; + return PageHuge(page) && PageHWPoison(compound_head(page)); +} + +/* * For pages that are never mapped to userspace (and aren't PageSlab), * page_type may be used. Because it is initialised to -1, we invert the * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and @@ -757,9 +743,19 @@ PAGE_TYPE_OPS(Buddy, buddy) * relies on this feature is aware that re-onlining the memory block will * require to re-set the pages PageOffline() and not giving them to the * buddy via online_page_callback_t. + * + * There are drivers that mark a page PageOffline() and expect there won't be + * any further access to page content. PFN walkers that read content of random + * pages should check PageOffline() and synchronize with such drivers using + * page_offline_freeze()/page_offline_thaw(). */ PAGE_TYPE_OPS(Offline, offline) +extern void page_offline_freeze(void); +extern void page_offline_thaw(void); +extern void page_offline_begin(void); +extern void page_offline_end(void); + /* * Marks pages in use as page tables. */ @@ -784,6 +780,15 @@ static inline int PageSlabPfmemalloc(struct page *page) return PageActive(page); } +/* + * A version of PageSlabPfmemalloc() for opportunistic checks where the page + * might have been freed under us and not be a PageSlab anymore. + */ +static inline int __PageSlabPfmemalloc(struct page *page) +{ + return PageActive(page); +} + static inline void SetPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); @@ -828,7 +833,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent from reusing the page. 
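[Example] The PAGE_TYPE_OPS() comment above depends on page_type being initialised to -1 with the bit sense inverted: "setting" a type clears its bit, so a page with no type reads as all ones. A simplified standalone sketch of that inversion (the real code additionally masks against PAGE_TYPE_BASE; names and values here are illustrative):

#include <assert.h>
#include <stdbool.h>

#define PG_buddy	0x00000080u
#define PG_offline	0x00000100u

/* page_type starts life as -1: all bits set means "no type". */
static unsigned int page_type = (unsigned int)-1;

static bool page_has_type(unsigned int flag)  { return !(page_type & flag); }
static void set_page_type(unsigned int flag)  { page_type &= ~flag; }
static void clear_page_type(unsigned int flag){ page_type |= flag; }

int main(void)
{
	assert(!page_has_type(PG_buddy));	/* untyped page */
	set_page_type(PG_offline);		/* "set" clears the bit */
	assert(page_has_type(PG_offline));
	clear_page_type(PG_offline);
	assert(page_type == (unsigned int)-1);	/* back to "no type" */
	return 0;
}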
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (PAGEFLAGS_MASK & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index aff81ba31bd8..fabb2e1e087f 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -19,7 +19,7 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_OWNER, PAGE_EXT_OWNER_ALLOCATED, -#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, #endif diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index 1e894d34bdce..d8a6aecf99cb 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -6,7 +6,7 @@ #include <linux/page-flags.h> #include <linux/page_ext.h> -#ifdef CONFIG_IDLE_PAGE_TRACKING +#ifdef CONFIG_PAGE_IDLE_FLAG #ifdef CONFIG_64BIT static inline bool page_is_young(struct page *page) @@ -106,7 +106,7 @@ static inline void clear_page_idle(struct page *page) } #endif /* CONFIG_64BIT */ -#else /* !CONFIG_IDLE_PAGE_TRACKING */ +#else /* !CONFIG_PAGE_IDLE_FLAG */ static inline bool page_is_young(struct page *page) { @@ -135,6 +135,6 @@ static inline void clear_page_idle(struct page *page) { } -#endif /* CONFIG_IDLE_PAGE_TRACKING */ +#endif /* CONFIG_PAGE_IDLE_FLAG */ #endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 3468794f83d2..719bfe5108c5 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page, extern void __split_page_owner(struct page *page, unsigned int nr); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); -extern void __dump_page_owner(struct page *page); +extern void __dump_page_owner(const struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); @@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason) if (static_branch_unlikely(&page_owner_inited)) __set_page_owner_migrate_reason(page, reason); } -static inline void dump_page_owner(struct page *page) +static inline void dump_page_owner(const struct page *page) { if (static_branch_unlikely(&page_owner_inited)) __dump_page_owner(page); @@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage) static inline void set_page_owner_migrate_reason(struct page *page, int reason) { } -static inline void dump_page_owner(struct page *page) +static inline void dump_page_owner(const struct page *page) { } #endif /* CONFIG_PAGE_OWNER */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index f3318f34fc54..7ad46f45df39 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v) #endif -static inline int page_ref_count(struct page *page) +static inline int page_ref_count(const struct page *page) { return atomic_read(&page->_refcount); } -static inline int page_count(struct page *page) +static inline int page_count(const struct page *page) { return atomic_read(&compound_head(page)->_refcount); } diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h index 3b99e0ec24f2..fe648dfa3a7c 100644 --- 
a/include/linux/page_reporting.h +++ b/include/linux/page_reporting.h @@ -18,6 +18,9 @@ struct page_reporting_dev_info { /* Current state of page reporting */ atomic_t state; + + /* Minimal order of page reporting */ + unsigned int order; }; /* Tear-down and bring-up for page reporting devices */ diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index fff52ad370c1..973fd731a520 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -54,7 +54,7 @@ extern unsigned int pageblock_order; /* Forward declaration */ struct page; -unsigned long get_pfnblock_flags_mask(struct page *page, +unsigned long get_pfnblock_flags_mask(const struct page *page, unsigned long pfn, unsigned long mask); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e89df447fae3..62db6b0176b9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -516,34 +516,34 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* - * Get index of the page with in radix-tree + * Get index of the page within radix-tree (but not for hugetlb pages). * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_index(struct page *page) { - pgoff_t pgoff; + struct page *head; if (likely(!PageTransTail(page))) return page->index; + head = compound_head(page); /* * We don't initialize ->index for tail pages: calculate based on * head page */ - pgoff = compound_head(page)->index; - pgoff += page - compound_head(page); - return pgoff; + return head->index + page - head; } +extern pgoff_t hugetlb_basepage_index(struct page *page); + /* - * Get the offset in PAGE_SIZE. - * (TODO: hugepage should have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE (even for hugetlb pages). + * (TODO: hugetlb pages should have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_pgoff(struct page *page) { - if (unlikely(PageHeadHuge(page))) - return page->index << compound_order(page); - + if (unlikely(PageHuge(page))) + return hugetlb_basepage_index(page); return page_to_index(page); } @@ -701,6 +701,10 @@ int wait_on_page_writeback_killable(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); +void __set_page_dirty(struct page *, struct address_space *, int warn); +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); + void page_endio(struct page *page, bool is_write, int err); /** @@ -731,7 +735,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); /* * Fault everything in given userspace address range in. 
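[Example] The rewritten page_to_index() above derives a tail page's index in one expression: the head page's ->index plus the tail's distance from the head in the contiguous struct page array. The arithmetic in isolation (mock struct, illustrative only):

#include <assert.h>

struct page { unsigned long index; };

/* Tail pages don't carry ->index; derive it from the head page,
 * exactly as the reworked page_to_index() does. */
static unsigned long tail_index(struct page *head, struct page *tail)
{
	return head->index + (tail - head);	/* pointer difference in pages */
}

int main(void)
{
	struct page compound[8];		/* stand-in for a compound page */

	compound[0].index = 100;		/* head page's file index */
	assert(tail_index(&compound[0], &compound[3]) == 103);
	return 0;
}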
*/ -static inline int fault_in_pages_writeable(char __user *uaddr, int size) +static inline int fault_in_pages_writeable(char __user *uaddr, size_t size) { char __user *end = uaddr + size - 1; @@ -758,7 +762,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size) return 0; } -static inline int fault_in_pages_readable(const char __user *uaddr, int size) +static inline int fault_in_pages_readable(const char __user *uaddr, size_t size) { volatile char c; const char __user *end = uaddr + size - 1; diff --git a/include/linux/panic.h b/include/linux/panic.h new file mode 100644 index 000000000000..f5844908a089 --- /dev/null +++ b/include/linux/panic.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PANIC_H +#define _LINUX_PANIC_H + +#include <linux/compiler_attributes.h> +#include <linux/types.h> + +struct pt_regs; + +extern long (*panic_blink)(int state); +__printf(1, 2) +void panic(const char *fmt, ...) __noreturn __cold; +void nmi_panic(struct pt_regs *regs, const char *msg); +extern void oops_enter(void); +extern void oops_exit(void); +extern bool oops_may_print(void); + +#ifdef CONFIG_SMP +extern unsigned int sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + +extern int panic_timeout; +extern unsigned long panic_print; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int panic_on_warn; + +extern unsigned long panic_on_taint; +extern bool panic_on_taint_nousertaint; + +extern int sysctl_panic_on_rcu_stall; +extern int sysctl_max_rcu_stall_to_panic; +extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + +/* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). + */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. + */ +static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} + +/* This cannot be an enum because some may be used in assembly source. 
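[Example] set_arch_panic_timeout() in the new panic.h only installs the architecture's preferred timeout when panic_timeout still holds the build-time default, so a user-supplied CONFIG_PANIC_TIMEOUT survives. The decision logic, lifted into a standalone demo:

#include <assert.h>

static int panic_timeout = 0;	/* pretend CONFIG_PANIC_TIMEOUT=0 */

static void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
	/* Honor a user override: only replace the untouched default. */
	if (panic_timeout == arch_default_timeout)
		panic_timeout = timeout;
}

int main(void)
{
	set_arch_panic_timeout(180, 0);	/* still the default -> arch wins */
	assert(panic_timeout == 180);

	panic_timeout = 30;		/* simulate a user override */
	set_arch_panic_timeout(180, 0);	/* no longer the default -> kept */
	assert(panic_timeout == 30);
	return 0;
}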
*/ +#define TAINT_PROPRIETARY_MODULE 0 +#define TAINT_FORCED_MODULE 1 +#define TAINT_CPU_OUT_OF_SPEC 2 +#define TAINT_FORCED_RMMOD 3 +#define TAINT_MACHINE_CHECK 4 +#define TAINT_BAD_PAGE 5 +#define TAINT_USER 6 +#define TAINT_DIE 7 +#define TAINT_OVERRIDDEN_ACPI_TABLE 8 +#define TAINT_WARN 9 +#define TAINT_CRAP 10 +#define TAINT_FIRMWARE_WORKAROUND 11 +#define TAINT_OOT_MODULE 12 +#define TAINT_UNSIGNED_MODULE 13 +#define TAINT_SOFTLOCKUP 14 +#define TAINT_LIVEPATCH 15 +#define TAINT_AUX 16 +#define TAINT_RANDSTRUCT 17 +#define TAINT_FLAGS_COUNT 18 +#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) + +struct taint_flag { + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ + bool module; /* also show as a per-module taint flag */ +}; + +extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; + +enum lockdep_ok { + LOCKDEP_STILL_OK, + LOCKDEP_NOW_UNRELIABLE, +}; + +extern const char *print_tainted(void); +extern void add_taint(unsigned flag, enum lockdep_ok); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); + +#endif /* _LINUX_PANIC_H */ diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h new file mode 100644 index 000000000000..41e32483d7a7 --- /dev/null +++ b/include/linux/panic_notifier.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PANIC_NOTIFIERS_H +#define _LINUX_PANIC_NOTIFIERS_H + +#include <linux/notifier.h> +#include <linux/types.h> + +extern struct atomic_notifier_head panic_notifier_list; + +extern bool crash_kexec_post_notifiers; + +#endif /* _LINUX_PANIC_NOTIFIERS_H */ diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 5ba475ca9078..f16de399d2de 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -122,6 +122,9 @@ static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { } static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { } #endif /* CONFIG_PCIE_EDR */ +int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *)); +void pci_acpi_clear_companion_lookup_hook(void); + #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index fbdadd4d8377..adea5a4771cf 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -55,6 +55,7 @@ struct pci_ecam_ops { struct pci_config_window { struct resource res; struct resource busr; + unsigned int bus_shift; void *priv; const struct pci_ecam_ops *ops; union { diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h index 662881335c7e..3e2140d7e31d 100644 --- a/include/linux/pci-ep-cfs.h +++ b/include/linux/pci-ep-cfs.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/** +/* * PCI Endpoint ConfigFS header file * * Copyright (C) 2017 Texas Instruments diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index b82c9b100e97..a48778e1a4ee 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/** +/* * PCI Endpoint *Controller* (EPC) header file * * Copyright (C) 2017 Texas Instruments @@ -58,34 +58,36 @@ pci_epc_interface_string(enum pci_epc_interface_type type) * @map_msi_irq: ops to map physical address to MSI address and return MSI data * @start: ops to start the PCI link * @stop: ops to stop the PCI link + * @get_features: ops to get 
the features supported by the EPC * @owner: the module owner containing the ops */ struct pci_epc_ops { - int (*write_header)(struct pci_epc *epc, u8 func_no, + int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr); - int (*set_bar)(struct pci_epc *epc, u8 func_no, + int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); - void (*clear_bar)(struct pci_epc *epc, u8 func_no, + void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); - int (*map_addr)(struct pci_epc *epc, u8 func_no, + int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr, u64 pci_addr, size_t size); - void (*unmap_addr)(struct pci_epc *epc, u8 func_no, + void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr); - int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); - int (*get_msi)(struct pci_epc *epc, u8 func_no); - int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts, - enum pci_barno, u32 offset); - int (*get_msix)(struct pci_epc *epc, u8 func_no); - int (*raise_irq)(struct pci_epc *epc, u8 func_no, + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupts); + int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); + int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u16 interrupts, enum pci_barno, u32 offset); + int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); + int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num); - int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, + int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); int (*start)(struct pci_epc *epc); void (*stop)(struct pci_epc *epc); const struct pci_epc_features* (*get_features)(struct pci_epc *epc, - u8 func_no); + u8 func_no, u8 vfunc_no); struct module *owner; }; @@ -127,6 +129,8 @@ struct pci_epc_mem { * single window. 
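[Example] Every pci_epc op now takes a vfunc_no alongside func_no so SR-IOV virtual functions can be addressed individually; a physical function passes 0, and in-kernel callers typically just forward epf->vfunc_no, which stays 0 for PFs. A minimal sketch of an EPF driver's bind path under that convention (the header field values are illustrative):

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int demo_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	struct pci_epf_header hdr = {
		.vendorid = 0x104c,	/* illustrative IDs */
		.deviceid = 0xb500,
	};

	/* vfunc_no == 0 addresses the physical function itself;
	 * a virtual EPF carries its VF number in epf->vfunc_no. */
	return pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, &hdr);
}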
* @num_windows: number of windows supported by device * @max_functions: max number of functions that can be configured in this EPC + * @max_vfs: Array indicating the maximum number of virtual functions that can + * be associated with each physical function * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops * @function_num_map: bitmap to manage physical function number @@ -140,6 +144,7 @@ struct pci_epc { struct pci_epc_mem *mem; unsigned int num_windows; u8 max_functions; + u8 *max_vfs; struct config_group *group; /* mutex to protect against concurrent access of EP controller */ struct mutex lock; @@ -150,6 +155,8 @@ struct pci_epc { /** * struct pci_epc_features - features supported by a EPC device per function * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up + * @core_init_notifier: indicate cores that can notify about their availability + * for initialization * @msi_capable: indicate if the endpoint function has MSI capability * @msix_capable: indicate if the endpoint function has MSI-X capability * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver @@ -205,31 +212,32 @@ void pci_epc_linkup(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); -int pci_epc_write_header(struct pci_epc *epc, u8 func_no, +int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr); -int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); -void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); -int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u64 pci_addr, size_t size); -void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr); -int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); -int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, - enum pci_barno, u32 offset); -int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); -int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupts); +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no); +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u16 interrupts, enum pci_barno, u32 offset); +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no); +int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); -int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, - u8 func_no); + u8 func_no, u8 vfunc_no); enum pci_barno pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features); enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features diff 
--git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 6833e2160ef1..009a07147c61 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/** +/* * PCI Endpoint *Function* (EPF) header file * * Copyright (C) 2017 Texas Instruments @@ -85,7 +85,7 @@ struct pci_epf_ops { */ struct pci_epf_driver { int (*probe)(struct pci_epf *epf); - int (*remove)(struct pci_epf *epf); + void (*remove)(struct pci_epf *epf); struct device_driver driver; struct pci_epf_ops *ops; @@ -102,6 +102,8 @@ struct pci_epf_driver { * @phys_addr: physical address that should be mapped to the BAR * @addr: virtual address corresponding to the @phys_addr * @size: the size of the address space present in BAR + * @barno: BAR number + * @flags: flags that are set for the BAR */ struct pci_epf_bar { dma_addr_t phys_addr; @@ -118,8 +120,11 @@ struct pci_epf_bar { * @header: represents standard configuration header * @bar: represents the BAR of EPF device * @msi_interrupts: number of MSI interrupts required by this function - * @func_no: unique function number within this endpoint device + * @msix_interrupts: number of MSI-X interrupts required by this function + * @func_no: unique (physical) function number within this endpoint device + * @vfunc_no: unique virtual function number within a physical function * @epc: the EPC device to which this EPF device is bound + * @epf_pf: the physical EPF device to which this virtual EPF device is bound * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc * @nb: notifier block to notify EPF of any EPC events (like linkup) @@ -130,6 +135,10 @@ struct pci_epf_bar { * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC * @sec_epc_func_no: unique (physical) function number within the secondary EPC * @group: configfs group associated with the EPF device + * @is_bound: indicates if bind notification to function driver has been invoked + * @is_vf: true - virtual function, false - physical function + * @vfunction_num_map: bitmap to manage virtual function number + * @pci_vepf: list of virtual endpoint functions associated with this function */ struct pci_epf { struct device dev; @@ -139,8 +148,10 @@ struct pci_epf { u8 msi_interrupts; u16 msix_interrupts; u8 func_no; + u8 vfunc_no; struct pci_epc *epc; + struct pci_epf *epf_pf; struct pci_epf_driver *driver; struct list_head list; struct notifier_block nb; @@ -153,6 +164,10 @@ struct pci_epf { struct pci_epf_bar sec_epc_bar[6]; u8 sec_epc_func_no; struct config_group *group; + unsigned int is_bound; + unsigned int is_vf; + unsigned long vfunction_num_map; + struct list_head pci_vepf; }; /** @@ -196,4 +211,6 @@ int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf, struct config_group *group); +int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); +void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); #endif /* __LINUX_PCI_EPF_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 24306504226a..cd8aa6fce204 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -49,6 +49,12 @@ PCI_STATUS_SIG_TARGET_ABORT | \ PCI_STATUS_PARITY) +/* Number of reset methods used in pci_reset_fn_methods array in pci.c */ +#define PCI_NUM_RESET_METHODS 7 + +#define PCI_RESET_PROBE true +#define PCI_RESET_DO_RESET false + /* * The PCI interface treats 
multi-function devices as independent * devices. The slot/function address of each device is encoded @@ -288,21 +294,14 @@ enum pci_bus_speed { enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); -struct pci_cap_saved_data { - u16 cap_nr; - bool cap_extended; - unsigned int size; - u32 data[]; -}; - -struct pci_cap_saved_state { - struct hlist_node next; - struct pci_cap_saved_data cap; +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; }; struct irq_affinity; struct pcie_link_state; -struct pci_vpd; struct pci_sriov; struct pci_p2pdma; struct rcec_ea; @@ -333,6 +332,7 @@ struct pci_dev { struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ struct pci_dev *rcec; /* Associated RCEC device */ #endif + u32 devcap; /* PCIe Device Capabilities */ u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ @@ -388,6 +388,7 @@ struct pci_dev { supported from root to here */ u16 l1ss; /* L1SS Capability pointer */ #endif + unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */ unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ pci_channel_state_t error_state; /* Current connectivity state */ @@ -427,7 +428,6 @@ struct pci_dev { unsigned int state_saved:1; unsigned int is_physfn:1; unsigned int is_virtfn:1; - unsigned int reset_fn:1; unsigned int is_hotplug_bridge:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ @@ -473,7 +473,7 @@ struct pci_dev { #ifdef CONFIG_PCI_MSI const struct attribute_group **msi_irq_groups; #endif - struct pci_vpd *vpd; + struct pci_vpd vpd; #ifdef CONFIG_PCIE_DPC u16 dpc_cap; unsigned int dpc_rp_extensions:1; @@ -497,7 +497,7 @@ struct pci_dev { u16 pasid_features; #endif #ifdef CONFIG_PCI_P2PDMA - struct pci_p2pdma *p2pdma; + struct pci_p2pdma __rcu *p2pdma; #endif u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ @@ -505,6 +505,9 @@ struct pci_dev { char *driver_override; /* Driver name to force a match */ unsigned long priv_flags; /* Private flags for the PCI driver */ + + /* These methods index pci_reset_fn_methods[] */ + u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -526,6 +529,16 @@ static inline int pci_channel_offline(struct pci_dev *pdev) return (pdev->error_state != pci_channel_io_normal); } +/* + * Currently in ACPI spec, for each PCI host bridge, PCI Segment + * Group number is limited to a 16-bit value, therefore (int)-1 is + * not a valid PCI domain number, and can be used as a sentinel + * value indicating ->domain_nr is not set by the driver (and + * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with + * pci_bus_find_domain_nr()). + */ +#define PCI_DOMAIN_NR_NOT_SET (-1) + struct pci_host_bridge { struct device dev; struct pci_bus *bus; /* Root bus */ @@ -533,6 +546,7 @@ struct pci_host_bridge { struct pci_ops *child_ops; void *sysdata; int busnr; + int domain_nr; struct list_head windows; /* resource_entry */ struct list_head dma_ranges; /* dma ranges resource list */ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ @@ -862,6 +876,8 @@ struct module; * MSI-X vectors available for distribution to the VFs. * @err_handler: See Documentation/PCI/pci-error-recovery.rst * @groups: Sysfs attribute groups. 
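[Example] PCI_DOMAIN_NR_NOT_SET works because an ACPI Segment Group number fits in 16 bits, so -1 can never collide with a real domain; generic code can test the host bridge's domain_nr against it and fall back to pci_bus_find_domain_nr(). The sentinel check reduced to its essentials (resolve_domain_nr() is an invented illustration, not kernel API):

#include <assert.h>

#define PCI_DOMAIN_NR_NOT_SET	(-1)

/* Segment numbers are 16-bit, so -1 is unreachable as a real domain. */
static int resolve_domain_nr(int driver_set, int allocated)
{
	return driver_set == PCI_DOMAIN_NR_NOT_SET ? allocated : driver_set;
}

int main(void)
{
	assert(resolve_domain_nr(PCI_DOMAIN_NR_NOT_SET, 3) == 3);	/* fallback */
	assert(resolve_domain_nr(0xffff, 3) == 0xffff);			/* driver wins */
	return 0;
}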
+ * @dev_groups: Attributes attached to the device that will be + * created once it is bound to the driver. * @driver: Driver model structure. * @dynids: List of dynamically added device IDs. */ @@ -879,6 +895,7 @@ struct pci_driver { u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf); const struct pci_error_handlers *err_handler; const struct attribute_group **groups; + const struct attribute_group **dev_groups; struct device_driver driver; struct pci_dynids dynids; }; @@ -899,6 +916,35 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID /** + * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with + * override_only flags. + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * @driver_override: the 32 bit PCI Device override_only + * + * This macro is used to create a struct pci_device_id that matches only a + * driver_override device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID. + */ +#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \ + .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, .override_only = (driver_override) + +/** + * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO + * "driver_override" PCI device. + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID and the driver_override will be set to + * PCI_ID_F_VFIO_DRIVER_OVERRIDE. + */ +#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \ + PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE) + +/** * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID @@ -1225,7 +1271,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, enum pci_bus_speed *speed, enum pcie_link_width *width); void pcie_print_link_status(struct pci_dev *dev); -bool pcie_has_flr(struct pci_dev *dev); +int pcie_reset_flr(struct pci_dev *dev, bool probe); int pcie_flr(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); @@ -1275,12 +1321,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); -struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); -struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, - u16 cap); -int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); -int pci_add_ext_cap_save_buffer(struct pci_dev *dev, - u16 cap, unsigned int size); int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); @@ -1617,10 +1657,23 @@ static inline bool pci_aer_available(void) { return false; } bool pci_ats_disabled(void); +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +bool pcie_ptm_enabled(struct pci_dev *dev); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +static inline bool pcie_ptm_enabled(struct pci_dev *dev) +{ return false; } +#endif + void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev 
*dev); void pci_cfg_access_unlock(struct pci_dev *dev); +int pci_dev_trylock(struct pci_dev *dev); +void pci_dev_unlock(struct pci_dev *dev); + /* * PCI domain support. Sometimes called PCI segment (eg by ACPI), * a PCI domain is defined to be a set of PCI buses which share @@ -1734,8 +1787,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { } static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __pci_register_driver(struct pci_driver *drv, - struct module *owner) +static inline int __must_check __pci_register_driver(struct pci_driver *drv, + struct module *owner, + const char *mod_name) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } @@ -1772,6 +1826,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } +static inline int pci_register_io_range(struct fwnode_handle *fwnode, + phys_addr_t addr, resource_size_t size) +{ return -EINVAL; } + static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) @@ -1871,9 +1929,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) #define pci_resource_len(dev,bar) \ - ((pci_resource_start((dev), (bar)) == 0 && \ - pci_resource_end((dev), (bar)) == \ - pci_resource_start((dev), (bar))) ? 0 : \ + ((pci_resource_end((dev), (bar)) == 0) ? 0 : \ \ (pci_resource_end((dev), (bar)) - \ pci_resource_start((dev), (bar)) + 1)) @@ -2240,20 +2296,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) -/* Small Resource Data Type Tag Item Names */ -#define PCI_VPD_STIN_END 0x0f /* End */ - -#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3) - -#define PCI_VPD_SRDT_TIN_MASK 0x78 -#define PCI_VPD_SRDT_LEN_MASK 0x07 -#define PCI_VPD_LRDT_TIN_MASK 0x7f - -#define PCI_VPD_LRDT_TAG_SIZE 3 -#define PCI_VPD_SRDT_TAG_SIZE 1 - -#define PCI_VPD_INFO_FLD_HDR_SIZE 3 - #define PCI_VPD_RO_KEYWORD_PARTNO "PN" #define PCI_VPD_RO_KEYWORD_SERIALNO "SN" #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" @@ -2261,83 +2303,45 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" /** - * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length - * @lrdt: Pointer to the beginning of the Large Resource Data Type tag - * - * Returns the extracted Large Resource Data Type length. - */ -static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) -{ - return (u16)lrdt[1] + ((u16)lrdt[2] << 8); -} - -/** - * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item - * @lrdt: Pointer to the beginning of the Large Resource Data Type tag - * - * Returns the extracted Large Resource Data Type Tag item. - */ -static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) -{ - return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); -} - -/** - * pci_vpd_srdt_size - Extracts the Small Resource Data Type length - * @srdt: Pointer to the beginning of the Small Resource Data Type tag - * - * Returns the extracted Small Resource Data Type length. 
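[Example] The pci_resource_len() rework keys the "unused BAR" case off end == 0 alone instead of comparing both start and end against zero; otherwise the length is the usual inclusive-range end - start + 1. The same computation as a plain function:

#include <assert.h>

struct resource { unsigned long start, end; };

/* Mirrors the simplified macro: end == 0 means an unused resource. */
static unsigned long resource_len(const struct resource *r)
{
	return r->end == 0 ? 0 : r->end - r->start + 1;
}

int main(void)
{
	struct resource unused = { 0, 0 };
	struct resource bar0   = { 0xe0000000, 0xe0000fff };

	assert(resource_len(&unused) == 0);
	assert(resource_len(&bar0) == 0x1000);	/* 4 KiB BAR */
	return 0;
}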
- */ -static inline u8 pci_vpd_srdt_size(const u8 *srdt) -{ - return (*srdt) & PCI_VPD_SRDT_LEN_MASK; -} - -/** - * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item - * @srdt: Pointer to the beginning of the Small Resource Data Type tag + * pci_vpd_alloc - Allocate buffer and read VPD into it + * @dev: PCI device + * @size: pointer to field where VPD length is returned * - * Returns the extracted Small Resource Data Type Tag Item. + * Returns pointer to allocated buffer or an ERR_PTR in case of failure */ -static inline u8 pci_vpd_srdt_tag(const u8 *srdt) -{ - return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3; -} +void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); /** - * pci_vpd_info_field_size - Extracts the information field length - * @info_field: Pointer to the beginning of an information field header + * pci_vpd_find_id_string - Locate id string in VPD + * @buf: Pointer to buffered VPD data + * @len: The length of the buffer area in which to search + * @size: Pointer to field where length of id string is returned * - * Returns the extracted information field length. + * Returns the index of the id string or -ENOENT if not found. */ -static inline u8 pci_vpd_info_field_size(const u8 *info_field) -{ - return info_field[2]; -} +int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); /** - * pci_vpd_find_tag - Locates the Resource Data Type tag provided - * @buf: Pointer to buffered vpd data - * @len: The length of the vpd buffer - * @rdt: The Resource Data Type to search for + * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section + * @buf: Pointer to buffered VPD data + * @len: The length of the buffer area in which to search + * @kw: The keyword to search for + * @size: Pointer to field where length of found keyword data is returned * - * Returns the index where the Resource Data Type was found or - * -ENOENT otherwise. + * Returns the index of the information field keyword data or -ENOENT if + * not found. */ -int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); +int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, + const char *kw, unsigned int *size); /** - * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD - * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search - * @len: The length of the buffer area, relative to off, in which to search - * @kw: The keyword to search for + * pci_vpd_check_csum - Check VPD checksum + * @buf: Pointer to buffered VPD data + * @len: VPD size * - * Returns the index where the information field keyword was found or - * -ENOENT otherwise. 
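[Example] The replacement VPD API pairs pci_vpd_alloc() with keyword lookups on the buffered copy: read the whole VPD once, then locate a read-only keyword by offset and length. A hedged sketch of that flow in a driver (error handling trimmed; demo_read_serial() is illustrative):

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int demo_read_serial(struct pci_dev *pdev)
{
	unsigned int vpd_len, kw_size;
	void *vpd;
	int pos;

	vpd = pci_vpd_alloc(pdev, &vpd_len);	/* buffer + total length out */
	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_size);
	if (pos >= 0)
		pci_info(pdev, "serial: %.*s\n", kw_size, (char *)vpd + pos);

	kfree(vpd);
	return pos < 0 ? pos : 0;
}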
+ * Returns 1 if VPD has no checksum, otherwise 0 or an errno */ -int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, - unsigned int len, const char *kw); +int pci_vpd_check_csum(const void *buf, unsigned int len); /* PCI <-> OF binding helpers */ #ifdef CONFIG_OF diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index b482e42d7153..3a10d6ec3ee7 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -44,12 +44,14 @@ struct hotplug_slot_ops { int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); - int (*reset_slot) (struct hotplug_slot *slot, int probe); + int (*reset_slot) (struct hotplug_slot *slot, bool probe); }; /** * struct hotplug_slot - used to register a physical slot with the hotplug pci core * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot + * @slot_list: internal list used to track hotplug PCI slots + * @pci_slot: represents a physical slot * @owner: The module owner of this structure * @mod_name: The module name (KBUILD_MODNAME) of this structure */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4c3fa5293d76..011f2f1ea5bb 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -555,6 +555,8 @@ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 +#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c +#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -631,6 +633,8 @@ #define PCI_DEVICE_ID_DELL_RAC4 0x0012 #define PCI_DEVICE_ID_DELL_PERC5 0x0015 +#define PCI_SUBVENDOR_ID_DELL 0x1028 + #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 @@ -1118,6 +1122,7 @@ #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 @@ -2448,7 +2453,8 @@ #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 -#define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */ +#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */ #define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 @@ -2640,6 +2646,7 @@ #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_82425 0x0486 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 2cb5188a7ef1..add077a81b21 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -10,37 +10,33 @@ #include <linux/phy.h> #include <linux/phylink.h> +#define NXP_SJA1105_XPCS_ID 0x00000010 +#define NXP_SJA1110_XPCS_ID 0x00000020 + /* AN mode */ #define DW_AN_C73 1 #define DW_AN_C37_SGMII 2 +#define DW_2500BASEX 3 -struct mdio_xpcs_args { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - struct mii_bus *bus; - int addr; - int an_mode; -}; +struct xpcs_id; -struct mdio_xpcs_ops { - int (*validate)(struct 
mdio_xpcs_args *xpcs, - unsigned long *supported, - struct phylink_link_state *state); - int (*config)(struct mdio_xpcs_args *xpcs, - const struct phylink_link_state *state); - int (*get_state)(struct mdio_xpcs_args *xpcs, - struct phylink_link_state *state); - int (*link_up)(struct mdio_xpcs_args *xpcs, int speed, - phy_interface_t interface); - int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface); +struct dw_xpcs { + struct mdio_device *mdiodev; + const struct xpcs_id *id; + struct phylink_pcs pcs; }; -#if IS_ENABLED(CONFIG_PCS_XPCS) -struct mdio_xpcs_ops *mdio_xpcs_get_ops(void); -#else -static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void) -{ - return NULL; -} -#endif +int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); +void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex); +int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, + unsigned int mode); +void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, + struct phylink_link_state *state); +int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, + int enable); +struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, + phy_interface_t interface); +void xpcs_destroy(struct dw_xpcs *xpcs); #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index dff7040f629a..af1071535de8 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -412,7 +412,7 @@ do { \ * instead. * * If there is no other protection through preempt disable and/or disabling - * interupts then one of these RMW operations can show unexpected behavior + * interrupts then one of these RMW operations can show unexpected behavior * because the execution thread was rescheduled on another processor or an * interrupt occurred and the same percpu variable was modified from the * interrupt context. diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 16c35a728b4c..ae16a9856305 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -213,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) * percpu_ref_get - increment a percpu refcount * @ref: percpu_ref to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_inc(). * * This function is safe to call as long as @ref is between init and exit. */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f5a6a2f069ed..fe156a8170aa 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -762,6 +762,7 @@ struct perf_event { #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; + u64 bpf_cookie; #endif #ifdef CONFIG_EVENT_TRACING @@ -1576,6 +1577,12 @@ static struct perf_pmu_events_attr _var = { \ .event_str = _str, \ }; +#define PMU_EVENT_ATTR_ID(_name, _show, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index a43047b1030d..e24d2c992b11 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -29,6 +29,24 @@ #endif /* + * This defines the first usable user address. Platforms + * can override its value with custom FIRST_USER_ADDRESS + * defined in their respective <asm/pgtable.h>. 
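[Example] FIRST_USER_ADDRESS and pmd_pgtable now get generic fallbacks guarded by #ifndef, the standard way an asm-generic header lets an architecture's <asm/pgtable.h> pre-empt a default by defining the symbol first. The pattern in miniature (standalone demo, no arch override present):

/* An arch header would have defined its own value before this point. */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS 0UL		/* generic default */
#endif

#include <assert.h>

int main(void)
{
	/* No override was seen, so the fallback is in effect. */
	assert(FIRST_USER_ADDRESS == 0UL);
	return 0;
}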
+ */ +#ifndef FIRST_USER_ADDRESS +#define FIRST_USER_ADDRESS 0UL +#endif + +/* + * This defines the generic helper for accessing PMD page + * table page. Although platforms can still override this + * via their respective <asm/pgtable.h>. + */ +#ifndef pmd_pgtable +#define pmd_pgtable(pmd) pmd_page(pmd) +#endif + +/* * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] * * The pXx_index() functions return the index of the entry in the page @@ -88,7 +106,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) #ifndef pmd_offset static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { - return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); + return pud_pgtable(*pud) + pmd_index(address); } #define pmd_offset pmd_offset #endif @@ -96,7 +114,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #ifndef pud_offset static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { - return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); + return p4d_pgtable(*p4d) + pud_index(address); } #define pud_offset pud_offset #endif @@ -1592,4 +1610,26 @@ typedef unsigned int pgtbl_mod_mask; #define pte_leaf_size(x) PAGE_SIZE #endif +/* + * Some architectures have MMUs that are configurable or selectable at boot + * time. These lead to variable PTRS_PER_x. For statically allocated arrays it + * helps to have a static maximum value. + */ + +#ifndef MAX_PTRS_PER_PTE +#define MAX_PTRS_PER_PTE PTRS_PER_PTE +#endif + +#ifndef MAX_PTRS_PER_PMD +#define MAX_PTRS_PER_PMD PTRS_PER_PMD +#endif + +#ifndef MAX_PTRS_PER_PUD +#define MAX_PTRS_PER_PUD PTRS_PER_PUD +#endif + +#ifndef MAX_PTRS_PER_P4D +#define MAX_PTRS_PER_P4D PTRS_PER_P4D +#endif + #endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 852743f07e3e..736e1d1a47c4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -93,6 +93,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface + * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay @@ -111,6 +112,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR + * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN * @PHY_INTERFACE_MODE_MAX: Book keeping @@ -126,6 +128,7 @@ typedef enum { PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, + PHY_INTERFACE_MODE_REVRMII, PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_RGMII_ID, PHY_INTERFACE_MODE_RGMII_RXID, @@ -145,6 +148,7 @@ typedef enum { PHY_INTERFACE_MODE_XAUI, /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GBASER, + PHY_INTERFACE_MODE_25GBASER, PHY_INTERFACE_MODE_USXGMII, /* 10GBASE-KR - with Clause 73 AN */ PHY_INTERFACE_MODE_10GKR, @@ -185,6 +189,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "rev-mii"; case PHY_INTERFACE_MODE_RMII: return "rmii"; + case PHY_INTERFACE_MODE_REVRMII: + return "rev-rmii"; case 
PHY_INTERFACE_MODE_RGMII: return "rgmii"; case PHY_INTERFACE_MODE_RGMII_ID: @@ -219,6 +225,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xaui"; case PHY_INTERFACE_MODE_10GBASER: return "10gbase-r"; + case PHY_INTERFACE_MODE_25GBASER: + return "25gbase-r"; case PHY_INTERFACE_MODE_USXGMII: return "usxgmii"; case PHY_INTERFACE_MODE_10GKR: @@ -1373,10 +1381,42 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); #if IS_ENABLED(CONFIG_PHYLIB) +int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id); +struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode); +struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode); +struct phy_device *device_phy_find_device(struct device *dev); +struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); void phy_device_free(struct phy_device *phydev); #else +static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id) +{ + return 0; +} +static inline +struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode) +{ + return 0; +} + +static inline +struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode) +{ + return NULL; +} + +static inline struct phy_device *device_phy_find_device(struct device *dev) +{ + return NULL; +} + +static inline +struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { @@ -1391,6 +1431,7 @@ static inline int phy_device_register(struct phy_device *phy) static inline void phy_device_free(struct phy_device *phydev) { } #endif /* CONFIG_PHYLIB */ void phy_device_remove(struct phy_device *phydev); +int phy_get_c45_ids(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 0ed434d02196..f3286f4cd306 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -125,7 +125,7 @@ struct phy_ops { /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY - * @max_link_rate: Maximum link rate supported by PHY (in Mbps) + * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer) * @mode: PHY mode */ struct phy_attrs { diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 71d956935405..3a35e74cdc61 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved. 
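[Example] The new fwnode PHY lookup helpers follow the usual IS_ENABLED(CONFIG_PHYLIB) split: real declarations when phylib is built, static inline stubs returning NULL (or 0) otherwise, so callers need no #ifdef of their own. The shape of that pattern, reduced to a toggleable standalone demo (the config macro here is a stand-in, not the real Kconfig plumbing):

#include <stdio.h>

struct fwnode_handle;
struct phy_device;

#define DEMO_CONFIG_PHYLIB 0	/* flip to 1 to model CONFIG_PHYLIB=y */

#if DEMO_CONFIG_PHYLIB
struct phy_device *fwnode_phy_find_device(struct fwnode_handle *fwnode);
#else
/* Stub keeps callers compiling; they simply see "no PHY found". */
static inline struct phy_device *
fwnode_phy_find_device(struct fwnode_handle *fwnode)
{
	return NULL;
}
#endif

int main(void)
{
	if (!fwnode_phy_find_device(NULL))
		printf("no PHY (stubbed out)\n");
	return 0;
}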
*/ #ifndef PHY_TEGRA_XUSB_H @@ -8,6 +8,7 @@ struct tegra_xusb_padctl; struct device; +enum usb_device_speed; struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev); void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl); @@ -23,4 +24,11 @@ int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl, int tegra_phy_xusb_utmi_port_reset(struct phy *phy); int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, unsigned int port); +int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy, + enum usb_device_speed speed); +int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy); +int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); +int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy); +bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy); + #endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index fd2acfd9b597..237291196ce2 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -441,6 +441,9 @@ void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); +int phylink_fwnode_phy_connect(struct phylink *pl, + struct fwnode_handle *fwnode, + u32 flags); void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); @@ -448,6 +451,9 @@ void phylink_mac_change(struct phylink *, bool up); void phylink_start(struct phylink *); void phylink_stop(struct phylink *); +void phylink_suspend(struct phylink *pl, bool mac_wol); +void phylink_resume(struct phylink *pl); + void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *); int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *); diff --git a/include/linux/pid.h b/include/linux/pid.h index fa10acb8d6a4..af308e15f174 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -78,6 +78,7 @@ struct file; extern struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); +int pidfd_create(struct pid *pid, unsigned int flags); static inline struct pid *get_pid(struct pid *pid) { diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index e18ab3d5908f..eee0e3948537 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -81,28 +81,28 @@ struct pinctrl_map; * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM + * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a + * value on the line. Use argument 1 to indicate high level, argument 0 to + * indicate low level. (Please see Documentation/driver-api/pin-control.rst, + * section "GPIO mode pitfalls" for a discussion around this parameter.) * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current * configuration (eg. the currently selected mux function) drive values on * the line. Use argument 1 to enable output mode, argument 0 to disable * it. 
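* (An illustrative aside, not lines from this hunk: using the packing helper
* that already lives in this header, the practical difference is
* pinconf_to_config_packed(PIN_CONFIG_OUTPUT, 1), which drives the line high,
* versus pinconf_to_config_packed(PIN_CONFIG_OUTPUT_ENABLE, 1), which only
* enables the output buffer and lets the currently selected mux function set
* the level.)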
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a - * value on the line. Use argument 1 to indicate high level, argument 0 to - * indicate low level. (Please see Documentation/driver-api/pinctl.rst, - * section "GPIO mode pitfalls" for a discussion around this parameter.) * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. - * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. - * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to - * this parameter (on a custom format) tells the driver which alternative - * slew rate to use. * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs) * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. + * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. + * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to + * this parameter (on a custom format) tells the driver which alternative + * slew rate to use. * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. @@ -127,13 +127,13 @@ enum pin_config_param { PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_MODE_LOW_POWER, PIN_CONFIG_MODE_PWM, - PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_SKEW_DELAY, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, - PIN_CONFIG_SKEW_DELAY, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5d2705f1d01c..fc5642431b92 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -48,6 +48,7 @@ struct pipe_buffer { * @files: number of struct file referring this pipe (protected by ->i_lock) * @r_counter: reader counter * @w_counter: writer counter + * @poll_usage: is this pipe used for epoll, which has crazy wakeups? * @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers @@ -70,6 +71,7 @@ struct pipe_inode_info { unsigned int files; unsigned int r_counter; unsigned int w_counter; + unsigned int poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h index 2955ba976048..6beb26b7151d 100644 --- a/include/linux/pkeys.h +++ b/include/linux/pkeys.h @@ -44,10 +44,6 @@ static inline bool arch_pkeys_enabled(void) return false; } -static inline void copy_init_pkru_to_fpregs(void) -{ -} - #endif /* ! 
CONFIG_ARCH_HAS_PKEYS */ #endif /* _LINUX_PKEYS_H */ diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h deleted file mode 100644 index 0e0d3df9bf72..000000000000 --- a/include/linux/pl353-smc.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ARM PL353 SMC Driver Header - * - * Copyright (C) 2012 - 2018 Xilinx, Inc - */ - -#ifndef __LINUX_PL353_SMC_H -#define __LINUX_PL353_SMC_H - -enum pl353_smc_ecc_mode { - PL353_SMC_ECCMODE_BYPASS = 0, - PL353_SMC_ECCMODE_APB = 1, - PL353_SMC_ECCMODE_MEM = 2 -}; - -enum pl353_smc_mem_width { - PL353_SMC_MEM_WIDTH_8 = 0, - PL353_SMC_MEM_WIDTH_16 = 1 -}; - -u32 pl353_smc_get_ecc_val(int ecc_reg); -bool pl353_smc_ecc_is_busy(void); -int pl353_smc_get_nand_int_status_raw(void); -void pl353_smc_clr_nand_int(void); -int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode); -int pl353_smc_set_ecc_pg_size(unsigned int pg_sz); -int pl353_smc_set_buswidth(unsigned int bw); -void pl353_smc_set_cycles(u32 timings[]); -#endif diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 45f53afc46e2..271bd87bff0a 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4228,6 +4228,7 @@ enum ec_device_event { EC_DEVICE_EVENT_TRACKPAD, EC_DEVICE_EVENT_DSP, EC_DEVICE_EVENT_WIFI, + EC_DEVICE_EVENT_WLC, }; enum ec_device_event_param { @@ -5460,6 +5461,72 @@ struct ec_response_rollback_info { /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 +/** + * Get the number of peripheral charge ports + */ +#define EC_CMD_PCHG_COUNT 0x0134 + +#define EC_PCHG_MAX_PORTS 8 + +struct ec_response_pchg_count { + uint8_t port_count; +} __ec_align1; + +/** + * Get the status of a peripheral charge port + */ +#define EC_CMD_PCHG 0x0135 + +struct ec_params_pchg { + uint8_t port; +} __ec_align1; + +struct ec_response_pchg { + uint32_t error; /* enum pchg_error */ + uint8_t state; /* enum pchg_state state */ + uint8_t battery_percentage; + uint8_t unused0; + uint8_t unused1; + /* Fields added in version 1 */ + uint32_t fw_version; + uint32_t dropped_event_count; +} __ec_align2; + +enum pchg_state { + /* Charger is reset and not initialized. */ + PCHG_STATE_RESET = 0, + /* Charger is initialized or disabled. */ + PCHG_STATE_INITIALIZED, + /* Charger is enabled and ready to detect a device. */ + PCHG_STATE_ENABLED, + /* Device is in proximity. */ + PCHG_STATE_DETECTED, + /* Device is being charged. */ + PCHG_STATE_CHARGING, + /* Device is fully charged. It implies DETECTED (& not charging). */ + PCHG_STATE_FULL, + /* In download (a.k.a. firmware update) mode */ + PCHG_STATE_DOWNLOAD, + /* In download mode. Ready for receiving data. */ + PCHG_STATE_DOWNLOADING, + /* Device is ready for data communication. 
*/ + PCHG_STATE_CONNECTED, + /* Put no more entry below */ + PCHG_STATE_COUNT, +}; + +#define EC_PCHG_STATE_TEXT { \ + [PCHG_STATE_RESET] = "RESET", \ + [PCHG_STATE_INITIALIZED] = "INITIALIZED", \ + [PCHG_STATE_ENABLED] = "ENABLED", \ + [PCHG_STATE_DETECTED] = "DETECTED", \ + [PCHG_STATE_CHARGING] = "CHARGING", \ + [PCHG_STATE_FULL] = "FULL", \ + [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \ + [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \ + [PCHG_STATE_CONNECTED] = "CONNECTED", \ + } + /*****************************************************************************/ /* Voltage regulator controls */ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 5d1fb0d78a22..76b13ef67562 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -96,6 +96,7 @@ enum { MCASP_VERSION_2, /* DA8xx/OMAPL1x */ MCASP_VERSION_3, /* TI81xx/AM33xx */ MCASP_VERSION_4, /* DRA7xxx */ + MCASP_VERSION_OMAP, /* OMAP4/5 */ }; enum mcbsp_clk_input_pin { diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index b34a094b2258..860ba4bc5ead 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -41,36 +41,39 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters + * @nr_masters: Number of AHB masters supported by the controller * @nr_channels: Number of channels supported by hardware (max 8) * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller - * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. * @max_burst: Maximum value of burst transaction size supported by hardware * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). * @protctl: Protection control signals setting per channel. + * @quirks: Optional platform quirks. 
*/ struct dw_dma_platform_data { - unsigned int nr_channels; + u32 nr_masters; + u32 nr_channels; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ - unsigned char chan_allocation_order; + u32 chan_allocation_order; #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ - unsigned char chan_priority; - unsigned int block_size; - unsigned char nr_masters; - unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; - unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; + u32 chan_priority; + u32 block_size; + u32 data_width[DW_DMA_MAX_NR_MASTERS]; + u32 multi_block[DW_DMA_MAX_NR_CHANNELS]; u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) #define CHAN_PROTCTL_MASK GENMASK(2, 0) - unsigned char protctl; + u32 protctl; +#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0) + u32 quirks; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h deleted file mode 100644 index 725602d9df91..000000000000 --- a/include/linux/platform_data/dma-imx-sdma.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __MACH_MXC_SDMA_H__ -#define __MACH_MXC_SDMA_H__ - -/** - * struct sdma_script_start_addrs - SDMA script start pointers - * - * start addresses of the different functions in the physical - * address space of the SDMA engine. - */ -struct sdma_script_start_addrs { - s32 ap_2_ap_addr; - s32 ap_2_bp_addr; - s32 ap_2_ap_fixed_addr; - s32 bp_2_ap_addr; - s32 loopback_on_dsp_side_addr; - s32 mcu_interrupt_only_addr; - s32 firi_2_per_addr; - s32 firi_2_mcu_addr; - s32 per_2_firi_addr; - s32 mcu_2_firi_addr; - s32 uart_2_per_addr; - s32 uart_2_mcu_addr; - s32 per_2_app_addr; - s32 mcu_2_app_addr; - s32 per_2_per_addr; - s32 uartsh_2_per_addr; - s32 uartsh_2_mcu_addr; - s32 per_2_shp_addr; - s32 mcu_2_shp_addr; - s32 ata_2_mcu_addr; - s32 mcu_2_ata_addr; - s32 app_2_per_addr; - s32 app_2_mcu_addr; - s32 shp_2_per_addr; - s32 shp_2_mcu_addr; - s32 mshc_2_mcu_addr; - s32 mcu_2_mshc_addr; - s32 spdif_2_mcu_addr; - s32 mcu_2_spdif_addr; - s32 asrc_2_mcu_addr; - s32 ext_mem_2_ipu_addr; - s32 descrambler_addr; - s32 dptc_dvfs_addr; - s32 utra_addr; - s32 ram_code_start_addr; - /* End of v1 array */ - s32 mcu_2_ssish_addr; - s32 ssish_2_mcu_addr; - s32 hdmi_dma_addr; - /* End of v2 array */ - s32 zcanfd_2_mcu_addr; - s32 zqspi_2_mcu_addr; - s32 mcu_2_ecspi_addr; - /* End of v3 array */ - s32 mcu_2_zqspi_addr; - /* End of v4 array */ -}; - -#endif /* __MACH_MXC_SDMA_H__ */ diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h deleted file mode 100644 index 0aa5c6720259..000000000000 --- a/include/linux/platform_data/gpio-dwapb.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2014 Intel Corporation. 
- */ - -#ifndef GPIO_DW_APB_H -#define GPIO_DW_APB_H - -#define DWAPB_MAX_GPIOS 32 - -struct dwapb_port_property { - struct fwnode_handle *fwnode; - unsigned int idx; - unsigned int ngpio; - unsigned int gpio_base; - int irq[DWAPB_MAX_GPIOS]; - bool irq_shared; -}; - -struct dwapb_platform_data { - struct dwapb_port_property *properties; - unsigned int nports; -}; - -#endif diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h deleted file mode 100644 index cba1184b364c..000000000000 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2010 Wolfram Sang <kernel@pengutronix.de> - */ - -#ifndef __ASM_ARCH_IMX_ESDHC_H -#define __ASM_ARCH_IMX_ESDHC_H - -#include <linux/types.h> - -enum wp_types { - ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ - ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ - ESDHC_WP_GPIO, /* external gpio pin for WP */ -}; - -enum cd_types { - ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ - ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ - ESDHC_CD_GPIO, /* external gpio pin for CD */ - ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ -}; - -/** - * struct esdhc_platform_data - platform data for esdhc on i.MX - * - * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. - * - * @wp_type: type of write_protect method (see wp_types enum above) - * @cd_type: type of card_detect method (see cd_types enum above) - */ - -struct esdhc_platform_data { - enum wp_types wp_type; - enum cd_types cd_type; - int max_bus_width; - unsigned int delay_line; - unsigned int tuning_step; /* The delay cell steps in tuning procedure */ - unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ - unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ -}; -#endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/pata_ixp4xx_cf.h b/include/linux/platform_data/pata_ixp4xx_cf.h new file mode 100644 index 000000000000..e60fa41da4a5 --- /dev/null +++ b/include/linux/platform_data/pata_ixp4xx_cf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_PATA_IXP4XX_H +#define __PLATFORM_DATA_PATA_IXP4XX_H + +#include <linux/types.h> + +/* + * This structure provides a means for the board setup code + * to give information to the pata_ixp4xx driver. It is + * passed as platform_data.
+ */ +struct ixp4xx_pata_data { + volatile u32 *cs0_cfg; + volatile u32 *cs1_cfg; + unsigned long cs0_bits; + unsigned long cs1_bits; + void __iomem *cmd; + void __iomem *ctl; +}; + +#endif diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h deleted file mode 100644 index 81a388ff58cc..000000000000 --- a/include/linux/platform_data/spi-ath79.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - */ - -#ifndef _ATH79_SPI_PLATFORM_H -#define _ATH79_SPI_PLATFORM_H - -struct ath79_spi_platform_data { - unsigned bus_num; - unsigned num_chipselect; -}; - -#endif /* _ATH79_SPI_PLATFORM_H */ diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index 65fd5ffd257c..f0db674f07b8 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -12,5 +12,6 @@ /* Board specific platform_data */ struct mtk_chip_config { u32 sample_sel; + u32 tick_delay; }; #endif diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index e40b28ca892e..897051e51b78 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -13,8 +13,9 @@ /** * struct st_sensors_platform_data - Platform data for the ST sensors * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). - * Available only for accelerometer and pressure sensors. + * Available only for accelerometer, magnetometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * Magnetometer DRDY is supported only on LSM9DS0. * @open_drain: set the interrupt line to be open drain if possible. * @spi_3wire: enable spi-3wire mode. * @pullups: enable/disable i2c controller pullup resistors. 
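To make the @drdy_int_pin change concrete, here is a minimal board-code sketch; the device and values are hypothetical and only fields documented in this header are assumed:

#include <linux/platform_data/st_sensors_pdata.h>

/* LSM9DS0 magnetometer: route DRDY to interrupt pin 2 on an open-drain line. */
static struct st_sensors_platform_data lsm9ds0_mag_pdata = {
	.drdy_int_pin = 2,
	.open_drain = true,
};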
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 2f274cf52805..17dc5cb6f3f2 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -61,6 +61,7 @@ #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 /* Misc */ +#define ASUS_WMI_DEVID_PANEL_OD 0x00050019 #define ASUS_WMI_DEVID_CAMERA 0x00060013 #define ASUS_WMI_DEVID_LID_FLIP 0x00060062 @@ -89,6 +90,12 @@ /* Keyboard dock */ #define ASUS_WMI_DEVID_KBD_DOCK 0x00120063 +/* egpu on/off */ +#define ASUS_WMI_DEVID_EGPU 0x00090019 + +/* dgpu on/off */ +#define ASUS_WMI_DEVID_DGPU 0x00090020 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h index 207e1a317800..41df326583f9 100644 --- a/include/linux/platform_data/x86/clk-lpss.h +++ b/include/linux/platform_data/x86/clk-lpss.h @@ -15,6 +15,6 @@ struct lpss_clk_data { struct clk *clk; }; -extern int lpt_clk_init(void); +extern int lpss_atom_clk_init(void); #endif /* __CLK_LPSS_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index cd81e060863c..7c96f169d274 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -66,9 +66,6 @@ extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); extern void __iomem * -devm_platform_ioremap_resource_wc(struct platform_device *pdev, - unsigned int index); -extern void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name); extern int platform_get_irq(struct platform_device *, unsigned int); @@ -200,8 +197,6 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); -extern int platform_device_add_properties(struct platform_device *pdev, - const struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index a6329003aee7..e5cbb6841f3a 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -2,7 +2,7 @@ /* * Platform profile sysfs interface * - * See Documentation/ABI/testing/sysfs-platform_profile.rst for more + * See Documentation/userspace-api/sysfs-platform_profile.rst for more * information. */ diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h deleted file mode 100644 index b8fac96f05aa..000000000000 --- a/include/linux/pm2301_charger.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * PM2301 charger driver.
- * - * Copyright (C) 2012 ST Ericsson Corporation - * - * Contact: Olivier LAUNAY (olivier.launay@stericsson.com - */ - -#ifndef __LINUX_PM2301_H -#define __LINUX_PM2301_H - -/** - * struct pm2xxx_bm_charger_parameters - Charger specific parameters - * @ac_volt_max: maximum allowed AC charger voltage in mV - * @ac_curr_max: maximum allowed AC charger current in mA - */ -struct pm2xxx_bm_charger_parameters { - int ac_volt_max; - int ac_curr_max; -}; - -/** - * struct pm2xxx_bm_data - pm2xxx battery management data - * @enable_overshoot flag to enable VBAT overshoot control - * @chg_params charger parameters - */ -struct pm2xxx_bm_data { - bool enable_overshoot; - const struct pm2xxx_bm_charger_parameters *chg_params; -}; - -struct pm2xxx_charger_platform_data { - char **supplied_to; - size_t num_supplicants; - int i2c_bus; - const char *label; - int gpio_irq_number; - unsigned int lpn_gpio; - int irq_type; -}; - -struct pm2xxx_platform_data { - struct pm2xxx_charger_platform_data *wall_charger; - struct pm2xxx_bm_data *battery; -}; - -#endif /* __LINUX_PM2301_H */ diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8ddc7860e131..ada3a0ab10bf 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -47,6 +47,7 @@ extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); +extern int devm_pm_clk_create(struct device *dev); #else static inline bool pm_clk_no_clocks(struct device *dev) { @@ -83,6 +84,10 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id) static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) { } +static inline int devm_pm_clk_create(struct device *dev) +{ + return -EINVAL; +} #endif #ifdef CONFIG_HAVE_CLK diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index dfcfbcecc34b..67017c9390c8 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -198,6 +198,8 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + unsigned int default_pstate; + unsigned int rpm_pstate; ktime_t next_wakeup; void *data; }; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6c08a085367b..222da43b7096 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -59,6 +59,8 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); +extern int devm_pm_runtime_enable(struct device *dev); + /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. * @dev: Target device. 
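A short sketch of the new devm helper in use (hypothetical driver; only the devm_pm_runtime_enable() declaration added above is assumed):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* The matching pm_runtime_disable() is issued automatically on unbind. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* Bump the usage counter and resume; see the advice in the hunk below. */
	return pm_runtime_resume_and_get(&pdev->dev);
}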
@@ -253,6 +255,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } + static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} @@ -380,6 +384,9 @@ static inline int pm_runtime_get(struct device *dev) * The possible return values of this function are the same as for * pm_runtime_resume() and the runtime PM usage counter of @dev remains * incremented in all cases, even if it returns an error code. + * Consider using pm_runtime_resume_and_get() instead of it, especially + * if its return value is checked by the caller, as this is likely to result + * in cleaner code. */ static inline int pm_runtime_get_sync(struct device *dev) { diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h index 12cbbf305969..fa9f08164c36 100644 --- a/include/linux/pmbus.h +++ b/include/linux/pmbus.h @@ -43,6 +43,36 @@ */ #define PMBUS_NO_CAPABILITY BIT(2) +/* + * PMBUS_READ_STATUS_AFTER_FAILED_CHECK + * + * Some PMBus chips end up in an undefined state when trying to read an + * unsupported register. For such chips, it is necessary to reset the + * chip pmbus controller to a known state after a failed register check. + * This can be done by reading a known register. By setting this flag the + * driver will try to read the STATUS register after each failed + * register check. This read may fail, but it will put the chip in a + * known state. + */ +#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3) + +/* + * PMBUS_NO_WRITE_PROTECT + * + * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT + * register. For such chips, this flag should be set so that the PMBus core + * driver doesn't use the WRITE_PROTECT command to determine its behavior. + */ +#define PMBUS_NO_WRITE_PROTECT BIT(4) + +/* + * PMBUS_USE_COEFFICIENTS_CMD + * + * When this flag is set the PMBus core driver will use the COEFFICIENTS + * register to initialize the coefficients for the direct mode format. 
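+ * For example (a sketch, not lines from this change), a chip whose driver
+ * needs this together with the WRITE_PROTECT workaround above could pass
+ * pdata->flags = PMBUS_USE_COEFFICIENTS_CMD | PMBUS_NO_WRITE_PROTECT;
+ * through the struct pmbus_platform_data declared below.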
+ */ +#define PMBUS_USE_COEFFICIENTS_CMD BIT(5) + struct pmbus_platform_data { u32 flags; /* Device specific flags */ diff --git a/include/linux/poison.h b/include/linux/poison.h index aff1c9250c82..d62ef5a6b4e9 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -78,4 +78,7 @@ /********** security/ **********/ #define KEY_DESTROY 0xbd +/********** net/core/page_pool.c **********/ +#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA) + #endif diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 896c16d2c5fb..00fef0064355 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -82,12 +82,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head, return timerqueue_add(head, &ctmr->node); } -static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) +static inline bool cpu_timer_queued(struct cpu_timer *ctmr) { - if (ctmr->head) { + return !!ctmr->head; +} + +static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (cpu_timer_queued(ctmr)) { timerqueue_del(ctmr->head, &ctmr->node); ctmr->head = NULL; + return true; } + return false; } static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 307094ebb88c..b65c877d92b8 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -72,6 +72,8 @@ extern struct posix_acl *get_posix_acl(struct inode *, int); extern int set_posix_acl(struct user_namespace *, struct inode *, int, struct posix_acl *); +struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); + #ifdef CONFIG_FS_POSIX_ACL int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, @@ -84,7 +86,6 @@ extern int simple_set_acl(struct user_namespace *, struct inode *, extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); -struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h deleted file mode 100644 index 51976b52f373..000000000000 --- a/include/linux/power/ab8500.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson 2013 - * Author: Hongbo Zhang <hongbo.zhang@linaro.com> - */ - -#ifndef PWR_AB8500_H -#define PWR_AB8500_H - -extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[]; -extern const int ab8500_temp_tbl_a_size; - -extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[]; -extern const int ab8500_temp_tbl_b_size; - -#endif /* PWR_AB8500_H */ diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index d55c746ac56e..dd24756a8af7 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -69,7 +69,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAx17042_TOFF = 0x2D, + MAX17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, @@ -110,13 +110,14 @@ enum max17042_register { MAX17042_VFSOC = 0xFF, }; +/* Registers specific to max17055 only */ enum max17055_register { MAX17055_QRes = 0x0C, + MAX17055_RCell = 0x14, MAX17055_TTF = 0x20, - 
MAX17055_V_empty = 0x3A, - MAX17055_TIMER = 0x3E, + MAX17055_DieTemp = 0x34, MAX17055_USER_MEM = 0x40, - MAX17055_RGAIN = 0x42, + MAX17055_RGAIN = 0x43, MAX17055_ConvgCfg = 0x49, MAX17055_VFRemCap = 0x4A, @@ -155,13 +156,14 @@ enum max17055_register { MAX17055_AtAvCap = 0xDF, }; -/* Registers specific to max17047/50 */ +/* Registers specific to max17047/50/55 */ enum max17047_register { MAX17047_QRTbl00 = 0x12, MAX17047_FullSOCThr = 0x13, MAX17047_QRTbl10 = 0x22, MAX17047_QRTbl20 = 0x32, MAX17047_V_empty = 0x3A, + MAX17047_TIMER = 0x3E, MAX17047_QRTbl30 = 0x42, }; diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index 971c9264179e..167b9b040091 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -155,6 +155,7 @@ struct omap_sr { struct voltagedomain *voltdm; struct dentry *dbg_dir; unsigned int irq; + struct clk *fck; int srid; int ip_type; int nvalue_count; @@ -169,6 +170,7 @@ struct omap_sr { u32 senp_mod; u32 senn_mod; void __iomem *base; + unsigned long enabled:1; }; /** diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index be203985ecdd..9ca1f120a211 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table { */ struct power_supply_battery_info { + unsigned int technology; /* from the enum above */ int energy_full_design_uwh; /* microWatt-hours */ int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ diff --git a/include/linux/prandom.h b/include/linux/prandom.h index bbf4b4ad61df..056d31317e49 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m) */ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) { - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL; state->s1 = __seed(i, 2U); state->s2 = __seed(i, 8U); diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 9881eac0698f..4d244e295e85 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -121,7 +121,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: diff --git a/include/linux/printk.h b/include/linux/printk.h index fe7eb2351610..85b656f82d75 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -2,12 +2,13 @@ #ifndef __KERNEL_PRINTK__ #define __KERNEL_PRINTK__ -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/init.h> #include <linux/kern_levels.h> #include <linux/linkage.h> #include <linux/cache.h> #include <linux/ratelimit_types.h> +#include <linux/once_lite.h> extern const char linux_banner[]; extern const char linux_proc_banner[]; @@ -69,16 +70,7 @@ extern int console_printk[]; #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) -static inline void console_silent(void) -{ - console_loglevel = CONSOLE_LOGLEVEL_SILENT; -} - -static inline void console_verbose(void) -{ - if (console_loglevel) - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} +extern void console_verbose(void); /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 @@ -149,18 +141,6 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif -#ifdef CONFIG_PRINTK_NMI -extern void printk_nmi_enter(void); -extern void printk_nmi_exit(void); -extern void printk_nmi_direct_enter(void); -extern void printk_nmi_direct_exit(void); -#else -static inline void printk_nmi_enter(void) { } -static inline void printk_nmi_exit(void) { } -static inline void printk_nmi_direct_enter(void) { } -static inline void printk_nmi_direct_exit(void) { } -#endif /* PRINTK_NMI */ - struct dev_printk_info; #ifdef CONFIG_PRINTK @@ -173,12 +153,22 @@ asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); asmlinkage __printf(1, 2) __cold -int printk(const char *fmt, ...); +int _printk(const char *fmt, ...); /* * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ -__printf(1, 2) __cold int printk_deferred(const char *fmt, ...); +__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); + +extern void __printk_safe_enter(void); +extern void __printk_safe_exit(void); +/* + * The printk_deferred_enter/exit macros are available only as a hack for + * some code paths that need to defer all printk console printing. Interrupts + * must be disabled for the deferred duration. + */ +#define printk_deferred_enter __printk_safe_enter +#define printk_deferred_exit __printk_safe_exit /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -206,9 +196,8 @@ void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); +extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; -extern void printk_safe_flush(void); -extern void printk_safe_flush_on_panic(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -216,15 +205,24 @@ int vprintk(const char *s, va_list args) return 0; } static inline __printf(1, 2) __cold -int printk(const char *s, ...) +int _printk(const char *s, ...) { return 0; } static inline __printf(1, 2) __cold -int printk_deferred(const char *s, ...) +int _printk_deferred(const char *s, ...) { return 0; } + +static inline void printk_deferred_enter(void) +{ +} + +static inline void printk_deferred_exit(void) +{ +} + static inline int printk_ratelimit(void) { return 0; @@ -269,19 +267,56 @@ static inline void show_regs_print_info(const char *log_lvl) { } -static inline void dump_stack(void) +static inline void dump_stack_lvl(const char *log_lvl) { } -static inline void printk_safe_flush(void) -{ -} - -static inline void printk_safe_flush_on_panic(void) +static inline void dump_stack(void) { } #endif +#ifdef CONFIG_SMP +extern int __printk_cpu_trylock(void); +extern void __printk_wait_on_cpu_lock(void); +extern void __printk_cpu_unlock(void); + +/** + * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning + * lock and disable interrupts. + * @flags: Stack-allocated storage for saving local interrupt state, + * to be passed to printk_cpu_unlock_irqrestore(). + * + * If the lock is owned by another CPU, spin until it becomes available. + * Interrupts are restored while spinning. + */ +#define printk_cpu_lock_irqsave(flags) \ + for (;;) { \ + local_irq_save(flags); \ + if (__printk_cpu_trylock()) \ + break; \ + local_irq_restore(flags); \ + __printk_wait_on_cpu_lock(); \ + } + +/** + * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning + * lock and restore interrupts. 
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave(). + */ +#define printk_cpu_unlock_irqrestore(flags) \ + do { \ + __printk_cpu_unlock(); \ + local_irq_restore(flags); \ + } while (0) \ + +#else + +#define printk_cpu_lock_irqsave(flags) ((void)flags) +#define printk_cpu_unlock_irqrestore(flags) ((void)flags) + +#endif /* CONFIG_SMP */ + extern int kptr_restrict; /** @@ -301,6 +336,117 @@ extern int kptr_restrict; #define pr_fmt(fmt) fmt #endif +struct module; + +#ifdef CONFIG_PRINTK_INDEX +struct pi_entry { + const char *fmt; + const char *func; + const char *file; + unsigned int line; + + /* + * While printk and pr_* have the level stored in the string at compile + * time, some subsystems dynamically add it at runtime through the + * format string. For these dynamic cases, we allow the subsystem to + * tell us the level at compile time. + * + * NULL indicates that the level, if any, is stored in fmt. + */ + const char *level; + + /* + * The format string used by various subsystem specific printk() + * wrappers to prefix the message. + * + * Note that the static prefix defined by the pr_fmt() macro is stored + * directly in the message format (@fmt), not here. + */ + const char *subsys_fmt_prefix; +} __packed; + +#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \ + do { \ + if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \ + /* + * We check __builtin_constant_p multiple times here + * for the same input because GCC will produce an error + * if we try to assign a static variable to fmt if it + * is not a constant, even with the outer if statement. + */ \ + static const struct pi_entry _entry \ + __used = { \ + .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + .level = __builtin_constant_p(_level) ? (_level) : NULL, \ + .subsys_fmt_prefix = _subsys_fmt_prefix,\ + }; \ + static const struct pi_entry *_entry_ptr \ + __used __section(".printk_index") = &_entry; \ + } \ + } while (0) + +#else /* !CONFIG_PRINTK_INDEX */ +#define __printk_index_emit(...) do {} while (0) +#endif /* CONFIG_PRINTK_INDEX */ + +/* + * Some subsystems have their own custom printk that applies a va_format to a + * generic format, for example, to include a device number or other metadata + * alongside the format supplied by the caller. + * + * In order to store these in the way they would be emitted by the printk + * infrastructure, the subsystem provides us with the start, fixed string, and + * any subsequent text in the format string. + * + * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed + * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the + * first one. + * + * subsys_fmt_prefix must be known at compile time, or compilation will fail + * (since this is a mistake). If fmt or level is not known at compile time, no + * index entry will be made (since this can legitimately happen). + */ +#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \ + __printk_index_emit(fmt, level, subsys_fmt_prefix) + +#define printk_index_wrap(_p_func, _fmt, ...) \ + ({ \ + __printk_index_emit(_fmt, NULL, NULL); \ + _p_func(_fmt, ##__VA_ARGS__); \ + }) + + +/** + * printk - print a kernel message + * @fmt: format string + * + * This is printk(). It can be called from any context. We want it to work. + * + * If printk indexing is enabled, _printk() is called from printk_index_wrap. + * Otherwise, printk is simply #defined to _printk. 
+ * + * We try to grab the console_lock. If we succeed, it's easy - we log the + * output and call the console drivers. If we fail to get the semaphore, we + * place the output into the log buffer and return. The current holder of + * the console_sem will notice the new output in console_unlock(); and will + * send it to the consoles before releasing the lock. + * + * One effect of this deferred printing is that code which calls printk() and + * then changes console_loglevel may break. This is because console_loglevel + * is inspected when the actual printing occurs. + * + * See also: + * printf(3) + * + * See the vsnprintf() documentation for format string extensions over C99. + */ +#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__) +#define printk_deferred(fmt, ...) \ + printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__) + /** * pr_emerg - Print an emergency-level message * @fmt: format string @@ -436,27 +582,9 @@ extern int kptr_restrict; #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) \ -({ \ - static bool __section(".data.once") __print_once; \ - bool __ret_print_once = !__print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk(fmt, ##__VA_ARGS__); \ - } \ - unlikely(__ret_print_once); \ -}) + DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__) #define printk_deferred_once(fmt, ...) \ -({ \ - static bool __section(".data.once") __print_once; \ - bool __ret_print_once = !__print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk_deferred(fmt, ##__VA_ARGS__); \ - } \ - unlikely(__ret_print_once); \ -}) + DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/prmt.h b/include/linux/prmt.h new file mode 100644 index 000000000000..24da8364b919 --- /dev/null +++ b/include/linux/prmt.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifdef CONFIG_ACPI_PRMT +void init_prmt(void); +#else +static inline void init_prmt(void) { } +#endif diff --git a/include/linux/property.h b/include/linux/property.h index 0d876316e61d..357513a977e5 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -119,7 +119,7 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); -int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); +int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); unsigned int device_get_child_node_count(struct device *dev); @@ -484,8 +484,6 @@ void software_node_unregister_node_group(const struct software_node **node_group int software_node_register(const struct software_node *node); void software_node_unregister(const struct software_node *node); -int software_node_notify(struct device *dev, unsigned long action); - struct fwnode_handle * fwnode_create_software_node(const struct property_entry *properties, const struct fwnode_handle *parent); diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h index 99564f93d774..924ca07aafbd 100644 --- a/include/linux/pstore_blk.h +++ b/include/linux/pstore_blk.h @@ -10,36 +10,15 @@ /** * struct pstore_device_info - back-end pstore/blk driver structure. * - * @total_size: The total size in bytes pstore/blk can use. It must be greater - * than 4096 and be multiple of 4096. * @flags: Refer to macro starting with PSTORE_FLAGS defined in * linux/pstore.h. 
It means what front-ends this device support. * Zero means all backends for compatible. - * @read: The general read operation. Both of the function parameters - * @size and @offset are relative value to bock device (not the - * whole disk). - * On success, the number of bytes should be returned, others - * means error. - * @write: The same as @read, but the following error number: - * -EBUSY means try to write again later. - * -ENOMSG means to try next zone. - * @erase: The general erase operation for device with special removing - * job. Both of the function parameters @size and @offset are - * relative value to storage. - * Return 0 on success and others on failure. - * @panic_write:The write operation only used for panic case. It's optional - * if you do not care panic log. The parameters are relative - * value to storage. - * On success, the number of bytes should be returned, others - * excluding -ENOMSG mean error. -ENOMSG means to try next zone. + * @zone: The struct pstore_zone_info details. + * */ struct pstore_device_info { - unsigned long total_size; unsigned int flags; - pstore_zone_read_op read; - pstore_zone_write_op write; - pstore_zone_erase_op erase; - pstore_zone_write_op panic_write; + struct pstore_zone_info zone; }; int register_pstore_device(struct pstore_device_info *dev); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 0d47fd33b228..2e5565067355 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,10 @@ #include <linux/device.h> #include <linux/pps_kernel.h> #include <linux/ptp_clock.h> +#include <linux/timecounter.h> +#include <linux/skbuff.h> +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +137,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -186,7 +189,33 @@ struct ptp_clock_event { }; }; -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) +/** + * scaled_ppm_to_ppb() - convert scaled ppm to ppb + * + * @ppm: Parts per million, but with a 16 bit binary fractional field + */ +static inline long scaled_ppm_to_ppb(long ppm) +{ + /* + * The 'freq' field in the 'struct timex' is in parts per + * million, but with a 16 bit binary fractional field. + * + * We want to calculate + * + * ppb = scaled_ppm * 1000 / 2^16 + * + * which simplifies to + * + * ppb = scaled_ppm * 125 / 2^13 + */ + s64 ppb = 1 + ppm; + + ppb *= 125; + ppb >>= 13; + return (long)ppb; +} + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) /** * ptp_clock_register() - register a PTP hardware clock driver @@ -230,14 +259,6 @@ extern void ptp_clock_event(struct ptp_clock *ptp, extern int ptp_clock_index(struct ptp_clock *ptp); /** - * scaled_ppm_to_ppb() - convert scaled ppm to ppb - * - * @ppm: Parts per million, but with a 16 bit binary fractional field - */ - -extern s32 scaled_ppm_to_ppb(long ppm); - -/** * ptp_find_pin() - obtain the pin index of a given auxiliary function * * The caller must hold ptp_clock::pincfg_mux. Drivers do not have @@ -305,6 +326,40 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +#endif + +#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK) +/* + * These are called by the network core, and don't work if PTP is in + * a loadable module. 
+ */ + +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. + */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); +#else +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 5bb90af4997e..725c9b784e60 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -54,12 +54,17 @@ enum { * @duty_cycle: PWM duty cycle (in nanoseconds) * @polarity: PWM polarity * @enabled: PWM enabled status + * @usage_power: If set, the PWM driver is only required to maintain the power + * output but has more freedom regarding signal form. + * If supported, the signal can be optimized, for example to + * improve EMI by phase shifting individual channels. */ struct pwm_state { u64 period; u64 duty_cycle; enum pwm_polarity polarity; bool enabled; + bool usage_power; }; /** @@ -188,6 +193,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm, state->period = args.period; state->polarity = args.polarity; state->duty_cycle = 0; + state->usage_power = false; } /** @@ -398,7 +404,10 @@ int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); int pwmchip_add(struct pwm_chip *chip); -int pwmchip_remove(struct pwm_chip *chip); +void pwmchip_remove(struct pwm_chip *chip); + +int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); + struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label); @@ -417,7 +426,6 @@ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, struct pwm_device *devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, const char *con_id); -void devm_pwm_put(struct device *dev, struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -524,10 +532,6 @@ devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, { return ERR_PTR(-ENODEV); } - -static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) -{ -} #endif static inline void pwm_apply_args(struct pwm_device *pwm) @@ -558,6 +562,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm) state.enabled = false; state.polarity = pwm->args.polarity; state.period = pwm->args.period; + state.usage_power = false; pwm_apply_state(pwm, &state); } diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 7f73b26ed22e..a3fec2de512f 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2003 Russell King, All Rights Reserved. + * Copyright (C) 2003 Russell King, All Rights Reserved. 
* * This driver supports the following PXA CPU/SSP ports:- * @@ -11,8 +11,8 @@ * PXA3xx SSP1, SSP2, SSP3, SSP4 */ -#ifndef __LINUX_SSP_H -#define __LINUX_SSP_H +#ifndef __LINUX_PXA2XX_SSP_H +#define __LINUX_PXA2XX_SSP_H #include <linux/bits.h> #include <linux/compiler_types.h> @@ -38,7 +38,6 @@ struct device_node; #define SSDR (0x10) /* SSP Data Write/Data Read Register */ #define SSTO (0x28) /* SSP Time Out Register */ -#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */ #define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ #define SSTSA (0x30) /* SSP Tx Timeslot Active */ #define SSRSA (0x34) /* SSP Rx Timeslot Active */ @@ -60,7 +59,7 @@ struct device_node; /* PXA27x, PXA3xx */ #define SSCR0_EDSS BIT(20) /* Extended data size select */ #define SSCR0_NCS BIT(21) /* Network clock select */ -#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */ +#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */ #define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */ #define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */ #define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ @@ -105,6 +104,9 @@ struct device_node; #define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */ #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ +/* Intel Quark X1000 */ +#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */ + /* QUARK_X1000 SSCR0 bit definition */ #define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */ #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ @@ -124,7 +126,7 @@ struct device_node; #define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */ #define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */ -/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ +/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ @@ -181,6 +183,21 @@ struct device_node; #define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ #define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */ +/* Intel Merrifield SSP */ +#define SFIFOL 0x68 /* FIFO level */ +#define SFIFOTT 0x6c /* FIFO trigger threshold */ + +#define RX_THRESH_MRFLD_DFLT 16 +#define TX_THRESH_MRFLD_DFLT 16 + +#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */ +#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */ + +#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */ +#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */ +#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */ +#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */ + /* LPSS SSP */ #define SSITF 0x44 /* TX FIFO trigger level */ #define SSITF_TxHiThresh(x) (((x) - 1) << 0) @@ -203,8 +220,10 @@ enum pxa_ssp_type { MMP2_SSP, PXA910_SSP, CE4100_SSP, + MRFLD_SSP, QUARK_X1000_SSP, - LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ + /* Keep LPSS types sorted with lpss_platforms[] */ + LPSS_LPT_SSP, LPSS_BYT_SSP, LPSS_BSW_SSP, LPSS_SPT_SSP, @@ -252,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg) return __raw_readl(dev->mmio_base + reg); } +static inline void pxa_ssp_enable(struct ssp_device *ssp) +{ + u32 sscr0; + + sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE; + 
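/* (Annotation, not in the original patch: enable/disable are plain read-modify-write updates of SSCR0 that toggle only the SSE port-enable bit; the write below commits the new value.) */ +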
pxa_ssp_write_reg(ssp, SSCR0, sscr0); +} + +static inline void pxa_ssp_disable(struct ssp_device *ssp) +{ + u32 sscr0; + + sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE; + pxa_ssp_write_reg(ssp, SSCR0, sscr0); +} + #if IS_ENABLED(CONFIG_PXA_SSP) struct ssp_device *pxa_ssp_request(int port, const char *label); void pxa_ssp_free(struct ssp_device *); @@ -270,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n, static inline void pxa_ssp_free(struct ssp_device *ssp) {} #endif -#endif +#endif /* __LINUX_PXA2XX_SSP_H */ diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 7c811eebcaab..f5672785c0c4 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -8,11 +8,24 @@ #include <linux/interconnect.h> -/* Transfer mode supported by GENI Serial Engines */ +/** + * enum geni_se_xfer_mode: Transfer modes supported by Serial Engines + * + * @GENI_SE_INVALID: Invalid mode + * @GENI_SE_FIFO: FIFO mode. Data is transferred with SE FIFO + * by programmed IO method + * @GENI_SE_DMA: Serial Engine DMA mode. Data is transferred + * with SE by DMAengine internal to SE + * @GENI_GPI_DMA: GPI DMA mode. Data is transferred using a DMAengine + * configured by a firmware residing on a GSI engine. This DMA name is + * interchangeably used as GSI or GPI which seem to imply the same DMAengine + */ + enum geni_se_xfer_mode { GENI_SE_INVALID, GENI_SE_FIFO, GENI_SE_DMA, + GENI_GPI_DMA, }; /* Protocols supported by GENI Serial Engines */ @@ -63,6 +76,7 @@ struct geni_se { #define SE_GENI_STATUS 0x40 #define GENI_SER_M_CLK_CFG 0x48 #define GENI_SER_S_CLK_CFG 0x4c +#define GENI_IF_DISABLE_RO 0x64 #define GENI_FW_REVISION_RO 0x68 #define SE_GENI_CLK_SEL 0x7c #define SE_GENI_DMA_MODE_EN 0x258 @@ -105,6 +119,9 @@ struct geni_se { #define CLK_DIV_MSK GENMASK(15, 4) #define CLK_DIV_SHFT 4 +/* GENI_IF_DISABLE_RO fields */ +#define FIFO_IF_DISABLE (BIT(0)) + /* GENI_FW_REVISION_RO fields */ #define FW_REV_PROTOCOL_MSK GENMASK(15, 8) #define FW_REV_PROTOCOL_SHFT 8 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 0165824c5128..c0475d1c9885 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -109,6 +109,12 @@ extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); + +extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version); +extern int qcom_scm_lmh_profile_change(u32 profile_id); +extern bool qcom_scm_lmh_dcvsh_available(void); + #else #include <linux/errno.h> @@ -170,5 +176,13 @@ static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; } + +static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version) + { return -ENODEV; } + +static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; } + +static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; } #endif #endif diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 977807e1be53..0a3807e927c5 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -702,7 +702,7 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { - PROTOCOLID_ISCSI, + PROTOCOLID_TCP_ULP, PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, diff --git 
a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h new file mode 100644 index 000000000000..5a2ab0606308 --- /dev/null +++ b/include/linux/qed/nvmetcp_common.h @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. */ + +#ifndef __NVMETCP_COMMON__ +#define __NVMETCP_COMMON__ + +#include "tcp_common.h" +#include <linux/nvme-tcp.h> + +#define NVMETCP_SLOW_PATH_LAYER_CODE (6) +#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf) + +/* NVMeTCP firmware function init parameters */ +struct nvmetcp_spe_func_init { + __le16 half_way_close_timeout; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 ll2_rx_queue_id; + u8 flags; +#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1 +#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1 +#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F +#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2 + u8 debug_flags; + __le16 reserved1; + u8 params; +#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF +#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0 +#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF +#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4 + u8 reserved2[5]; + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; +}; + +/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */ +struct nvmetcp_init_ramrod_params { + struct nvmetcp_spe_func_init nvmetcp_init_spe; + struct tcp_init_params tcp_init; +}; + +/* NVMeTCP Ramrod Command IDs */ +enum nvmetcp_ramrod_cmd_id { + NVMETCP_RAMROD_CMD_ID_UNUSED = 0, + NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1, + NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2, + NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3, + NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4, + NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5, + NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6, + MAX_NVMETCP_RAMROD_CMD_ID +}; + +struct nvmetcp_glbl_queue_entry { + struct regpair cq_pbl_addr; + struct regpair reserved; +}; + +/* NVMeTCP conn level EQEs */ +enum nvmetcp_eqe_opcode { + NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */ + NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */ + NVMETCP_EVENT_TYPE_OFFLOAD_CONN,/* Response after option 2 offload Ramrod */ + NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */ + NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */ + NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */ + NVMETCP_EVENT_TYPE_RESERVED0, + NVMETCP_EVENT_TYPE_RESERVED1, + NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */ + NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */ + NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */ + NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */ + NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */ + 
NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */ + MAX_NVMETCP_EQE_OPCODE +}; + +struct nvmetcp_conn_offload_section { + struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */ + __le16 cccid_max_range; /* CCCID max value - used for validation */ + __le16 reserved[3]; +}; + +/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */ +struct nvmetcp_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1 +#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3 +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF +#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4 + u8 default_cq; + __le16 reserved0; + __le32 reserved1; + __le32 initial_ack; + + struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */ +}; + +/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */ +struct nvmetcp_spe_conn_offload { + __le16 reserved; + __le16 conn_id; + __le32 fw_cid; + struct nvmetcp_conn_offload_params nvmetcp; + struct tcp_offload_params_opt2 tcp; +}; + +/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */ +struct nvmetcp_conn_update_ramrod_params { + __le16 reserved0; + __le16 conn_id; + __le32 reserved1; + u8 flags; +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1 +#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7 + u8 reserved3[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 reserved4[5]; +}; + +/* NVMeTCP connection termination request */ +struct nvmetcp_spe_conn_termination { + __le16 reserved0; + __le16 conn_id; + __le32 reserved1; + u8 abortive; + u8 reserved2[7]; + struct regpair reserved3; + struct regpair reserved4; +}; + +struct nvmetcp_dif_flags { + u8 flags; +}; + +enum nvmetcp_wqe_type { + NVMETCP_WQE_TYPE_NORMAL, + NVMETCP_WQE_TYPE_TASK_CLEANUP, + NVMETCP_WQE_TYPE_MIDDLE_PATH, + NVMETCP_WQE_TYPE_IC, + MAX_NVMETCP_WQE_TYPE +}; + +struct nvmetcp_wqe { + __le16 task_id; + u8 flags; +#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use 
nvmetcp_wqe_type] */ +#define NVMETCP_WQE_WQE_TYPE_SHIFT 0 +#define NVMETCP_WQE_NUM_SGES_MASK 0xF +#define NVMETCP_WQE_NUM_SGES_SHIFT 3 +#define NVMETCP_WQE_RESPONSE_MASK 0x1 +#define NVMETCP_WQE_RESPONSE_SHIFT 7 + struct nvmetcp_dif_flags prot_flags; + __le32 contlen_cdbsize; +#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF +#define NVMETCP_WQE_CONT_LEN_SHIFT 0 +#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF +#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24 +}; + +struct nvmetcp_host_cccid_itid_entry { + __le16 itid; +}; + +struct nvmetcp_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; +}; + +struct nvmetcp_eqe_data { + __le16 icid; + __le16 conn_id; + __le16 reserved; + u8 error_code; + u8 error_pdu_opcode_reserved; +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 +#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 +#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1 +#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7 +}; + +enum nvmetcp_task_type { + NVMETCP_TASK_TYPE_HOST_WRITE, + NVMETCP_TASK_TYPE_HOST_READ, + NVMETCP_TASK_TYPE_INIT_CONN_REQUEST, + NVMETCP_TASK_TYPE_RESERVED0, + NVMETCP_TASK_TYPE_CLEANUP, + NVMETCP_TASK_TYPE_HOST_READ_NO_CQE, + MAX_NVMETCP_TASK_TYPE +}; + +struct nvmetcp_db_data { + u8 params; +#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */ +#define NVMETCP_DB_DATA_DEST_SHIFT 0 +#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */ +#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2 +#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */ +#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4 +#define NVMETCP_DB_DATA_RESERVED_MASK 0x1 +#define NVMETCP_DB_DATA_RESERVED_SHIFT 5 +#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */ +#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */ + __le16 sq_prod; +}; + +struct nvmetcp_fw_nvmf_cqe { + __le32 reserved[4]; +}; + +struct nvmetcp_icresp_mdata { + u8 digest; + u8 cpda; + __le16 pfv; + __le32 maxdata; + __le16 rsvd[4]; +}; + +union nvmetcp_fw_cqe_data { + struct nvmetcp_fw_nvmf_cqe nvme_cqe; + struct nvmetcp_icresp_mdata icresp_mdata; +}; + +struct nvmetcp_fw_cqe { + __le16 conn_id; + u8 cqe_type; + u8 cqe_error_status_bits; +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 + __le16 itid; + u8 task_type; + u8 fw_dbg_field; + u8 caused_conn_err; + u8 reserved0[3]; + __le32 reserved1; + union nvmetcp_fw_cqe_data cqe_data; + struct regpair task_opaque; + __le32 reserved[6]; +}; + +enum nvmetcp_fw_cqes_type { + NVMETCP_FW_CQE_TYPE_NORMAL = 1, + NVMETCP_FW_CQE_TYPE_RESERVED0, + NVMETCP_FW_CQE_TYPE_RESERVED1, + NVMETCP_FW_CQE_TYPE_CLEANUP, + NVMETCP_FW_CQE_TYPE_DUMMY, + MAX_NVMETCP_FW_CQES_TYPE +}; + +struct ystorm_nvmetcp_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 reserved0; + __le32 buffer_offset; + __le16 cccid; + struct nvmetcp_dif_flags dif_flags; + u8 flags; +#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define
YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1 +#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3 +}; + +struct ystorm_nvmetcp_task_rxmit_opt { + __le32 reserved[4]; +}; + +struct nvmetcp_task_hdr { + __le32 reg[18]; +}; + +struct nvmetcp_task_hdr_aligned { + struct nvmetcp_task_hdr task_hdr; + __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */ +}; + +struct e5_tdif_task_context { + __le32 reserved[16]; +}; + +struct e5_rdif_task_context { + __le32 reserved[12]; +}; + +struct ystorm_nvmetcp_task_st_ctx { + struct ystorm_nvmetcp_task_state state; + struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt; + struct nvmetcp_task_hdr_aligned pdu_hdr; +}; + +struct mstorm_nvmetcp_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct nvmetcp_dif_flags dif_flags; + __le16 dif_task_icid; + struct regpair reserved0; + __le32 expected_itt; + __le32 reserved1; +}; + +struct ustorm_nvmetcp_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair reserved0; + __le32 reg1_map; +#define REG1_NUM_SGES_MASK 0xF +#define REG1_NUM_SGES_SHIFT 0 +#define REG1_RESERVED1_MASK 0xFFFFFFF +#define REG1_RESERVED1_SHIFT 4 + u8 flags2; +#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct nvmetcp_dif_flags dif_flags; + __le16 reserved3; + __le16 tqe_opaque[2]; + __le32 reserved5; + __le32 nvme_tcp_opaque_lo; + __le32 nvme_tcp_opaque_hi; + u8 task_type; + u8 error_flags; +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3 + u8 flags; +#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 + u8 cq_rss_number; +}; + +struct e5_ystorm_nvmetcp_task_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state_and_core_id */; + __le16 word0 /* icid */; + u8 flags0; + u8 flags1; + u8 flags2; + u8 flags3; + __le32 TTT; + u8 byte2; + u8 byte3; + u8 byte4; + u8 e4_reserved7; +}; + +struct e5_mstorm_nvmetcp_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define 
E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; + u8 flags3; + __le32 reg0; + u8 byte2; + u8 byte3; + u8 byte4; + u8 e4_reserved7; +}; + +struct e5_ustorm_nvmetcp_task_ag_ctx { + u8 reserved; + u8 state_and_core_id; + __le16 icid; + u8 flags0; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; + u8 flags4; +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_MASK 0x3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED5_SHIFT 0 +#define 
E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED6_SHIFT 2 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_MASK 0x1 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_E4_RESERVED7_SHIFT 3 +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + u8 byte2; + u8 byte3; + u8 e4_reserved8; + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + __le16 word1; + __le16 next_tid; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +struct e5_nvmetcp_task_context { + struct ystorm_nvmetcp_task_st_ctx ystorm_st_context; + struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct e5_tdif_task_context tdif_context; + struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context; + struct regpair ustorm_ag_padding[2]; + struct mstorm_nvmetcp_task_st_ctx mstorm_st_context; + struct regpair mstorm_st_padding[2]; + struct ustorm_nvmetcp_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct e5_rdif_task_context rdif_context; +}; + +#endif /* __NVMETCP_COMMON__*/ diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 68d17a4fbf20..850b98991670 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -542,6 +542,22 @@ struct qed_iscsi_pf_params { u8 bdq_pbl_num_entries[3]; }; +struct qed_nvmetcp_pf_params { + u64 glbl_q_params_addr; + u16 cq_num_entries; + u16 num_cons; + u16 num_tasks; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + u16 min_rto; +}; + struct qed_rdma_pf_params { /* Supplied to QED during resource allocation (may affect the ILT and * the doorbell BAR). @@ -560,6 +576,7 @@ struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; + struct qed_nvmetcp_pf_params nvmetcp_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -662,6 +679,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, + QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI, QED_PROTOCOL_FCOE, }; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ea273ba1c991..ff808d248883 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -18,7 +18,7 @@ enum qed_ll2_conn_type { QED_LL2_TYPE_FCOE, - QED_LL2_TYPE_ISCSI, + QED_LL2_TYPE_TCP_ULP, QED_LL2_TYPE_TEST, QED_LL2_TYPE_OOO, QED_LL2_TYPE_RESERVED2, diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h new file mode 100644 index 000000000000..14671bc19ed1 --- /dev/null +++ b/include/linux/qed/qed_nvmetcp_if.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. 
*/ + +#ifndef _QED_NVMETCP_IF_H +#define _QED_NVMETCP_IF_H +#include <linux/types.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/storage_common.h> +#include <linux/qed/nvmetcp_common.h> + +#define QED_NVMETCP_MAX_IO_SIZE 0x800000 +#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr)) +#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu)) +#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16)) + +typedef int (*nvmetcp_event_cb_t) (void *context, + u8 fw_event_code, void *fw_handle); + +struct qed_dev_nvmetcp_info { + struct qed_dev_info common; + u8 port_id; /* Physical port */ + u8 num_cqs; +}; + +#define MAX_TID_BLOCKS_NVMETCP (512) +struct qed_nvmetcp_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_NVMETCP]; +}; + +struct qed_nvmetcp_id_params { + u8 mac[ETH_ALEN]; + u32 ip[4]; + u16 port; +}; + +struct qed_nvmetcp_params_offload { + /* FW initializations */ + dma_addr_t sq_pbl_addr; + dma_addr_t nvmetcp_cccid_itid_table_addr; + u16 nvmetcp_cccid_max_range; + u8 default_cq; + + /* Networking and TCP stack initializations */ + struct qed_nvmetcp_id_params src; + struct qed_nvmetcp_id_params dst; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 cwnd; + u16 mss; + u16 vlan_id; + bool timestamp_en; + bool delayed_ack_en; + bool tcp_keep_alive_en; + bool ecn_en; + u8 ip_version; + u8 ka_max_probe_cnt; + u8 ttl; + u8 tos_or_tc; + u8 rcv_wnd_scale; +}; + +struct qed_nvmetcp_params_update { + u32 max_io_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + + /* Placeholder: pfv, cpda, hpda */ + + bool hdr_digest_en; + bool data_digest_en; +}; + +struct qed_nvmetcp_cb_ops { + struct qed_common_cb_ops common; +}; + +struct nvmetcp_sge { + struct regpair sge_addr; /* SGE address */ + __le32 sge_len; /* SGE length */ + __le32 reserved; +}; + +/* IO path HSI function SGL params */ +struct storage_sgl_task_params { + struct nvmetcp_sge *sgl; + struct regpair sgl_phys_addr; + u32 total_buffer_size; + u16 num_sges; + bool small_mid_sge; +}; + +/* IO path HSI function FW task context params */ +struct nvmetcp_task_params { + void *context; /* Output parameter - set/filled by the HSI function */ + struct nvmetcp_wqe *sqe; + u32 tx_io_size; /* in bytes (Without DIF, if exists) */ + u32 rx_io_size; /* in bytes (Without DIF, if exists) */ + u16 conn_icid; + u16 itid; + struct regpair opq; /* qedn_task_ctx address */ + u16 host_cccid; + u8 cq_rss_number; + bool send_write_incapsule; +}; + +/** + * struct qed_nvmetcp_ops - qed NVMeTCP operations. + * @common: common operations pointer + * @ll2: light L2 operations pointer + * @fill_dev_info: fills NVMeTCP specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value. + * @register_ops: register nvmetcp operations + * @param cdev + * @param ops - specified using qed_nvmetcp_cb_ops + * @param cookie - driver private + * @start: starts nvmetcp in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: stops nvmetcp in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new nvmetcp connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * @return 0 on success, otherwise error value.
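The remaining ops are documented below; first, a minimal sketch of how a consumer driver might bind to this table. The qedn_setup() function and its error label are invented for illustration; the qed_get_nvmetcp_ops()/qed_put_nvmetcp_ops() entry points and the ops members are the ones declared in this header.

#include <linux/qed/qed_nvmetcp_if.h>

static const struct qed_nvmetcp_ops *qed_ops;

/* Hypothetical probe path of a vendor driver consuming this ops table. */
static int qedn_setup(struct qed_dev *cdev)
{
	struct qed_dev_nvmetcp_info dev_info;
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	qed_ops = qed_get_nvmetcp_ops();
	if (!qed_ops)
		return -ENODEV;

	/* Query the physical port id and the number of CQs exposed. */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto out_put;

	/* Obtain a connection handle and its doorbell address. */
	rc = qed_ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		goto out_put;

	return 0;

out_put:
	qed_put_nvmetcp_ops();
	return rc;
}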
+ * @release_conn: release a previously acquired nvmetcp connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @update_conn: updates an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @clear_sq: clear all tasks in the sq + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @add_src_tcp_port_filter: Add source tcp port filter + * @param cdev + * @param src_port + * @remove_src_tcp_port_filter: Remove source tcp port filter + * @param cdev + * @param src_port + * @add_dst_tcp_port_filter: Add destination tcp port filter + * @param cdev + * @param dest_port + * @remove_dst_tcp_port_filter: Remove destination tcp port filter + * @param cdev + * @param dest_port + * @clear_all_filters: Clear all filters. + * @param cdev + */ +struct qed_nvmetcp_ops { + const struct qed_common_ops *common; + + const struct qed_ll2_ops *ll2; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_nvmetcp_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_nvmetcp_cb_ops *ops, void *cookie); + + int (*start)(struct qed_dev *cdev, + struct qed_nvmetcp_tid *tasks, + void *event_context, nvmetcp_event_cb_t async_event_cb); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_nvmetcp_params_offload *conn_info); + + int (*update_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_nvmetcp_params_update *conn_info); + + int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); + + int (*clear_sq)(struct qed_dev *cdev, u32 handle); + + int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port); + + void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port); + + int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port); + + void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port); + + void (*clear_all_filters)(struct qed_dev *cdev); + + void (*init_read_io)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); + + void (*init_write_io)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); + + void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params, + struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr, + struct storage_sgl_task_params *tx_sgl_task_params, + struct storage_sgl_task_params *rx_sgl_task_params); + + void (*init_task_cleanup)(struct nvmetcp_task_params *task_params); +}; + +const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void); +void qed_put_nvmetcp_ops(void); +#endif diff --git
a/include/linux/qed/qed_nvmetcp_ip_services_if.h b/include/linux/qed/qed_nvmetcp_ip_services_if.h new file mode 100644 index 000000000000..3604aee53796 --- /dev/null +++ b/include/linux/qed/qed_nvmetcp_ip_services_if.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * Copyright 2021 Marvell. All rights reserved. + */ + +#ifndef _QED_IP_SERVICES_IF_H +#define _QED_IP_SERVICES_IF_H + +#include <linux/types.h> +#include <net/route.h> +#include <net/ip6_route.h> +#include <linux/inetdevice.h> + +int qed_route_ipv4(struct sockaddr_storage *local_addr, + struct sockaddr_storage *remote_addr, + struct sockaddr *hardware_address, + struct net_device **ndev); +int qed_route_ipv6(struct sockaddr_storage *local_addr, + struct sockaddr_storage *remote_addr, + struct sockaddr *hardware_address, + struct net_device **ndev); +void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id); +struct pci_dev *qed_validate_ndev(struct net_device *ndev); +void qed_return_tcp_port(struct socket *sock); +int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr, + struct socket **sock, u16 *port); +__be16 qed_get_in_port(struct sockaddr_storage *sa); + +#endif /* _QED_IP_SERVICES_IF_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d31ecaf4fdd3..235047d7a1b5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -17,24 +17,14 @@ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H +#include <linux/rbtree_types.h> + #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/rcupdate.h> -struct rb_node { - unsigned long __rb_parent_color; - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root { - struct rb_node *rb_node; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) -#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } - /* Same as rb_first(), but O(1) */ #define rb_first_cached(root) (root)->rb_leftmost diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h new file mode 100644 index 000000000000..45b6ecde3665 --- /dev/null +++ b/include/linux/rbtree_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_RBTREE_TYPES_H +#define _LINUX_RBTREE_TYPES_H + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); +/* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. 
+ * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. + */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +#endif diff --git a/include/linux/rculist.h b/include/linux/rculist.h index f8633d37e358..d29740be4833 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -11,15 +11,6 @@ #include <linux/rcupdate.h> /* - * Why is there no list_empty_rcu()? Because list_empty() serves this - * purpose. The list_empty() function fetches the RCU-protected pointer - * and compares it to the address of the list head, but neither dereferences - * this pointer itself nor provides this pointer to the caller. Therefore, - * it is not necessary to use rcu_dereference(), so that list_empty() can - * be used anywhere you would want to use a list_empty_rcu(). - */ - -/* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized * @@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, /* * Where are list_empty_rcu() and list_first_entry_rcu()? * - * Implementing those functions following their counterparts list_empty() and - * list_first_entry() is not advisable because they lead to subtle race - * conditions as the following snippet shows: + * They do not exist because they would lead to subtle race conditions: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * - * The list may not be empty when list_empty_rcu checks it, but it may be when - * list_first_entry_rcu rereads the ->next pointer. - * - * Rereading the ->next pointer is not a problem for list_empty() and - * list_first_entry() because they would be protected by a lock that blocks - * writers. + * The list might be non-empty when list_empty_rcu() checks it, but it + * might have become empty by the time that list_first_entry_rcu() rereads + * the ->next pointer, which would result in a SEGV. + * + * When not using RCU, it is OK for list_first_entry() to re-read that + * pointer because both functions should be protected by some lock that + * blocks writers. + * + * When using RCU, list_empty() uses READ_ONCE() to fetch the + * RCU-protected ->next pointer and then compares it to the address of the + * list head. However, it neither dereferences this pointer nor provides + * this pointer to its caller. Thus, READ_ONCE() suffices (that is, + * rcu_dereference() is not needed), which means that list_empty() can be + * used anywhere you would want to use list_empty_rcu(). Just don't + * expect anything useful to happen if you do a subsequent lockless + * call to list_first_entry_rcu()!!! * * See list_first_or_null_rcu for an alternative. */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9455476c5ba2..434d12fe2d4f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -53,7 +53,7 @@ void __rcu_read_unlock(void); * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
*/ -#define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void); # define synchronize_rcu_tasks synchronize_rcu # endif -# ifdef CONFIG_TASKS_RCU_TRACE +# ifdef CONFIG_TASKS_TRACE_RCU # define rcu_tasks_trace_qs(t) \ do { \ if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ @@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void) #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data.unlikely") __warned; \ - if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ + if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ @@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ +/** + * unrcu_pointer - mark a pointer as not being RCU protected + * @p: pointer needing to lose its __rcu property + * + * Converts @p from an __rcu pointer to a __kernel pointer. + * This allows an __rcu pointer to be used with xchg() and friends. + */ +#define unrcu_pointer(p) \ +({ \ + typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \ + rcu_check_sparse(p, __rcu); \ + ((typeof(*p) __force __kernel *)(_________p1)); \ +}) + #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ @@ -518,7 +532,12 @@ do { \ * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * - * This is the RCU-bh counterpart to rcu_dereference_check(). + * This is the RCU-bh counterpart to rcu_dereference_check(). However, + * please note that starting in v5.0 kernels, vanilla RCU grace periods + * wait for local_bh_disable() regions of code in addition to regions of + * code demarked by rcu_read_lock() and rcu_read_unlock(). This means + * that synchronize_rcu(), call_rcu(), and friends all take not only + * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) @@ -529,6 +548,11 @@ do { \ * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). + * However, please note that starting in v5.0 kernels, vanilla RCU grace + * periods wait for preempt_disable() regions of code in addition to + * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). + * This means that synchronize_rcu(), call_rcu(), and friends all take not + * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ @@ -620,6 +644,12 @@ do { \ * sections, invocation of the corresponding RCU callback is deferred * until after all the other CPUs exit their critical sections. * + * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also + * wait for regions of code with preemption disabled, including regions of + * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which + * define synchronize_sched(), only code enclosed within rcu_read_lock() + * and rcu_read_unlock() is guaranteed to be waited for. + * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections.
One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU @@ -672,33 +702,12 @@ static __always_inline void rcu_read_lock(void) /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * - * In most situations, rcu_read_unlock() is immune from deadlock. - * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() - * is responsible for deboosting, which it does via rt_mutex_unlock(). - * Unfortunately, this function acquires the scheduler's runqueue and - * priority-inheritance spinlocks. This means that deadlock could result - * if the caller of rcu_read_unlock() already holds one of these locks or - * any lock that is ever acquired while holding them. - * - * That said, RCU readers are never priority boosted unless they were - * preempted. Therefore, one way to avoid deadlock is to make sure - * that preemption never happens within any RCU read-side critical - * section whose outermost rcu_read_unlock() is called with one of - * rt_mutex_unlock()'s locks held. Such preemption can be avoided in - * a number of ways, for example, by invoking preempt_disable() before - * critical section's outermost rcu_read_lock(). - * - * Given that the set of locks acquired by rt_mutex_unlock() might change - * at any time, a somewhat more future-proofed approach is to make sure - * that that preemption never happens within any RCU read-side critical - * section whose outermost rcu_read_unlock() is called with irqs disabled. - * This approach relies on the fact that rt_mutex_unlock() currently only - * acquires irq-disabled locks. - * - * The second of these two approaches is best in most situations, - * however, the first approach can also be useful, at least to those - * developers willing to keep abreast of the set of locks acquired by - * rt_mutex_unlock(). + * In almost all situations, rcu_read_unlock() is immune from deadlock. + * In recent kernels that have consolidated synchronize_sched() and + * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity + * also extends to the scheduler's runqueue and priority-inheritance + * spinlocks, courtesy of the quiescent-state deferral that is carried + * out when rcu_read_unlock() is invoked with interrupts disabled. * * See rcu_read_lock() for more information. */ @@ -714,9 +723,11 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent of rcu_read_lock(), but also disables softirqs. - * Note that anything else that disables softirqs can also serve as - * an RCU read-side critical section. + * This is equivalent to rcu_read_lock(), but also disables softirqs. + * Note that anything else that disables softirqs can also serve as an RCU + * read-side critical section. However, please note that this equivalence + * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and + * rcu_read_lock_bh() were unrelated. * * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke @@ -749,9 +760,12 @@ static inline void rcu_read_unlock_bh(void) /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * - * This is equivalent of rcu_read_lock(), but disables preemption. - * Read-side critical sections can also be introduced by anything else - * that disables preemption, including local_irq_disable() and friends. + * This is equivalent to rcu_read_lock(), but also disables preemption. 
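Stepping back briefly, the unrcu_pointer() macro added earlier in this hunk is easiest to see in use. A minimal sketch, assuming a hypothetical struct foo and a global __rcu pointer, of atomically exchanging the pointer with xchg(); RCU_INITIALIZER() and unrcu_pointer() keep the conversions sparse-clean:

#include <linux/atomic.h>
#include <linux/rcupdate.h>

struct foo {
	int data;
};

static struct foo __rcu *global_foo;

/*
 * Atomically publish a new object and return the previous one, e.g. so
 * that the caller can free it after a grace period.
 */
static struct foo *exchange_foo(struct foo *newp)
{
	/*
	 * xchg() traffics in plain pointers: RCU_INITIALIZER() adds __rcu
	 * to the new value, unrcu_pointer() strips __rcu from the result.
	 */
	return unrcu_pointer(xchg(&global_foo, RCU_INITIALIZER(newp)));
}

The rcu_read_lock_sched() notes continue below.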
+ * Read-side critical sections can also be introduced by anything else that + * disables preemption, including local_irq_disable() and friends. However, + * please note that the equivalence to rcu_read_lock() applies only to + * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() + * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 35e0be326ffc..9be015305f9f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -14,9 +14,6 @@ #include <asm/param.h> /* for HZ */ -/* Never flag non-existent other CPUs! */ -static inline bool rcu_eqs_special_set(int cpu) { return false; } - unsigned long get_state_synchronize_rcu(void); unsigned long start_poll_synchronize_rcu(void); bool poll_state_synchronize_rcu(unsigned long oldstate); @@ -86,7 +83,6 @@ static inline void rcu_irq_enter(void) { } static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } -static inline void rcu_irq_exit_preempt(void) { } static inline void rcu_irq_exit_check_preempt(void) { } #define rcu_is_idle_cpu(cpu) \ (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq()) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index b89b54130f49..53209d669400 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -49,7 +49,6 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); -void rcu_irq_exit_preempt(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); bool rcu_is_idle_cpu(int cpu); diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 3734cd8f38a8..af907a3d68d1 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -79,6 +79,7 @@ extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; extern void orderly_poweroff(bool force); extern void orderly_reboot(void); +void hw_protection_shutdown(const char *reason, int ms_until_forced); /* * Emergency restart, callable from an interrupt handler. diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f87a11a5cc4a..e3c9a25a853a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -27,6 +27,7 @@ struct device_node; struct i2c_client; struct i3c_device; struct irq_domain; +struct mdio_device; struct slim_device; struct spi_device; struct spmi_device; @@ -343,6 +344,7 @@ typedef void (*regmap_unlock)(void *); * @ranges: Array of configuration entries for virtual address ranges. * @num_ranges: Number of range configuration entries. * @use_hwlock: Indicate if a hardware spinlock should be used. + * @use_raw_spinlock: Indicate if a raw spinlock should be used. * @hwlock_id: Specify the hardware spinlock id. * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, * HWLOCK_IRQ or 0. @@ -402,6 +404,7 @@ struct regmap_config { unsigned int num_ranges; bool use_hwlock; + bool use_raw_spinlock; unsigned int hwlock_id; unsigned int hwlock_mode; @@ -502,6 +505,7 @@ typedef void (*regmap_hw_free_context)(void *context); * DEFAULT, BIG is assumed. * @max_raw_read: Max raw read size that can be used on the bus. * @max_raw_write: Max raw write size that can be used on the bus. 
+ * @free_on_exit: kfree this on exit of regmap */ struct regmap_bus { bool fast_io; @@ -519,6 +523,7 @@ struct regmap_bus { enum regmap_endian val_format_endian_default; size_t max_raw_read; size_t max_raw_write; + bool free_on_exit; }; /* @@ -538,6 +543,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__regmap_init_sccb(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -594,6 +603,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -698,6 +711,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, i2c, config) /** + * regmap_init_mdio() - Initialise register map + * + * @mdio_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_mdio(mdio_dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \ + mdio_dev, config) + +/** * regmap_init_sccb() - Initialise register map * * @i2c: Device that will be interacted with @@ -889,6 +915,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); i2c, config) /** + * devm_regmap_init_mdio() - Initialise managed register map + * + * @mdio_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_mdio(mdio_dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \ + mdio_dev, config) + +/** * devm_regmap_init_sccb() - Initialise managed register map * * @i2c: Device that will be interacted with @@ -1231,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field); int regmap_field_bulk_alloc(struct regmap *regmap, struct regmap_field **rm_field, - struct reg_field *reg_field, + const struct reg_field *reg_field, int num_fields); void regmap_field_bulk_free(struct regmap_field *field); int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap, struct regmap_field **field, - struct reg_field *reg_field, int num_fields); + const struct reg_field *reg_field, + int num_fields); void devm_regmap_field_bulk_free(struct device *dev, struct regmap_field *field); @@ -1411,6 +1452,7 @@ struct regmap_irq_sub_irq_map { * @not_fixed_stride: Used when chip peripherals are not laid out with fixed * stride. Must be used with sub_reg_offsets containing the * offsets to each peripheral. + * @status_invert: Inverted status register: cleared bits are active interrupts. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. 
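To illustrate the MDIO regmap initializers documented above, a minimal sketch of a hypothetical MDIO device probe; the driver name and config values are illustrative assumptions, while devm_regmap_init_mdio() and regmap_read() are the APIs declared above:

#include <linux/err.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/regmap.h>

static const struct regmap_config example_phy_regmap_cfg = {
	.reg_bits = 5,	/* Clause-22 register address width (assumption) */
	.val_bits = 16,	/* MDIO registers are 16 bits wide */
};

/* Hypothetical probe of an MDIO device wanting regmap-based access. */
static int example_phy_probe(struct mdio_device *mdiodev)
{
	struct regmap *map;
	unsigned int id;

	map = devm_regmap_init_mdio(mdiodev, &example_phy_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* From here on the generic regmap API applies, e.g. reading PHYID1. */
	return regmap_read(map, MII_PHYSID1, &id);
}

Because this is the devm_ variant, the map's lifetime is tied to the device and no explicit teardown is needed.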
@@ -1463,6 +1505,7 @@ struct regmap_irq_chip { bool type_in_mask:1; bool clear_on_unmask:1; bool not_fixed_stride:1; + bool status_invert:1; int num_regs; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 20e84a84fb77..bbf6590a6dec 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -119,6 +119,16 @@ struct regulator_dev; #define REGULATOR_EVENT_PRE_DISABLE 0x400 #define REGULATOR_EVENT_ABORT_DISABLE 0x800 #define REGULATOR_EVENT_ENABLE 0x1000 +/* + * The following notifications should be emitted only if the detected + * condition is such that the HW is likely to still be working but consumers + * should take a recovery action to prevent problems escalating into errors. + */ +#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000 +#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000 +#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000 +#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000 +#define REGULATOR_EVENT_WARN_MASK 0x1E000 /* * Regulator errors that can be queried using regulator_get_error_flags @@ -138,6 +148,10 @@ struct regulator_dev; #define REGULATOR_ERROR_FAIL BIT(4) #define REGULATOR_ERROR_OVER_TEMP BIT(5) +#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6) +#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7) +#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8) +#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9) /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event @@ -208,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev, int devm_regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); -void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id); int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id); -void devm_regulator_bulk_unregister_supply_alias(struct device *dev, - const char *const *id, - int num_id); /* regulator output control and status */ int __must_check regulator_enable(struct regulator *regulator); @@ -394,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id) -{ -} - static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, @@ -408,11 +412,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_bulk_unregister_supply_alias( - struct device *dev, const char *const *id, int num_id) -{ -} - static inline int regulator_enable(struct regulator *regulator) { return 0; diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h index 5f86824bd117..73291f280a23 100644 --- a/include/linux/regulator/coupler.h +++ b/include/linux/regulator/coupler.h @@ -52,7 +52,6 @@ struct regulator_coupler { #ifdef CONFIG_REGULATOR int regulator_coupler_register(struct regulator_coupler *coupler); -const char *rdev_get_name(struct regulator_dev *rdev); int regulator_check_consumers(struct regulator_dev *rdev, int *min_uV, int *max_uV, suspend_state_t state); @@ -69,10 +68,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler) { return 0; } -static inline const char *rdev_get_name(struct regulator_dev *rdev) -{ - return NULL; -} static inline int
regulator_check_consumers(struct regulator_dev *rdev, int *min_uV, int *max_uV, suspend_state_t state) diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4ea520c248e9..bd7a73db2e66 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -40,6 +40,15 @@ enum regulator_status { REGULATOR_STATUS_UNDEFINED, }; +enum regulator_detection_severity { + /* Hardware shuts down voltage outputs if the condition is detected */ + REGULATOR_SEVERITY_PROT, + /* Hardware is probably damaged/inoperable */ + REGULATOR_SEVERITY_ERR, + /* Hardware is still recoverable but recovery action must be taken */ + REGULATOR_SEVERITY_WARN, +}; + /* Initialize struct linear_range for regulators */ #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ { \ @@ -78,8 +87,25 @@ enum regulator_status { * @get_current_limit: Get the configured limit for a current-limited regulator. * @set_input_current_limit: Configure an input limit. * - * @set_over_current_protection: Support capability of automatically shutting - * down when detecting an over current event. + * @set_over_current_protection: Support enabling of and setting limits for over + * current situation detection. Detection can be configured for three + * levels of severity. + * REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s). + * REGULATOR_SEVERITY_ERR should indicate that the over-current situation is + * caused by an unrecoverable error but HW does not perform + * automatic shut down. + * REGULATOR_SEVERITY_WARN should indicate a situation where the hardware is + * still believed to not be damaged but a board specific + * recovery action is needed. If lim_uA is 0 the limit should not + * be changed but the detection should just be enabled/disabled as + * is requested. + * @set_over_voltage_protection: Support enabling of and setting limits for over + * voltage situation detection. Detection can be configured for the same + * severities as over current protection. + * @set_under_voltage_protection: Support enabling of and setting limits for + * under voltage situation detection. + * @set_thermal_protection: Support enabling of and setting limits for over + * temperature situation detection. * * @set_active_discharge: Set active discharge enable/disable of regulators. * @@ -143,8 +169,15 @@ struct regulator_ops { int (*get_current_limit) (struct regulator_dev *); int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); - int (*set_over_current_protection) (struct regulator_dev *); - int (*set_active_discharge) (struct regulator_dev *, bool enable); + int (*set_over_current_protection)(struct regulator_dev *, int lim_uA, + int severity, bool enable); + int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV, + int severity, bool enable); + int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV, + int severity, bool enable); + int (*set_thermal_protection)(struct regulator_dev *, int lim, + int severity, bool enable); + int (*set_active_discharge)(struct regulator_dev *, bool enable); /* enable/disable regulator */ int (*enable) (struct regulator_dev *); @@ -304,6 +337,12 @@ enum regulator_type { * @pull_down_val_on: Enabling value for control when using regmap * set_pull_down * + * @ramp_reg: Register for controlling the regulator ramp-rate. + * @ramp_mask: Bitmask for the ramp-rate control register. + * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values + * should be given in units of V/S (uV/uS).
See the + * regulator_set_ramp_delay_regmap(). + * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator @@ -413,6 +452,129 @@ struct regulator_config { struct gpio_desc *ena_gpiod; }; +/** + * struct regulator_err_state - regulator error/notification status + * + * @rdev: Regulator whose status the struct indicates. + * @notifs: Events which have occurred on the regulator. + * @errors: Errors which are active on the regulator. + * @possible_errs: Errors which can be signaled (by given IRQ). + */ +struct regulator_err_state { + struct regulator_dev *rdev; + unsigned long notifs; + unsigned long errors; + int possible_errs; +}; + +/** + * struct regulator_irq_data - regulator error/notification status data + * + * @states: Status structs for each of the associated regulators. + * @num_states: Number of associated regulators. + * @data: Driver data pointer given at regulator_irq_desc. + * @opaque: Value storage for IC driver. Core does not update this. ICs + * may want to store the status register value here at map_event and + * compare the contents at the 'renable' callback to see if new problems + * have been added to status. If that is the case it may be + * desirable to return REGULATOR_ERROR_CLEARED and not + * REGULATOR_ERROR_ON to allow the IRQ to fire again and to generate + * notifications also for the new issues. + * + * This structure is passed to 'map_event' and 'renable' callbacks for + * reporting regulator status to the core. + */ +struct regulator_irq_data { + struct regulator_err_state *states; + int num_states; + void *data; + long opaque; +}; + +/** + * struct regulator_irq_desc - notification sender for IRQ based events. + * + * @name: The visible name for the IRQ + * @fatal_cnt: If this IRQ is used to signal a HW damaging condition it may be + * best to shut-down regulator(s) or reboot the SOC if error + * handling is repeatedly failing. If fatal_cnt is given the IRQ + * handling is aborted if it fails for fatal_cnt times and die() + * callback (if populated) or BUG() is called to try to prevent + * further damage. + * @reread_ms: The time to wait before attempting to re-read the status + * at the worker if IC reading fails. Immediate re-read is done + * if time is not specified. + * @irq_off_ms: The time for which the IRQ is kept disabled before re-evaluating the + * status for devices which keep the IRQ disabled for the duration of the + * error. If this is not given the IRQ is left enabled and renable + * is not called. + * @skip_off: If set to true the IRQ handler will attempt to check if any of + * the associated regulators are enabled prior to taking other + * actions. If no regulators are enabled and this is set to true + * a spurious IRQ is assumed and IRQ_NONE is returned. + * @high_prio: Boolean to indicate that high priority WQ should be used. + * @data: Driver private data pointer which will be passed as such to + * the renable, map_event and die callbacks in regulator_irq_data. + * @die: Protection callback. If IC status reading or recovery actions + * fail fatal_cnt times this callback or BUG() is called. This + * callback should implement a final protection attempt like + * disabling the regulator. If protection succeeded this may + * return 0. If anything else is returned the core assumes final + * protection failed and calls BUG() as a last resort. + * @map_event: Driver callback to map IRQ status into regulator devices with + * events / errors.
NOTE: callback MUST initialize both the + * errors and notifs for all rdevs which it signals having + * active events as core does not clean the map data. + * REGULATOR_FAILED_RETRY can be returned to indicate that the + * status reading from IC failed. If this is repeated for + * fatal_cnt times the core will call die() callback or power-off + * the system as a last resort to protect the HW. + * @renable: Optional callback to check status (if HW supports that) before + * re-enabling IRQ. If implemented this should clear the error + * flags so that errors fetched by regulator_get_error_flags() + * are updated. If callback is not implemented then errors are + * assumed to be cleared and IRQ is re-enabled. + * REGULATOR_FAILED_RETRY can be returned to + * indicate that the status reading from IC failed. If this is + * repeated for 'fatal_cnt' times the core will call die() + * callback or if die() is not populated then attempt to power-off + * the system as a last resort to protect the HW. + * Returning zero indicates that the problem in HW has been solved + * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON + * indicates the error condition is still active and keeps IRQ + * disabled. Please note that returning REGULATOR_ERROR_ON does + * not retrigger evaluating what events are active or resending + * notifications. If this is needed you probably want to return + * zero and allow IRQ to retrigger causing events to be + * re-evaluated and re-sent. + * + * This structure is used for registering regulator IRQ notification helper. + */ +struct regulator_irq_desc { + const char *name; + int irq_flags; + int fatal_cnt; + int reread_ms; + int irq_off_ms; + bool skip_off; + bool high_prio; + void *data; + + int (*die)(struct regulator_irq_data *rid); + int (*map_event)(int irq, struct regulator_irq_data *rid, + unsigned long *dev_mask); + int (*renable)(struct regulator_irq_data *rid); +}; + +/* + * Return values for regulator IRQ helpers. 
+ */ +enum { + REGULATOR_ERROR_CLEARED, + REGULATOR_FAILED_RETRY, + REGULATOR_ERROR_ON, +}; + /* * struct coupling_desc * @@ -477,6 +639,9 @@ struct regulator_dev { /* time when this regulator was disabled last time */ ktime_t last_off; + int cached_err; + bool use_cached_err; + spinlock_t err_lock; }; struct regulator_dev * @@ -487,10 +652,19 @@ devm_regulator_register(struct device *dev, const struct regulator_desc *regulator_desc, const struct regulator_config *config); void regulator_unregister(struct regulator_dev *rdev); -void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); +void *devm_regulator_irq_helper(struct device *dev, + const struct regulator_irq_desc *d, int irq, + int irq_flags, int common_errs, + int *per_rdev_errs, struct regulator_dev **rdev, + int rdev_amount); +void *regulator_irq_helper(struct device *dev, + const struct regulator_irq_desc *d, int irq, + int irq_flags, int common_errs, int *per_rdev_errs, + struct regulator_dev **rdev, int rdev_amount); +void regulator_irq_helper_cancel(void **handle); void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); @@ -540,6 +714,7 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev, int regulator_get_current_limit_regmap(struct regulator_dev *rdev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay); +int regulator_sync_voltage_rdev(struct regulator_dev *rdev); /* * Helper functions intended to be used by regulator drivers prior registering @@ -550,4 +725,14 @@ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, int regulator_desc_list_voltage_linear(const struct regulator_desc *desc, unsigned int selector); + +#ifdef CONFIG_REGULATOR +const char *rdev_get_name(struct regulator_dev *rdev); +#else +static inline const char *rdev_get_name(struct regulator_dev *rdev) +{ + return NULL; +} +#endif + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 8a56f033b6cd..621b7f4a3639 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -83,6 +83,14 @@ struct regulator_state { bool changeable; }; +#define REGULATOR_NOTIF_LIMIT_DISABLE -1 +#define REGULATOR_NOTIF_LIMIT_ENABLE -2 +struct notification_limit { + int prot; + int err; + int warn; +}; + /** * struct regulation_constraints - regulator operating constraints. * @@ -100,6 +108,11 @@ struct regulator_state { * @ilim_uA: Maximum input current. * @system_load: Load that isn't captured by any consumer requests. * + * @over_curr_limits: Limits for acting on over current. + * @over_voltage_limits: Limits for acting on over voltage. + * @under_voltage_limits: Limits for acting on under voltage. + * @temp_limits: Limits for acting on over temperature. + * * @max_spread: Max possible spread between coupled regulators * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. @@ -116,6 +129,11 @@ struct regulator_state { * @pull_down: Enable pull down when regulator is disabled. * @over_current_protection: Auto disable on over current event. * + * @over_current_detection: Configure over current limits. + * @over_voltage_detection: Configure over voltage limits. + * @under_voltage_detection: Configure under voltage limits. 
+ * @over_temp_detection: Configure over temperature limits. + * * @input_uV: Input voltage for regulator when supplied by another regulator. * * @state_disk: State for regulator when system is suspended in disk mode. @@ -172,6 +190,10 @@ struct regulation_constraints { struct regulator_state state_disk; struct regulator_state state_mem; struct regulator_state state_standby; + struct notification_limit over_curr_limits; + struct notification_limit over_voltage_limits; + struct notification_limit under_voltage_limits; + struct notification_limit temp_limits; suspend_state_t initial_state; /* suspend state to set at init */ /* mode to set on startup */ @@ -193,6 +215,10 @@ struct regulation_constraints { unsigned soft_start:1; /* ramp voltage slowly */ unsigned pull_down:1; /* pull down resistor when regulator off */ unsigned over_current_protection:1; /* auto disable on over current */ + unsigned over_current_detection:1; /* notify on over current */ + unsigned over_voltage_detection:1; /* notify on over voltage */ + unsigned under_voltage_detection:1; /* notify on under voltage */ + unsigned over_temp_detection:1; /* notify on over temperature */ }; /** diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h new file mode 100644 index 000000000000..6d6e5a58f482 --- /dev/null +++ b/include/linux/regulator/mt6359-regulator.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#ifndef __LINUX_REGULATOR_MT6359_H +#define __LINUX_REGULATOR_MT6359_H + +enum { + MT6359_ID_VS1 = 0, + MT6359_ID_VGPU11, + MT6359_ID_VMODEM, + MT6359_ID_VPU, + MT6359_ID_VCORE, + MT6359_ID_VS2, + MT6359_ID_VPA, + MT6359_ID_VPROC2, + MT6359_ID_VPROC1, + MT6359_ID_VCORE_SSHUB, + MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB, + MT6359_ID_VAUD18 = 10, + MT6359_ID_VSIM1, + MT6359_ID_VIBR, + MT6359_ID_VRF12, + MT6359_ID_VUSB, + MT6359_ID_VSRAM_PROC2, + MT6359_ID_VIO18, + MT6359_ID_VCAMIO, + MT6359_ID_VCN18, + MT6359_ID_VFE28, + MT6359_ID_VCN13, + MT6359_ID_VCN33_1_BT, + MT6359_ID_VCN33_1_WIFI, + MT6359_ID_VAUX18, + MT6359_ID_VSRAM_OTHERS, + MT6359_ID_VEFUSE, + MT6359_ID_VXO22, + MT6359_ID_VRFCK, + MT6359_ID_VBIF28, + MT6359_ID_VIO28, + MT6359_ID_VEMC, + MT6359_ID_VCN33_2_BT, + MT6359_ID_VCN33_2_WIFI, + MT6359_ID_VA12, + MT6359_ID_VA09, + MT6359_ID_VRF18, + MT6359_ID_VSRAM_MD, + MT6359_ID_VUFS, + MT6359_ID_VM18, + MT6359_ID_VBBCK, + MT6359_ID_VSRAM_PROC1, + MT6359_ID_VSIM2, + MT6359_ID_VSRAM_OTHERS_SSHUB, + MT6359_ID_RG_MAX, +}; + +#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6359_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 8b795b544f75..83c09ac36b13 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -243,7 +243,7 @@ struct fw_rsc_trace { * @da: device address * @align: the alignment between the consumer and producer parts of the vring * @num: num of buffers supported by this vring (must be power of two) - * @notifyid is a unique rproc-wide notify index for this vring. This notify + * @notifyid: a unique rproc-wide notify index for this vring. This notify * index is used when kicking a remote processor, to let it know that this * vring is triggered. * @pa: physical address @@ -266,18 +266,18 @@ struct fw_rsc_vdev_vring { /** * struct fw_rsc_vdev - virtio device header * @id: virtio device id (as in virtio_ids.h) - * @notifyid is a unique rproc-wide notify index for this vdev. 
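For orientation, the vdev and vring resources documented here (the field descriptions continue below) are published by the remote firmware in its resource table. A minimal sketch of such a table follows; the embedding pattern is the usual one, but every address, size and notify ID is invented for the example:

    #include <linux/remoteproc.h>
    #include <linux/stddef.h>
    #include <linux/virtio_ids.h>

    /* assumed shape: one RSC_VDEV entry with two vrings, no config space */
    struct my_resource_table {
            struct resource_table hdr;
            u32 offset[1];                       /* offsets to each resource */
            struct fw_rsc_hdr vdev_hdr;
            struct fw_rsc_vdev vdev;
            struct fw_rsc_vdev_vring vring[2];
    } __packed;

    static const struct my_resource_table rsc_table = {
            .hdr = { .ver = 1, .num = 1 },
            .offset = { offsetof(struct my_resource_table, vdev_hdr) },
            .vdev_hdr = { .type = RSC_VDEV },
            .vdev = {
                    .id = VIRTIO_ID_RPMSG,
                    .notifyid = 0,
                    .dfeatures = 1,              /* e.g. the name-service feature */
                    .num_of_vrings = 2,
            },
            .vring = {
                    { .da = 0x60000000, .align = 4096, .num = 256, .notifyid = 1 },
                    { .da = 0x60004000, .align = 4096, .num = 256, .notifyid = 2 },
            },
    };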
This notify + * @notifyid: a unique rproc-wide notify index for this vdev. This notify * index is used when kicking a remote processor, to let it know that the * status/features of this vdev have changes. - * @dfeatures specifies the virtio device features supported by the firmware - * @gfeatures is a place holder used by the host to write back the + * @dfeatures: specifies the virtio device features supported by the firmware + * @gfeatures: a place holder used by the host to write back the * negotiated features that are supported by both sides. - * @config_len is the size of the virtio config space of this vdev. The config + * @config_len: the size of the virtio config space of this vdev. The config * space lies in the resource table immediate after this vdev header. - * @status is a place holder where the host will indicate its virtio progress. - * @num_of_vrings indicates how many vrings are described in this vdev header + * @status: a place holder where the host will indicate its virtio progress. + * @num_of_vrings: indicates how many vrings are described in this vdev header * @reserved: reserved (must be zero) - * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. * * This resource is a virtio device header: it provides information about * the vdev, and is then used by the host and its peer remote processors @@ -287,16 +287,17 @@ struct fw_rsc_vdev_vring { * to statically allocate a vdev upon registration of the rproc (dynamic vdev * allocation is not yet supported). * - * Note: unlike virtualization systems, the term 'host' here means - * the Linux side which is running remoteproc to control the remote - * processors. We use the name 'gfeatures' to comply with virtio's terms, - * though there isn't really any virtualized guest OS here: it's the host - * which is responsible for negotiating the final features. - * Yeah, it's a bit confusing. - * - * Note: immediately following this structure is the virtio config space for - * this vdev (which is specific to the vdev; for more info, read the virtio - * spec). the size of the config space is specified by @config_len. + * Note: + * 1. unlike virtualization systems, the term 'host' here means + * the Linux side which is running remoteproc to control the remote + * processors. We use the name 'gfeatures' to comply with virtio's terms, + * though there isn't really any virtualized guest OS here: it's the host + * which is responsible for negotiating the final features. + * Yeah, it's a bit confusing. + * + * 2. immediately following this structure is the virtio config space for + * this vdev (which is specific to the vdev; for more info, read the virtio + * spec). The size of the config space is specified by @config_len. */ struct fw_rsc_vdev { u32 id; @@ -368,9 +369,8 @@ enum rsc_handling_status { * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) * @handle_rsc: optional platform hook to handle vendor resources. 
Should return - * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a - * negative value on error - * @load_rsc_table: load resource table from firmware image + * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled + * and a negative value on error * @find_loaded_rsc_table: find the loaded resource table from firmware image * @get_loaded_rsc_table: get resource table installed in memory * by external entity @@ -440,7 +440,7 @@ enum rproc_state { * enum rproc_crash_type - remote processor crash types * @RPROC_MMUFAULT: iommu fault * @RPROC_WATCHDOG: watchdog bite - * @RPROC_FATAL_ERROR fatal error + * @RPROC_FATAL_ERROR: fatal error * * Each element of the enum is used as an array index. So that, the value of * the elements should be always something sane. @@ -457,9 +457,9 @@ enum rproc_crash_type { * enum rproc_dump_mechanism - Coredump options for core * @RPROC_COREDUMP_DISABLED: Don't perform any dump * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with - recovery + * recovery * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall - recovery until all segments are read + * recovery until all segments are read */ enum rproc_dump_mechanism { RPROC_COREDUMP_DISABLED, @@ -475,6 +475,7 @@ enum rproc_dump_mechanism { * @priv: private data associated with the dump_segment * @dump: custom dump function to fill device memory segment associated * with coredump + * @offset: offset of the segment */ struct rproc_dump_segment { struct list_head node; @@ -524,7 +525,9 @@ struct rproc_dump_segment { * @auto_boot: flag to indicate if remote processor should be auto-started * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc - * @char_dev: character device of the rproc + * @elf_class: firmware ELF class + * @elf_machine: firmware ELF machine + * @cdev: character device of the rproc * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release */ struct rproc { @@ -613,10 +616,10 @@ struct rproc_vring { * struct rproc_vdev - remoteproc state for a supported virtio device * @refcount: reference counter for the vdev and vring allocations * @subdev: handle for registering the vdev as a rproc subdevice + * @dev: device struct used for reference count semantics * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle - * @vdev: the virio device * @vring: the vrings for this vdev * @rsc_offset: offset of the vdev's resource entry * @index: vdev position versus other vdev declared in resource table diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 9b05af9b3e28..21deb5212bbd 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,8 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/pid.h> #ifdef CONFIG_PROC_CPU_RESCTRL @@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m, #endif +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. 
+ */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/** + * struct resctrl_staged_config - parsed configuration to be applied + * @new_ctrl: new ctrl value to be loaded + * @have_new_ctrl: whether the user provided new_ctrl is valid + */ +struct resctrl_staged_config { + u32 new_ctrl; + bool have_new_ctrl; +}; + +/** + * struct rdt_domain - group of CPUs sharing a resctrl resource + * @list: all instances of this resource + * @id: unique id for this instance + * @cpu_mask: which CPUs share this resource + * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold + * @mbm_total: saved state for MBM total bandwidth + * @mbm_local: saved state for MBM local bandwidth + * @mbm_over: worker to periodically read MBM h/w counters + * @cqm_limbo: worker to periodically read CQM h/w counters + * @mbm_work_cpu: worker CPU for MBM h/w counters + * @cqm_work_cpu: worker CPU for CQM h/w counters + * @plr: pseudo-locked region (if any) associated with domain + * @staged_config: parsed configuration to be applied + */ +struct rdt_domain { + struct list_head list; + int id; + struct cpumask cpu_mask; + unsigned long *rmid_busy_llc; + struct mbm_state *mbm_total; + struct mbm_state *mbm_local; + struct delayed_work mbm_over; + struct delayed_work cqm_limbo; + int mbm_work_cpu; + int cqm_work_cpu; + struct pseudo_lock_region *plr; + struct resctrl_staged_config staged_config[CDP_NUM_TYPES]; +}; + +/** + * struct resctrl_cache - Cache allocation related data + * @cbm_len: Length of the cache bit mask + * @min_cbm_bits: Minimum number of consecutive bits to be set + * @shareable_bits: Bitmask of shareable resource with other + * executing entities + * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. + * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache + * level has CPU scope. 
+ */ +struct resctrl_cache { + unsigned int cbm_len; + unsigned int min_cbm_bits; + unsigned int shareable_bits; + bool arch_has_sparse_bitmaps; + bool arch_has_empty_bitmaps; + bool arch_has_per_cpu_cfg; +}; + +/** + * enum membw_throttle_mode - System's memory bandwidth throttling mode + * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system + * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core + * always using smallest bandwidth percentage + * assigned to threads, aka "max throttling" + * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread + */ +enum membw_throttle_mode { + THREAD_THROTTLE_UNDEFINED = 0, + THREAD_THROTTLE_MAX, + THREAD_THROTTLE_PER_THREAD, +}; + +/** + * struct resctrl_membw - Memory bandwidth allocation related data + * @min_bw: Minimum memory bandwidth percentage user can request + * @bw_gran: Granularity at which the memory bandwidth is allocated + * @delay_linear: True if memory B/W delay is in linear scale + * @arch_needs_linear: True if we can't configure non-linear resources + * @throttle_mode: Bandwidth throttling mode when threads request + * different memory bandwidths + * @mba_sc: True if MBA software controller(mba_sc) is enabled + * @mb_map: Mapping of memory B/W percentage to memory B/W delay + */ +struct resctrl_membw { + u32 min_bw; + u32 bw_gran; + u32 delay_linear; + bool arch_needs_linear; + enum membw_throttle_mode throttle_mode; + bool mba_sc; + u32 *mb_map; +}; + +struct rdt_parse_data; +struct resctrl_schema; + +/** + * struct rdt_resource - attributes of a resctrl resource + * @rid: The index of the resource + * @alloc_enabled: Is allocation enabled on this machine + * @mon_enabled: Is monitoring enabled for this feature + * @alloc_capable: Is allocation available on this machine + * @mon_capable: Is monitor feature available on this machine + * @num_rmid: Number of RMIDs available + * @cache_level: Which cache level defines scope of this resource + * @cache: Cache allocation related data + * @membw: If the component has bandwidth controls, their properties. + * @domains: All domains for this resource + * @name: Name to use in "schemata" file. + * @data_width: Character width of data when displaying + * @default_ctrl: Specifies default cache cbm or memory B/W percent. + * @format_str: Per resource format string to show domain value + * @parse_ctrlval: Per resource function pointer to parse control values + * @evt_list: List of monitoring events + * @fflags: flags to choose base and info files + * @cdp_capable: Is the CDP feature available on this resource + */ +struct rdt_resource { + int rid; + bool alloc_enabled; + bool mon_enabled; + bool alloc_capable; + bool mon_capable; + int num_rmid; + int cache_level; + struct resctrl_cache cache; + struct resctrl_membw membw; + struct list_head domains; + char *name; + int data_width; + u32 default_ctrl; + const char *format_str; + int (*parse_ctrlval)(struct rdt_parse_data *data, + struct resctrl_schema *s, + struct rdt_domain *d); + struct list_head evt_list; + unsigned long fflags; + bool cdp_capable; +}; + +/** + * struct resctrl_schema - configuration abilities of a resource presented to + * user-space + * @list: Member of resctrl_schema_all. + * @name: The name to use in the "schemata" file. + * @conf_type: Whether this schema is specific to code/data. + * @res: The resource structure exported by the architecture to describe + * the hardware that is configured by this schema. + * @num_closid: The number of closid that can be used with this schema. 
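Taken together, a consumer of these structures can walk one schema's domains with nothing more than the list head and the arch hook declared at the bottom of this header. A sketch, assuming the caller already holds whatever locking the resctrl filesystem requires and that closid identifies the resource group being inspected:

    static void dump_schema(struct resctrl_schema *s, u32 closid)
    {
            struct rdt_resource *r = s->res;
            struct rdt_domain *d;

            list_for_each_entry(d, &r->domains, list) {
                    u32 cfg = resctrl_arch_get_config(r, d, closid,
                                                      s->conf_type);

                    pr_info("%s: domain %d ctrl %0*x\n", s->name, d->id,
                            r->data_width, cfg);
            }
    }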
When + * features like CDP are enabled, this will be lower than the + * hardware supports for the resource. + */ +struct resctrl_schema { + struct list_head list; + char name[8]; + enum resctrl_conf_type conf_type; + struct rdt_resource *res; + u32 num_closid; +}; + +/* The number of closid supported by this resource regardless of CDP */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *r); +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type); + #endif /* _RESCTRL_H */ diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index ec35814e0bbb..0fa4f60e1186 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -79,6 +79,7 @@ struct reset_controller_dev { unsigned int nr_resets; }; +#if IS_ENABLED(CONFIG_RESET_CONTROLLER) int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); @@ -88,5 +89,26 @@ int devm_reset_controller_register(struct device *dev, void reset_controller_add_lookup(struct reset_control_lookup *lookup, unsigned int num_entries); +#else +static inline int reset_controller_register(struct reset_controller_dev *rcdev) +{ + return 0; +} + +static inline void reset_controller_unregister(struct reset_controller_dev *rcdev) +{ +} + +static inline int devm_reset_controller_register(struct device *dev, + struct reset_controller_dev *rcdev) +{ + return 0; +} + +static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, + unsigned int num_entries) +{ +} +#endif #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index def5c62c93b3..c976cc6de257 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -86,18 +86,15 @@ struct anon_vma_chain { }; enum ttu_flags { - TTU_MIGRATION = 0x1, /* migration mode */ - TTU_MUNLOCK = 0x2, /* munlock mode */ - TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ + TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */ TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */ TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible * and caller guarantees they will * do a final flush if necessary */ TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: * caller holds it */ - TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */ }; #ifdef CONFIG_MMU @@ -194,7 +191,12 @@ static inline void page_dup_rmap(struct page *page, bool compound) int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -bool try_to_unmap(struct page *, enum ttu_flags flags); +void try_to_migrate(struct page *page, enum ttu_flags flags); +void try_to_unmap(struct page *, enum ttu_flags flags); + +int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, + unsigned long end, struct page **pages, + void *arg); /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) @@ -239,7 +241,7 @@ int page_mkclean(struct page *); * called in munlock()/munmap() path to check for other vmas holding * the page mlocked. 
*/ -void try_to_munlock(struct page *); +void page_mlock(struct page *page); void remove_migration_ptes(struct page *old, struct page *new, bool locked); @@ -289,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -#define try_to_unmap(page, refs) false +static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +{ +} static inline int page_mkclean(struct page *page) { diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index d1672de9ca89..9deedfeec2b1 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,12 +13,39 @@ #ifndef __LINUX_RT_MUTEX_H #define __LINUX_RT_MUTEX_H +#include <linux/compiler.h> #include <linux/linkage.h> -#include <linux/rbtree.h> -#include <linux/spinlock_types.h> +#include <linux/rbtree_types.h> +#include <linux/spinlock_types_raw.h> extern int max_lock_depth; /* for sysctl */ +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ +{ \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ + .waiters = RB_ROOT_CACHED, \ + .owner = NULL \ +} + +/** + * rt_mutex_base_is_locked - is the rtmutex locked + * @lock: the mutex to be queried + * + * Returns true if the mutex is locked, false if unlocked. + */ +static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock) +{ + return READ_ONCE(lock->owner) != NULL; +} + +extern void rt_mutex_base_init(struct rt_mutex_base *rtb); + /** * The rt_mutex structure * @@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */ * @owner: the mutex owner */ struct rt_mutex { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; + struct rt_mutex_base rtmutex; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -52,32 +77,24 @@ do { \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ - , .dep_map = { .name = #mutexname } +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + .dep_map = { \ + .name = #mutexname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT_CACHED \ - , .owner = NULL \ - __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} +#define __RT_MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ +} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -/** - * rt_mutex_is_locked - is the mutex locked - * @lock: the mutex to be queried - * - * Returns 1 if the mutex is locked, 0 if unlocked. 
- */ -static inline int rt_mutex_is_locked(struct rt_mutex *lock) -{ - return lock->owner != NULL; -} - extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 6f155f99aa16..4ab7bfc675f1 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1109,6 +1109,7 @@ struct pcr_ops { }; enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; +enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG}; #define ASPM_L1_1_EN BIT(0) #define ASPM_L1_2_EN BIT(1) @@ -1234,6 +1235,7 @@ struct rtsx_pcr { u8 card_drive_sel; #define ASPM_L1_EN 0x02 u8 aspm_en; + enum ASPM_MODE aspm_mode; bool aspm_enabled; #define PCR_MS_PMOS (1 << 0) diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h new file mode 100644 index 000000000000..1d264dd08625 --- /dev/null +++ b/include/linux/rwbase_rt.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef _LINUX_RWBASE_RT_H +#define _LINUX_RWBASE_RT_H + +#include <linux/rtmutex.h> +#include <linux/atomic.h> + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rwbase_rt { + atomic_t readers; + struct rt_mutex_base rtmutex; +}; + +#define __RWBASE_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \ +} + +#define init_rwbase_rt(rwbase) \ + do { \ + rt_mutex_base_init(&(rwbase)->rtmutex); \ + atomic_set(&(rwbase)->readers, READER_BIAS); \ + } while (0) + + +static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) != READER_BIAS; +} + +static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) > 0; +} + +#endif /* _LINUX_RWBASE_RT_H */ diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 000000000000..49c1f3842ed5 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_RT_H +#error Do not #include directly. Use <linux/spinlock.h>. 
+#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name, + struct lock_class_key *key); +#else +static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name, + struct lock_class_key *key) +{ +} +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(rwl)->rwbase); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void rt_read_lock(rwlock_t *rwlock); +extern int rt_read_trylock(rwlock_t *rwlock); +extern void rt_read_unlock(rwlock_t *rwlock); +extern void rt_write_lock(rwlock_t *rwlock); +extern int rt_write_trylock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock); + +static __always_inline void read_lock(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_irq(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_read_lock(lock); \ + flags = 0; \ + } while (0) + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) + +static __always_inline void read_unlock(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_bh(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void read_unlock_irq(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void write_lock(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_irq(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_write_lock(lock); \ + flags = 0; \ + } while (0) + +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = write_trylock(lock); \ + __locked; \ +}) + +static __always_inline void write_unlock(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_bh(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void write_unlock_irq(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_write_unlock(rwlock); +} + +#define rwlock_is_contended(lock) (((void)(lock), 0)) + +#endif /* __LINUX_RWLOCK_RT_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 3bd03e18061c..1948442e7750 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,9 +1,23 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* - * include/linux/rwlock_types.h - generic rwlock 
type definitions - * and initializers + * generic rwlock type definitions and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). @@ -21,16 +35,6 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ @@ -46,4 +50,29 @@ typedef struct { #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +typedef struct { + struct rwbase_rt rwbase; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define __RWLOCK_RT_INITIALIZER(name) \ +{ \ + .rwbase = __RWBASE_INITIALIZER(name), \ + RW_DEP_MAP_INIT(name) \ +} + +#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index a66038d88878..352c6127cb90 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,6 +16,19 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/err.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + }, +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include <linux/osq_lock.h> #endif @@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) /* Common initializer macros and functions */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - }, -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_RWSEMS # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, #else @@ -119,6 +122,53 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +struct rw_semaphore { + struct rwbase_rt rwbase; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ + { \ + .rwbase = __RWBASE_INITIALIZER(name), \ + __RWSEM_DEP_MAP_INIT(name) \ + } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return rw_base_is_locked(&sem->rwbase); +} + +static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return rw_base_is_contended(&sem->rwbase); +} + +#endif /* CONFIG_PREEMPT_RT */ + +/* + * The functions below are the same for all rwsem implementations including + * the RT specific variant. 
+ */ + /* * lock for reading */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 6f70572b2938..266754a55327 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -39,6 +39,12 @@ struct sg_table { unsigned int orig_nents; /* original size of list */ }; +struct sg_append_table { + struct sg_table sgt; /* The scatter list table */ + struct scatterlist *prv; /* last populated sge in the table */ + unsigned int total_nents; /* Total entries in the table */ +}; + /* * Notes on SG table design. * @@ -280,19 +286,51 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); void __sg_free_table(struct sg_table *, unsigned int, unsigned int, - sg_free_fn *); + sg_free_fn *, unsigned int); void sg_free_table(struct sg_table *); +void sg_free_append_table(struct sg_append_table *sgt); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, - struct page **pages, unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask); -int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, gfp_t gfp_mask); +int sg_alloc_append_table_from_pages(struct sg_append_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned int offset, unsigned long size, + unsigned int max_segment, + unsigned int left_pages, gfp_t gfp_mask); +int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, + unsigned int max_segment, gfp_t gfp_mask); + +/** + * sg_alloc_table_from_pages - Allocate and initialize an sg table from + * an array of pages + * @sgt: The sg table header to use + * @pages: Pointer to an array of page pointers + * @n_pages: Number of pages in the pages array + * @offset: Offset from start of the first page to the start of a buffer + * @size: Number of valid bytes in the buffer (after offset) + * @gfp_mask: GFP allocation mask + * + * Description: + * Allocate and initialize an sg table from a list of pages. Contiguous + * ranges of the pages are squashed into a single scatterlist node. A user + * may provide an offset at a start and a size of valid data in a buffer + * specified by the page array. The returned sg table is released by + * sg_free_table. + * + * Returns: + * 0 on success, negative error on failure + */ +static inline int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, + unsigned int n_pages, + unsigned int offset, + unsigned long size, gfp_t gfp_mask) +{ + return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, + size, UINT_MAX, gfp_mask); +} #ifdef CONFIG_SGL_ALLOC struct scatterlist *sgl_alloc_order(unsigned long long length, @@ -474,7 +512,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * Iterates over sg entries mapping page-by-page. On each successful * iteration, @miter->page points to the mapped page and * @miter->length bytes of data can be accessed at @miter->addr. 
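As a usage sketch of the new segment-capped variant above: build a table from already-pinned user pages with at most 64 KiB per DMA segment, then hand it to the DMA API. The function name and the 64 KiB cap are arbitrary, and pages/npages are assumed to come from pin_user_pages():

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/sizes.h>

    static int map_user_pages(struct device *dev, struct page **pages,
                              unsigned int npages, struct sg_table *sgt)
    {
            int ret;

            ret = sg_alloc_table_from_pages_segment(sgt, pages, npages, 0,
                                                    (unsigned long)npages * PAGE_SIZE,
                                                    SZ_64K, GFP_KERNEL);
            if (ret)
                    return ret;

            ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
            if (ret)
                    sg_free_table(sgt);
            return ret;
    }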
As - * long as an interation is enclosed between start and stop, the user + * long as an iteration is enclosed between start and stop, the user * is free to choose control structure and when to stop. * * @miter->consumed is set to @miter->length on each iteration. It diff --git a/include/linux/sched.h b/include/linux/sched.h index d2c881384517..e12b524426b0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -42,6 +42,7 @@ struct backing_dev_info; struct bio_list; struct blk_plug; struct bpf_local_storage; +struct bpf_run_ctx; struct capture_control; struct cfs_rq; struct fs_struct; @@ -95,7 +96,9 @@ struct task_group; #define TASK_WAKING 0x0200 #define TASK_NOLOAD 0x0400 #define TASK_NEW 0x0800 -#define TASK_STATE_MAX 0x1000 +/* RT specific auxilliary flag to mark RT lock waiters */ +#define TASK_RTLOCK_WAIT 0x1000 +#define TASK_STATE_MAX 0x2000 /* Convenience macros for the sake of set_current_state: */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -113,13 +116,13 @@ struct task_group; __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ TASK_PARKED) -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) +#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0) -#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0) -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) /* * Special states are those that do not use the normal wait-loop pattern. 
See @@ -128,30 +131,37 @@ struct task_group; #define is_special_task_state(state) \ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) -#define __set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - current->state = (state_value); \ - } while (0) - -#define set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->state, (state_value)); \ +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define debug_normal_state_change(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value)); \ + current->task_state_change = _THIS_IP_; \ } while (0) -#define set_special_state(state_value) \ +# define debug_special_state_change(state_value) \ do { \ - unsigned long flags; /* may shadow */ \ WARN_ON_ONCE(!is_special_task_state(state_value)); \ - raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current->task_state_change = _THIS_IP_; \ - current->state = (state_value); \ - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) + +# define debug_rtlock_wait_set_state() \ + do { \ + current->saved_state_change = current->task_state_change;\ + current->task_state_change = _THIS_IP_; \ + } while (0) + +# define debug_rtlock_wait_restore_state() \ + do { \ + current->task_state_change = current->saved_state_change;\ + } while (0) + #else +# define debug_normal_state_change(cond) do { } while (0) +# define debug_special_state_change(cond) do { } while (0) +# define debug_rtlock_wait_set_state() do { } while (0) +# define debug_rtlock_wait_restore_state() do { } while (0) +#endif + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -190,26 +200,79 @@ struct task_group; * Also see the comments of try_to_wake_up(). */ #define __set_current_state(state_value) \ - current->state = (state_value) + do { \ + debug_normal_state_change((state_value)); \ + WRITE_ONCE(current->__state, (state_value)); \ + } while (0) #define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + smp_store_mb(current->__state, (state_value)); \ + } while (0) /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must - * serialize against wakeups such that any possible in-flight TASK_RUNNING stores - * will not collide with our state change. + * serialize against wakeups such that any possible in-flight TASK_RUNNING + * stores will not collide with our state change. */ #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ + \ raw_spin_lock_irqsave(¤t->pi_lock, flags); \ - current->state = (state_value); \ + debug_special_state_change((state_value)); \ + WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) -#endif +/* + * PREEMPT_RT specific variants for "sleeping" spin/rwlocks + * + * RT's spin/rwlock substitutions are state preserving. The state of the + * task when blocking on the lock is saved in task_struct::saved_state and + * restored after the lock has been acquired. These operations are + * serialized by task_struct::pi_lock against try_to_wake_up(). 
Any non RT + * lock related wakeups while the task is blocked on the lock are + * redirected to operate on task_struct::saved_state to ensure that these + * are not dropped. On restore task_struct::saved_state is set to + * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. + * + * The lock operation looks like this: + * + * current_save_and_set_rtlock_wait_state(); + * for (;;) { + * if (try_lock()) + * break; + * raw_spin_unlock_irq(&lock->wait_lock); + * schedule_rtlock(); + * raw_spin_lock_irq(&lock->wait_lock); + * set_current_state(TASK_RTLOCK_WAIT); + * } + * current_restore_rtlock_saved_state(); + */ +#define current_save_and_set_rtlock_wait_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + current->saved_state = current->__state; \ + debug_rtlock_wait_set_state(); \ + WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); + +#define current_restore_rtlock_saved_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + debug_rtlock_wait_restore_state(); \ + WRITE_ONCE(current->__state, current->saved_state); \ + current->saved_state = TASK_RUNNING; \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); + +#define get_current_state() READ_ONCE(current->__state) /* Task command name length: */ #define TASK_COMM_LEN 16 @@ -226,6 +289,9 @@ extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); asmlinkage void preempt_schedule_irq(void); +#ifdef CONFIG_PREEMPT_RT + extern void schedule_rtlock(void); +#endif extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); @@ -350,11 +416,19 @@ struct load_weight { * Only for tasks we track a moving average of the past instantaneous * estimated utilization. This allows to absorb sporadic drops in utilization * of an otherwise almost periodic task. + * + * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg + * updates. When a task is dequeued, its util_est should not be updated if its + * util_avg has not been updated in the meantime. + * This information is mapped into the MSB bit of util_est.enqueued at dequeue + * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg + * for a task) it is safe to use MSB. */ struct util_est { unsigned int enqueued; unsigned int ewma; #define UTIL_EST_WEIGHT_SHIFT 2 +#define UTIL_AVG_UNCHANGED 0x80000000 } __attribute__((__aligned__(sizeof(u64)))); /* @@ -654,8 +728,12 @@ struct task_struct { */ struct thread_info thread_info; #endif - /* -1 unrunnable, 0 runnable, >0 stopped: */ - volatile long state; + unsigned int __state; + +#ifdef CONFIG_PREEMPT_RT + /* saved state for "spinlock sleepers" */ + unsigned int saved_state; +#endif /* * This begins the randomizable portion of task_struct. 
Only @@ -700,10 +778,17 @@ struct task_struct { const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; + struct sched_dl_entity dl; + +#ifdef CONFIG_SCHED_CORE + struct rb_node core_node; + unsigned long core_cookie; + unsigned int core_occupation; +#endif + #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; #endif - struct sched_dl_entity dl; #ifdef CONFIG_UCLAMP_TASK /* @@ -730,6 +815,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; #ifdef CONFIG_SMP @@ -845,6 +931,10 @@ struct task_struct { /* Used by page_owner=on to detect recursion in page tracking. */ unsigned in_page_owner:1; #endif +#ifdef CONFIG_EVENTFD + /* Recursion prevention for eventfd_signal() */ + unsigned in_eventfd_signal:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ @@ -989,7 +1079,6 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct __rcu *sighand; - struct sigqueue *sigqueue_cache; sigset_t blocked; sigset_t real_blocked; /* Restored if set_restore_sigmask() was used: */ @@ -1340,6 +1429,9 @@ struct task_struct { struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; +# ifdef CONFIG_PREEMPT_RT + unsigned long saved_state_change; +# endif #endif int pagefault_disabled; #ifdef CONFIG_MMU @@ -1362,6 +1454,8 @@ struct task_struct { #ifdef CONFIG_BPF_SYSCALL /* Used by BPF task local storage */ struct bpf_local_storage __rcu *bpf_storage; + /* Used for BPF run context */ + struct bpf_run_ctx *bpf_ctx; #endif #ifdef CONFIG_GCC_PLUGIN_STACKLEAK @@ -1383,6 +1477,16 @@ struct task_struct { struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH + /* + * If L1D flush is supported on mm context switch + * then we use this callback head to queue kill work + * to kill tasks that are not running on SMT disabled + * cores + */ + struct callback_head l1d_flush_kill; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. 
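The wait-loop pattern these state macros serve is worth spelling out, because the barrier in set_current_state() is what makes it safe: the task's state is stored before the condition is tested, so a waker that sets the condition and then calls wake_up_process() cannot slip between the two. A generic sketch with an invented context struct:

    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    struct my_waiter_ctx {
            bool done;      /* written by the waker before wake_up_process() */
    };

    static int wait_for_done(struct my_waiter_ctx *ctx)
    {
            for (;;) {
                    /* store to __state is ordered before the condition test */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (READ_ONCE(ctx->done))
                            break;
                    if (signal_pending(current)) {
                            __set_current_state(TASK_RUNNING);
                            return -ERESTARTSYS;
                    }
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }

The waker side does WRITE_ONCE(ctx->done, true) followed by wake_up_process() on the sleeping task.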
@@ -1513,7 +1617,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) static inline unsigned int task_state_index(struct task_struct *tsk) { - unsigned int tsk_state = READ_ONCE(tsk->state); + unsigned int tsk_state = READ_ONCE(tsk->__state); unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); @@ -1688,6 +1792,11 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_ #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); +extern void release_user_cpus_ptr(struct task_struct *p); +extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); +extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); +extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -1698,6 +1807,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma return -EINVAL; return 0; } +static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) +{ + if (src->user_cpus_ptr) + return -EINVAL; + return 0; +} +static inline void release_user_cpus_ptr(struct task_struct *p) +{ + WARN_ON(p->user_cpus_ptr); +} + +static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) +{ + return 0; +} #endif extern int yield_to(struct task_struct *p, bool preempt); @@ -1821,10 +1945,10 @@ static __always_inline void scheduler_ipi(void) */ preempt_fold_need_resched(); } -extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) { return 1; } @@ -2011,6 +2135,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +extern bool sched_task_on_rq(struct task_struct *p); + /* * In order to reduce various lock holder preemption latencies provide an * interface to see if a vCPU is currently running or not. 
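As a small usage sketch of the affinity entry points above: code that wants a worker tied to one CPU usually goes through kthread_bind(), which is built on do_set_cpus_allowed(); set_cpus_allowed_ptr() is the checked variant for tasks that are already running. Only the kthread API below is real, the rest is an assumed example:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct task_struct *start_pinned_worker(int (*fn)(void *),
                                                   void *data, int cpu)
    {
            struct task_struct *tsk;

            tsk = kthread_create(fn, data, "pinned/%d", cpu);
            if (IS_ERR(tsk))
                    return tsk;

            kthread_bind(tsk, cpu);  /* pin before the first wakeup */
            wake_up_process(tsk);
            return tsk;
    }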
@@ -2172,4 +2298,14 @@ int sched_trace_rq_nr_running(struct rq *rq); const struct cpumask *sched_trace_rd_span(struct root_domain *rd); +#ifdef CONFIG_SCHED_CORE +extern void sched_core_free(struct task_struct *tsk); +extern void sched_core_fork(struct task_struct *p); +extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, + unsigned long uaddr); +#else +static inline void sched_core_free(struct task_struct *tsk) { } +static inline void sched_core_fork(struct task_struct *p) { } +#endif + #endif diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index dfd82eab2902..4d9e3a656875 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -73,6 +73,14 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 27 /* mm is shared between processes */ +/* + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into + * a counter on its own. We're aggressive on this bit for now: even if the + * pinned pages were unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm, just for simplicity. + */ +#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 6205578ab6ee..bdd31ab93bc5 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -26,7 +26,7 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); static inline unsigned long map_util_freq(unsigned long util, unsigned long freq, unsigned long cap) { - return (freq + (freq >> 2)) * util / cap; + return freq * util / cap; } static inline unsigned long map_util_perf(unsigned long util) diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index ae51f4529fc9..b5035afa2396 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu); /* * Only dump TASK_* tasks.
(0 for all tasks) */ -extern void show_state_filter(unsigned long state_filter); +extern void show_state_filter(unsigned int state_filter); static inline void show_state(void) { diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index e24b1fe348e3..5561486fddef 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -174,13 +174,13 @@ static inline gfp_t current_gfp_context(gfp_t flags) } #ifdef CONFIG_LOCKDEP -extern void __fs_reclaim_acquire(void); -extern void __fs_reclaim_release(void); +extern void __fs_reclaim_acquire(unsigned long ip); +extern void __fs_reclaim_release(unsigned long ip); extern void fs_reclaim_acquire(gfp_t gfp_mask); extern void fs_reclaim_release(gfp_t gfp_mask); #else -static inline void __fs_reclaim_acquire(void) { } -static inline void __fs_reclaim_release(void) { } +static inline void __fs_reclaim_acquire(unsigned long ip) { } +static inline void __fs_reclaim_release(unsigned long ip) { } static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } static inline void fs_reclaim_release(gfp_t gfp_mask) { } #endif @@ -306,7 +306,7 @@ set_active_memcg(struct mem_cgroup *memcg) { struct mem_cgroup *old; - if (in_interrupt()) { + if (!in_task()) { old = this_cpu_read(int_active_memcg); this_cpu_write(int_active_memcg, memcg); } else { diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index 34b21e971d77..57bde66d95f7 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -91,6 +91,16 @@ SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD) SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) /* + * Domain members have different CPU capacities spanning all unique CPU + * capacity values. + * + * SHARED_PARENT: Set from the topmost domain down to the first domain where + * all available CPU capacities are visible + * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups. + */ +SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) + +/* * Domain members share CPU capacity (i.e. 
SMT) * * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 7f4278fa21fe..e5f4ce622ee6 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -298,11 +298,6 @@ static inline void kernel_signal_stop(void) schedule(); } -#ifdef __ARCH_SI_TRAPNO -# define ___ARCH_SI_TRAPNO(_a1) , _a1 -#else -# define ___ARCH_SI_TRAPNO(_a1) -#endif #ifdef __ia64__ # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 #else @@ -310,14 +305,11 @@ static inline void kernel_signal_stop(void) #endif int force_sig_fault_to_task(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); int force_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); int send_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); @@ -329,6 +321,10 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); +int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno); +int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, + struct task_struct *t); +int force_sig_seccomp(int syscall, int reason, bool force_coredump); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig); @@ -382,7 +378,7 @@ static inline int fatal_signal_pending(struct task_struct *p) return task_sigpending(p) && __fatal_signal_pending(p); } -static inline int signal_pending_state(long state, struct task_struct *p) +static inline int signal_pending_state(unsigned int state, struct task_struct *p) { if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) return 0; @@ -538,6 +534,17 @@ static inline int kill_cad_pid(int sig, int priv) #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) +static inline int __on_sig_stack(unsigned long sp) +{ +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + /* * True if we are on the alternate signal stack. 
*/ @@ -555,13 +562,7 @@ static inline int on_sig_stack(unsigned long sp) if (current->sas_ss_flags & SS_AUTODISARM) return 0; -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif + return __on_sig_stack(sp); } static inline int sas_ss_flags(unsigned long sp) @@ -709,6 +710,12 @@ static inline void unlock_task_sighand(struct task_struct *task, spin_unlock_irqrestore(&task->sighand->siglock, *flags); } +#ifdef CONFIG_LOCKDEP +extern void lockdep_assert_task_sighand_held(struct task_struct *task); +#else +static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { } +#endif + static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index 568286411b43..0108a38bb64d 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -3,6 +3,7 @@ #define _LINUX_SCHED_STAT_H #include <linux/percpu.h> +#include <linux/kconfig.h> /* * Various counters maintained by the scheduler and fork(), @@ -16,21 +17,14 @@ extern unsigned long total_forks; extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); -extern unsigned long nr_running(void); +extern unsigned int nr_running(void); extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); +extern unsigned int nr_iowait(void); +extern unsigned int nr_iowait_cpu(int cpu); static inline int sched_info_on(void) { -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif + return IS_ENABLED(CONFIG_SCHED_INFO); } #ifdef CONFIG_SCHEDSTATS diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index db2c0f34aaaf..304f431178fd 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -28,30 +28,12 @@ enum { sysctl_hung_task_timeout_secs = 0 }; extern unsigned int sysctl_sched_child_runs_first; -extern unsigned int sysctl_sched_latency; -extern unsigned int sysctl_sched_min_granularity; -extern unsigned int sysctl_sched_wakeup_granularity; - enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; -extern unsigned int sysctl_sched_tunable_scaling; - -extern unsigned int sysctl_numa_balancing_scan_delay; -extern unsigned int sysctl_numa_balancing_scan_period_min; -extern unsigned int sysctl_numa_balancing_scan_period_max; -extern unsigned int sysctl_numa_balancing_scan_size; - -#ifdef CONFIG_SCHED_DEBUG -extern __read_mostly unsigned int sysctl_sched_migration_cost; -extern __read_mostly unsigned int sysctl_sched_nr_migrate; - -extern int sysctl_resched_latency_warn_ms; -extern int sysctl_resched_latency_warn_once; -#endif /* * control realtime throttling: diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 3632c5d6ec55..00ed419dd464 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -4,6 +4,7 @@ #include <linux/uidgid.h> #include <linux/atomic.h> +#include <linux/percpu_counter.h> #include <linux/refcount.h> #include <linux/ratelimit.h> @@ -12,16 +13,9 @@ */ struct user_struct { refcount_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? 
*/ - atomic_t sigpending; /* How many pending signals does this user have? */ #ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ + struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */ #endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ -#endif - unsigned long locked_shm; /* How many pages of mlocked shm ? */ unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 26a2013ac39c..06cd8fb2f409 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -42,8 +42,11 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } +#define WAKE_Q_HEAD_INITIALIZER(name) \ + { WAKE_Q_TAIL, &name.first } + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name) static inline void wake_q_init(struct wake_q_head *head) { diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 528718e4ed52..835ee87ed792 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -14,7 +14,7 @@ * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit * clocks. * @read_sched_clock: Current clock source (or dummy source when suspended). - * @mult: Multipler for scaled math conversion. + * @mult: Multiplier for scaled math conversion. * @shift: Shift value for scaled math conversion. * * Care must be taken when updating this structure; it is read by diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 79d0a1237e6c..80e781c51ddc 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -101,6 +101,10 @@ struct scmi_clk_proto_ops { * to sustained performance level mapping * @est_power_get: gets the estimated power cost for a given performance domain * at a given frequency + * @fast_switch_possible: indicates if fast DVFS switching is possible or not + * for a given device + * @power_scale_mw_get: indicates if the power values provided are in milliWatts + * or in some other (abstract) scale */ struct scmi_perf_proto_ops { int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, @@ -153,7 +157,7 @@ struct scmi_power_proto_ops { }; /** - * scmi_sensor_reading - represent a timestamped read + * struct scmi_sensor_reading - represent a timestamped read * * Used by @reading_get_timestamped method. * @@ -167,7 +171,7 @@ struct scmi_sensor_reading { }; /** - * scmi_range_attrs - specifies a sensor or axis values' range + * struct scmi_range_attrs - specifies a sensor or axis values' range * @min_range: The minimum value which can be represented by the sensor/axis. * @max_range: The maximum value which can be represented by the sensor/axis. */ @@ -177,7 +181,7 @@ struct scmi_range_attrs { }; /** - * scmi_sensor_axis_info - describes one sensor axes + * struct scmi_sensor_axis_info - describes one sensor axes * @id: The axes ID. * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class. * @scale: Power-of-10 multiplier applied to the axis unit. 
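The power-of-10 @scale convention means a raw axis value v represents v * 10^scale in the axis unit; for example, a degrees-Celsius axis with scale = -3 reports raw values in millidegrees. A hypothetical conversion helper, not part of the SCMI API:

static long long scmi_axis_apply_scale(long long raw, int scale)
{
	/* computes raw * 10^scale; negative scales truncate toward zero */
	for (; scale > 0; scale--)
		raw *= 10;
	for (; scale < 0; scale++)
		raw /= 10;
	return raw;
}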
@@ -205,8 +209,8 @@ struct scmi_sensor_axis_info { }; /** - * scmi_sensor_intervals_info - describes number and type of available update - * intervals + * struct scmi_sensor_intervals_info - describes number and type of available + * update intervals * @segmented: Flag for segmented intervals' representation. When True there * will be exactly 3 intervals in @desc, with each entry * representing a member of a segment in this order: diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index afbf8037d8db..d2176a56828a 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -51,6 +51,14 @@ struct scpi_sensor_info { * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain. It includes the OPP list and the latency information + * @device_domain_id: gets the scpi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @sensor_get_capability: get the list of capabilities for the sensors + * @sensor_get_info: get the information of the specified sensor + * @sensor_get_value: gets the current value of the sensor + * @device_get_power_state: gets the power state of a power domain + * @device_set_power_state: sets the power state of a power domain */ struct scpi_ops { u32 (*get_version)(void); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index bb1926589693..a86e852507b3 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -98,6 +98,7 @@ enum sctp_cid { SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, + SCTP_CID_PAD = 0x84, }; /* enum */ @@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk { }; +/* PAD chunk could be bundled with heartbeat chunk to probe pmtu */ +struct sctp_pad_chunk { + struct sctp_chunkhdr uh; +}; + + /* For the abort and shutdown ACK we must carry the init tag in the * common header. Just the common header is all that is needed with a * chunk descriptor. diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h new file mode 100644 index 000000000000..21c3771e6a56 --- /dev/null +++ b/include/linux/secretmem.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_SECRETMEM_H +#define _LINUX_SECRETMEM_H + +#ifdef CONFIG_SECRETMEM + +extern const struct address_space_operations secretmem_aops; + +static inline bool page_is_secretmem(struct page *page) +{ + struct address_space *mapping; + + /* + * Using page_mapping() is quite slow because of the actual call + * instruction and repeated compound_head(page) inside the + * page_mapping() function. + * We know that secretmem pages are not compound and LRU so we can + * save a couple of cycles here. 
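These helpers back the memfd_secret() system call. A minimal, hypothetical userspace sketch, assuming kernel headers recent enough to define __NR_memfd_secret:

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *map_secret_page(void)
{
	int fd = syscall(__NR_memfd_secret, 0);

	if (fd < 0)
		return MAP_FAILED;
	if (ftruncate(fd, 4096) < 0) {
		close(fd);
		return MAP_FAILED;
	}
	/* the backing pages leave the kernel direct map once faulted in */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}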
+ */ + if (PageCompound(page) || !PageLRU(page)) + return false; + + mapping = (struct address_space *) + ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); + + if (mapping != page->mapping) + return false; + + return mapping->a_ops == &secretmem_aops; +} + +bool vma_is_secretmem(struct vm_area_struct *vma); +bool secretmem_active(void); + +#else + +static inline bool vma_is_secretmem(struct vm_area_struct *vma) +{ + return false; +} + +static inline bool page_is_secretmem(struct page *page) +{ + return false; +} + +static inline bool secretmem_active(void) +{ + return false; +} + +#endif /* CONFIG_SECRETMEM */ + +#endif /* _LINUX_SECRETMEM_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 06f7c50ce77f..5b7288521300 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -120,10 +120,11 @@ enum lockdown_reason { LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, + LOCKDOWN_BPF_WRITE_USER, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, - LOCKDOWN_BPF_READ, + LOCKDOWN_BPF_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, @@ -1681,7 +1682,7 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid); int security_xfrm_state_delete(struct xfrm_state *x); void security_xfrm_state_free(struct xfrm_state *x); -int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); +int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid); int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic); @@ -1732,7 +1733,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x) return 0; } -static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) { return 0; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 723b1fa1177e..dd99569595fd 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -126,8 +126,16 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num void seq_put_hex_ll(struct seq_file *m, const char *delimiter, unsigned long long v, unsigned int width); +void seq_escape_mem(struct seq_file *m, const char *src, size_t len, + unsigned int flags, const char *esc); + +static inline void seq_escape_str(struct seq_file *m, const char *src, + unsigned int flags, const char *esc) +{ + seq_escape_mem(m, src, strlen(src), flags, esc); +} + void seq_escape(struct seq_file *m, const char *s, const char *esc); -void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index f61e34fbaaea..37ded6b8fee6 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -182,9 +182,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) -#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock); -#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex); -#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex); +#define seqcount_rwlock_init(s, 
lock) seqcount_LOCKNAME_init(s, lock, rwlock) +#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) +#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 9f14f9c12ec4..3368c261ab62 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -327,4 +327,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port) } #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ +struct acpi_resource; +struct acpi_resource_uart_serialbus; + +#ifdef CONFIG_ACPI +bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart); +#else +static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart) +{ + return false; +} +#endif /* CONFIG_ACPI */ + #endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 9e655055112d..5db211f43b29 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -146,7 +146,7 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) return container_of(up, struct uart_8250_port, port); } -int serial8250_register_8250_port(struct uart_8250_port *); +int serial8250_register_8250_port(const struct uart_8250_port *); void serial8250_unregister_port(int line); void serial8250_suspend_port(int line); void serial8250_resume_port(int line); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index d7ed00f1594e..c58cc142d23f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -408,7 +408,8 @@ int uart_register_driver(struct uart_driver *uart); void uart_unregister_driver(struct uart_driver *uart); int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); -int uart_match_port(struct uart_port *port1, struct uart_port *port2); +bool uart_match_port(const struct uart_port *port1, + const struct uart_port *port2); /* * Power Management @@ -428,7 +429,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; - if ((tty && tty->stopped) || port->hw_stopped) + if ((tty && tty->flow.stopped) || port->hw_stopped) return 1; return 0; } @@ -517,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) if (sysrq_ch) handle_sysrq(sysrq_ch); } + +static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, flags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, flags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { @@ -530,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { spin_unlock(&port->lock); } +static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + spin_unlock_irqrestore(&port->lock, flags); +} #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ /* diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index f6c3323fc4c5..cf0de4a86640 
100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -27,6 +27,15 @@ #define S3C2410_UERSTAT (0x14) #define S3C2410_UFSTAT (0x18) #define S3C2410_UMSTAT (0x1C) +#define USI_CON (0xC4) +#define USI_OPTION (0xC8) + +#define USI_CON_RESET (1<<0) +#define USI_CON_RESET_MASK (1<<0) + +#define USI_OPTION_HWACG_CLKREQ_ON (1<<1) +#define USI_OPTION_HWACG_CLKSTOP_ON (1<<2) +#define USI_OPTION_HWACG_MASK (3<<1) #define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index fe1aa4e54680..f36be5166c19 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -28,7 +28,19 @@ static inline bool kernel_page_present(struct page *page) { return true; } +#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ +/* + * Some architectures, e.g. ARM64 can disable direct map modifications at + * boot time. Let them override this query. + */ +#ifndef can_set_direct_map +static inline bool can_set_direct_map(void) +{ + return true; +} +#define can_set_direct_map can_set_direct_map #endif +#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifndef set_mce_nospec static inline int set_mce_nospec(unsigned long pfn, bool unmap) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index d82b6f396588..166158b6e917 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -18,6 +18,7 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ + pgoff_t fallocend; /* highest fallocate endindex */ struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ @@ -31,7 +32,7 @@ struct shmem_sb_info { struct percpu_counter used_blocks; /* How many are allocated */ unsigned long max_inodes; /* How many inodes are allowed */ unsigned long free_inodes; /* How many are left for allocation */ - spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ umode_t mode; /* Mount mode for root directory */ unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ @@ -65,7 +66,7 @@ extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts); #ifdef CONFIG_SHMEM extern const struct address_space_operations shmem_aops; static inline bool shmem_mapping(struct address_space *mapping) @@ -85,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(unsigned int type, bool frontswap, unsigned long *fs_pages_to_unuse); -extern bool shmem_huge_enabled(struct vm_area_struct *vma); +extern bool shmem_is_huge(struct vm_area_struct *vma, + struct inode *inode, pgoff_t index); +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff); +} extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -93,9 +99,8 @@ extern
unsigned long shmem_partial_swap_usage(struct address_space *mapping, /* Flag allocation requirements to shmem_getpage */ enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ - SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ - SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; @@ -119,24 +124,33 @@ static inline bool shmem_file(struct file *file) return shmem_mapping(file->f_mapping); } +/* + * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages + * beyond i_size's notion of EOF, which fallocate has committed to reserving: + * which split_huge_page() must therefore not delete. This use of a single + * "fallocend" per inode errs on the side of not deleting a reservation when + * in doubt: there are plenty of cases when it preserves unreserved pages. + */ +static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) +{ + return max(eof, SHMEM_I(inode)->fallocend); +} + extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); +#ifdef CONFIG_USERFAULTFD #ifdef CONFIG_SHMEM -extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, +extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + bool zeropage, struct page **pagep); -extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, - pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr); -#else -#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ - src_addr, pagep) ({ BUG(); 0; }) -#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \ - dst_addr) ({ BUG(); 0; }) -#endif +#else /* !CONFIG_SHMEM */ +#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \ + src_addr, zeropage, pagep) ({ BUG(); 0; }) +#endif /* CONFIG_SHMEM */ +#endif /* CONFIG_USERFAULTFD */ #endif diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 1eac79ce57d4..9814fff58a69 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,7 +4,7 @@ /* * This struct is used to pass information from page reclaim to the shrinkers. - * We consolidate the values for easier extention later. + * We consolidate the values for easier extension later. * * The 'gfpmask' refers to the allocation we are currently trying to * fulfil. diff --git a/include/linux/signal.h b/include/linux/signal.h index 201f88e3738b..3f96a6374e4f 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -44,7 +44,7 @@ enum siginfo_layout { SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, - SIL_PERF_EVENT, + SIL_FAULT_PERF_EVENT, SIL_CHLD, SIL_RT, SIL_SYS, @@ -267,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); -extern void exit_task_sigqueue_cache(struct task_struct *tsk); /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) @@ -463,8 +462,6 @@ int __save_altstack(stack_t __user *, unsigned long); unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ - if (t->sas_ss_flags & SS_AUTODISARM) \ - sas_ss_reset(t); \ } while (0); #ifdef CONFIG_PROC_FS diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 68e06c75c5b2..34cb28b8f16c 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -13,6 +13,8 @@ typedef struct kernel_siginfo { __SIGINFO; } kernel_siginfo_t; +struct ucounts; + /* * Real Time signals may be queued. */ @@ -21,7 +23,7 @@ struct sigqueue { struct list_head list; int flags; kernel_siginfo_t info; - struct user_struct *user; + struct ucounts *ucounts; }; /* flags values. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dbf820a50a39..6bdb0db3e825 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -37,6 +37,7 @@ #include <linux/in6.h> #include <linux/if_packet.h> #include <net/flow.h> +#include <net/page_pool.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_common.h> #endif @@ -667,6 +668,8 @@ typedef unsigned char *sk_buff_data_t; * @head_frag: skb was allocated from page fragments, * not allocated by kmalloc() or vmalloc(). * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @pp_recycle: mark the packet for recycling instead of freeing (implies + * page_pool support on driver) * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed @@ -686,6 +689,7 @@ typedef unsigned char *sk_buff_data_t; * CHECKSUM_UNNECESSARY (max 3) * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB + * @slow_gro: state present at GRO time, slower prepare step required * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @secmark: security marking @@ -791,10 +795,12 @@ struct sk_buff { fclone:2, peeked:1, head_frag:1, - pfmemalloc:1; + pfmemalloc:1, + pp_recycle:1; /* page_pool recycle indicator */ #ifdef CONFIG_SKB_EXTENSIONS __u8 active_extensions; #endif + /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -858,13 +864,14 @@ struct sk_buff { __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; #endif -#ifdef CONFIG_NET_REDIRECT __u8 redirected:1; +#ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; #endif + __u8 slow_gro:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -985,6 +992,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) */ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) { + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst; } @@ -1001,6 +1009,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } @@ -1174,6 +1183,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, int pskb_expand_head(struct sk_buff *skb, int 
nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); +struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, @@ -3081,12 +3091,20 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) /** * __skb_frag_unref - release a reference on a paged fragment. * @frag: the paged fragment + * @recycle: recycle the page if allocated via page_pool * - * Releases a reference on the paged fragment @frag. + * Releases a reference on the paged fragment @frag + * or recycles the page via the page_pool API. */ -static inline void __skb_frag_unref(skb_frag_t *frag) +static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - put_page(skb_frag_page(frag)); + struct page *page = skb_frag_page(frag); + +#ifdef CONFIG_PAGE_POOL + if (recycle && page_pool_return_skb_page(page)) + return; +#endif + put_page(page); } /** @@ -3098,7 +3116,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f]); + __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); } /** @@ -4203,6 +4221,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb) static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) + skb->slow_gro |= !!nfct; skb->_nfct = nfct; #endif } @@ -4362,6 +4381,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif + dst->slow_gro = src->slow_gro; __nf_copy(dst, src, true); } @@ -4651,17 +4671,13 @@ static inline __wsum lco_csum(struct sk_buff *skb) static inline bool skb_is_redirected(const struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT return skb->redirected; -#else - return false; -#endif } static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) { -#ifdef CONFIG_NET_REDIRECT skb->redirected = 1; +#ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) skb->tstamp = 0; @@ -4670,9 +4686,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) static inline void skb_reset_redirect(struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT skb->redirected = 0; -#endif } static inline bool skb_csum_is_sctp(struct sk_buff *skb) @@ -4697,5 +4711,19 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) #endif } +#ifdef CONFIG_PAGE_POOL +static inline void skb_mark_for_recycle(struct sk_buff *skb) +{ + skb->pp_recycle = 1; +} +#endif + +static inline bool skb_pp_recycle(struct sk_buff *skb, void *data) +{ + if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) + return false; + return page_pool_return_skb_page(virt_to_page(data)); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index aba0f0f429be..14ab0c0bc924 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -126,8 +126,6 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); -int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags, 
- long timeo, int *err); int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); @@ -287,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline void sock_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + kfree_skb(skb); +} + +static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) +{ + if (msg->skb) + sock_drop(psock->sk, msg->skb); + kfree(msg); +} + static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); - list_add_tail(&msg->list, &psock->ingress_msg); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + list_add_tail(&msg->list, &psock->ingress_msg); + else + drop_sk_msg(psock, msg); spin_unlock_bh(&psock->ingress_lock); } @@ -348,7 +380,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err) struct sock *sk = psock->sk; sk->sk_err = err; - sk->sk_error_report(sk); + sk_error_report(sk); } struct sk_psock *sk_psock_init(struct sock *sk, int node); @@ -408,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk, psock->psock_update_sk_prot(sk, psock, true); } -static inline void sk_psock_set_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - set_bit(bit, &psock->state); -} - -static inline void sk_psock_clear_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - clear_bit(bit, &psock->state); -} - -static inline bool sk_psock_test_state(const struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - return test_bit(bit, &psock->state); -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; diff --git a/include/linux/slab.h b/include/linux/slab.h index 0c97d788762c..083f3ce550bc 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -305,9 +305,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, /* * Whenever changing this, take care of that kmalloc_type() and * create_kmalloc_caches() still work as intended. + * + * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP + * is for accounted but unreclaimable and non-dma objects. All the other + * kmem caches can have both accounted and unaccounted objects. */ enum kmalloc_cache_type { KMALLOC_NORMAL = 0, +#ifndef CONFIG_ZONE_DMA + KMALLOC_DMA = KMALLOC_NORMAL, +#endif +#ifndef CONFIG_MEMCG_KMEM + KMALLOC_CGROUP = KMALLOC_NORMAL, +#else + KMALLOC_CGROUP, +#endif KMALLOC_RECLAIM, #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, @@ -319,24 +331,36 @@ enum kmalloc_cache_type { extern struct kmem_cache * kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; +/* + * Define gfp bits that should not be set for KMALLOC_NORMAL. + */ +#define KMALLOC_NOT_NORMAL_BITS \ + (__GFP_RECLAIMABLE | \ + (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ + (IS_ENABLED(CONFIG_MEMCG_KMEM) ? 
__GFP_ACCOUNT : 0)) + static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) { -#ifdef CONFIG_ZONE_DMA /* * The most common case is KMALLOC_NORMAL, so test for it - * with a single branch for both flags. + * with a single branch for all the relevant flags. */ - if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0)) + if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0)) return KMALLOC_NORMAL; /* - * At least one of the flags has to be set. If both are, __GFP_DMA - * is more important. + * At least one of the flags has to be set. Their priorities in + * decreasing order are: + * 1) __GFP_DMA + * 2) __GFP_RECLAIMABLE + * 3) __GFP_ACCOUNT */ - return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM; -#else - return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL; -#endif + if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA)) + return KMALLOC_DMA; + if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE)) + return KMALLOC_RECLAIM; + else + return KMALLOC_CGROUP; } /* @@ -346,8 +370,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) * 1 = 65 .. 96 bytes * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 2^n + * + * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized; + * typical usage is via kmalloc_index() and therefore evaluated at compile-time. + * Callers where !size_is_constant should only be test modules, where runtime + * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab(). */ -static __always_inline unsigned int kmalloc_index(size_t size) +static __always_inline unsigned int __kmalloc_index(size_t size, + bool size_is_constant) { if (!size) return 0; @@ -382,12 +412,17 @@ static __always_inline unsigned int kmalloc_index(size_t size) if (size <= 8 * 1024 * 1024) return 23; if (size <= 16 * 1024 * 1024) return 24; if (size <= 32 * 1024 * 1024) return 25; - if (size <= 64 * 1024 * 1024) return 26; - BUG(); + + if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000) + && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant) + BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); + else + BUG(); /* Will never be reached. Needed because the compiler may complain */ return -1; } +#define kmalloc_index(s) __kmalloc_index(s, true) #endif /* !CONFIG_SLOB */ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index dcde82a4434c..85499f0586b0 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -10,6 +10,7 @@ #include <linux/kfence.h> #include <linux/kobject.h> #include <linux/reciprocal_div.h> +#include <linux/local_lock.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -40,6 +41,10 @@ enum stat_item { CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ NR_SLUB_STAT_ITEMS }; +/* + * When changing the layout, make sure freelist and tid are still compatible + * with this_cpu_cmpxchg_double() alignment requirements. 
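To make the slab.h cache selection above concrete, a worked example under the assumption CONFIG_MEMCG_KMEM=y (example_alloc is hypothetical; the accounted caches are the ones this series names kmalloc-cg-<size>):

#include <linux/slab.h>

static void *example_alloc(void)
{
	/*
	 * kmalloc(100, GFP_KERNEL | __GFP_ACCOUNT):
	 * - kmalloc_type(): __GFP_ACCOUNT is in KMALLOC_NOT_NORMAL_BITS and
	 *   neither __GFP_DMA nor __GFP_RECLAIMABLE is set -> KMALLOC_CGROUP
	 * - kmalloc_index(100): 100 exceeds the special 96-byte cache
	 *   (index 1), so it rounds up to 2^7 = 128 bytes -> index 7
	 * The object is therefore served from
	 * kmalloc_caches[KMALLOC_CGROUP][7], the 128-byte "kmalloc-cg-128"
	 * cache.
	 */
	return kmalloc(100, GFP_KERNEL | __GFP_ACCOUNT);
}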
+ */ struct kmem_cache_cpu { void **freelist; /* Pointer to next available object */ unsigned long tid; /* Globally unique transaction id */ @@ -47,6 +52,7 @@ struct kmem_cache_cpu { #ifdef CONFIG_SLUB_CPU_PARTIAL struct page *partial; /* Partially allocated frozen slabs */ #endif + local_lock_t lock; /* Protects the fields above */ #ifdef CONFIG_SLUB_STATS unsigned stat[NR_SLUB_STAT_ITEMS]; #endif diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h new file mode 100644 index 000000000000..88bd8de0e803 --- /dev/null +++ b/include/linux/soc/ixp4xx/cpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IXP4XX cpu type detection + * + * Copyright (C) 2007 MontaVista Software, Inc. + */ + +#ifndef __SOC_IXP4XX_CPU_H__ +#define __SOC_IXP4XX_CPU_H__ + +#include <linux/io.h> +#ifdef CONFIG_ARM +#include <asm/cputype.h> +#endif + +/* Processor id value in CP15 Register 0 */ +#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */ +#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0 + +#define IXP43X_PROCESSOR_ID_VALUE 0x69054040 +#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0 + +#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */ +#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0 + +/* "fuse" bits of IXP_EXP_CFG2 */ +/* All IXP4xx CPUs */ +#define IXP4XX_FEATURE_RCOMP (1 << 0) +#define IXP4XX_FEATURE_USB_DEVICE (1 << 1) +#define IXP4XX_FEATURE_HASH (1 << 2) +#define IXP4XX_FEATURE_AES (1 << 3) +#define IXP4XX_FEATURE_DES (1 << 4) +#define IXP4XX_FEATURE_HDLC (1 << 5) +#define IXP4XX_FEATURE_AAL (1 << 6) +#define IXP4XX_FEATURE_HSS (1 << 7) +#define IXP4XX_FEATURE_UTOPIA (1 << 8) +#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9) +#define IXP4XX_FEATURE_NPEC_ETH (1 << 10) +#define IXP4XX_FEATURE_RESET_NPEA (1 << 11) +#define IXP4XX_FEATURE_RESET_NPEB (1 << 12) +#define IXP4XX_FEATURE_RESET_NPEC (1 << 13) +#define IXP4XX_FEATURE_PCI (1 << 14) +#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16) +#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22) +#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \ + IXP4XX_FEATURE_USB_DEVICE | \ + IXP4XX_FEATURE_HASH | \ + IXP4XX_FEATURE_AES | \ + IXP4XX_FEATURE_DES | \ + IXP4XX_FEATURE_HDLC | \ + IXP4XX_FEATURE_AAL | \ + IXP4XX_FEATURE_HSS | \ + IXP4XX_FEATURE_UTOPIA | \ + IXP4XX_FEATURE_NPEB_ETH0 | \ + IXP4XX_FEATURE_NPEC_ETH | \ + IXP4XX_FEATURE_RESET_NPEA | \ + IXP4XX_FEATURE_RESET_NPEB | \ + IXP4XX_FEATURE_RESET_NPEC | \ + IXP4XX_FEATURE_PCI | \ + IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \ + IXP4XX_FEATURE_XSCALE_MAX_FREQ) + + +/* IXP43x/46x CPUs */ +#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15) +#define IXP4XX_FEATURE_USB_HOST (1 << 18) +#define IXP4XX_FEATURE_NPEA_ETH (1 << 19) +#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \ + IXP4XX_FEATURE_ECC_TIMESYNC | \ + IXP4XX_FEATURE_USB_HOST | \ + IXP4XX_FEATURE_NPEA_ETH) + +/* IXP46x CPU (including IXP455) only */ +#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20) +#define IXP4XX_FEATURE_RSA (1 << 21) +#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \ + IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \ + IXP4XX_FEATURE_RSA) + +#ifdef CONFIG_ARCH_IXP4XX +#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \ + IXP42X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \ + IXP42X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \ + IXP43X_PROCESSOR_ID_VALUE) +#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \ + 
IXP46X_PROCESSOR_ID_VALUE) + +u32 ixp4xx_read_feature_bits(void); +void ixp4xx_write_feature_bits(u32 value); +#else +#define cpu_is_ixp42x_rev_a0() 0 +#define cpu_is_ixp42x() 0 +#define cpu_is_ixp43x() 0 +#define cpu_is_ixp46x() 0 +static inline u32 ixp4xx_read_feature_bits(void) +{ + return 0; +} +static inline void ixp4xx_write_feature_bits(u32 value) +{ +} +#endif + +#endif /* _ASM_ARCH_CPU_H */ diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h index 28c04d918f0f..fa1d6af0164e 100644 --- a/include/linux/soc/marvell/octeontx2/asm.h +++ b/include/linux/soc/marvell/octeontx2/asm.h @@ -22,12 +22,17 @@ : [rs]"r" (ioaddr)); \ (result); \ }) +/* + * STEORL store to memory with release semantics. + * This will avoid using DMB barrier after each LMTST + * operation. + */ #define cn10k_lmt_flush(val, addr) \ ({ \ __asm__ volatile(".cpu generic+lse\n" \ - "steor %x[rf],[%[rs]]" \ - : [rf]"+r"(val) \ - : [rs]"r"(addr)); \ + "steorl %x[rf],[%[rs]]" \ + : [rf] "+r"(val) \ + : [rs] "r"(addr)); \ }) #else #define otx2_lmt_flush(ioaddr) ({ 0; }) diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index f2645ec52520..60e66fc9b6bf 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -29,6 +29,7 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_NCPB 0x6270636E #define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f #define QCOM_SMD_RPM_QPIC_CLK 0x63697071 +#define QCOM_SMD_RPM_QUP_CLK 0x707571 #define QCOM_SMD_RPM_SMPA 0x61706d73 #define QCOM_SMD_RPM_SMPB 0x62706d73 #define QCOM_SMD_RPM_SPDM 0x63707362 diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 63ad8cddad14..652c0158baac 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -14,6 +14,7 @@ struct qcom_smem_state_ops { #ifdef CONFIG_QCOM_SMEM_STATE struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); +struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value); @@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, return ERR_PTR(-EINVAL); } +static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, + const char *con_id, + unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + static inline void qcom_smem_state_put(struct qcom_smem_state *state) { } diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index fc9250fb3133..aa840ed043e1 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -611,12 +611,6 @@ #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 -/* For EXYNOS_CENTRAL_SEQ_OPTION */ -#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16) -#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BUT(17) -#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24) -#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25) - #define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4) #define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5) #define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6) diff --git a/include/linux/socket.h b/include/linux/socket.h index b8fc5c53ba6f..041d6032a348 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -223,8 +223,11 @@ struct ucred { * reuses AF_INET address family */ #define 
AF_XDP 44 /* XDP sockets */ +#define AF_MCTP 45 /* Management component + * transport protocol + */ -#define AF_MAX 45 /* For now.. */ +#define AF_MAX 46 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -274,6 +277,7 @@ struct ucred { #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC #define PF_XDP AF_XDP +#define PF_MCTP AF_MCTP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ @@ -421,6 +425,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags, unsigned long nofile); +extern struct file *do_accept(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); @@ -438,6 +445,4 @@ extern int __sys_socketpair(int family, int type, int protocol, int __user *usockvec); extern int __sys_shutdown_sock(struct socket *sock, int how); extern int __sys_shutdown(int fd, int how); - -extern struct ns_common *get_net_ns(struct ns_common *ns); #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index ced07f8fde87..76ce3f3ac0f2 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -612,6 +612,7 @@ struct sdw_bus_params { * @update_status: Update Slave status * @bus_config: Update the bus config for Slave * @port_prep: Prepare the port with parameters + * @clk_stop: handle imp-def sequences before and after prepare and de-prepare */ struct sdw_slave_ops { int (*read_prop)(struct sdw_slave *sdw); @@ -624,7 +625,6 @@ struct sdw_slave_ops { int (*port_prep)(struct sdw_slave *slave, struct sdw_prepare_ch *prepare_ch, enum sdw_port_prep_ops pre_ops); - int (*get_clk_stop_mode)(struct sdw_slave *slave); int (*clk_stop)(struct sdw_slave *slave, enum sdw_clk_stop_mode mode, enum sdw_clk_stop_type type); @@ -661,6 +661,8 @@ struct sdw_slave_ops { * initialized * @first_interrupt_done: status flag tracking if the interrupt handling * for a Slave happens for the first time after enumeration + * @is_mockup_device: status flag used to squelch errors in the command/control + * protocol for SoundWire mockup devices */ struct sdw_slave { struct sdw_slave_id id; @@ -675,7 +677,6 @@ struct sdw_slave { struct list_head node; struct completion port_ready[SDW_MAX_PORTS]; unsigned int m_port_map[SDW_MAX_PORTS]; - enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; bool probed; @@ -684,6 +685,7 @@ struct sdw_slave { struct completion initialization_complete; u32 unattach_request; bool first_interrupt_done; + bool is_mockup_device; }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) @@ -1040,7 +1042,10 @@ int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value); int sdw_read_no_pm(struct sdw_slave *slave, u32 addr); int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); -int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val); +int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val); +int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val); + int sdw_compare_devid(struct sdw_slave *slave, struct 
sdw_slave_id id); void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id); diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 3a5446ac014a..8a463b8fc12a 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -7,6 +7,85 @@ #include <linux/irqreturn.h> #include <linux/soundwire/sdw.h> +#define SDW_SHIM_BASE 0x2C000 +#define SDW_ALH_BASE 0x2C800 +#define SDW_LINK_BASE 0x30000 +#define SDW_LINK_SIZE 0x10000 + +/* Intel SHIM Registers Definition */ +#define SDW_SHIM_LCAP 0x0 +#define SDW_SHIM_LCTL 0x4 +#define SDW_SHIM_IPPTR 0x8 +#define SDW_SHIM_SYNC 0xC + +#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x)) +#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x)) +#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x)) +#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x)) +#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x)) +#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x)) + +#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x)) +#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x)) +#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x)) + +#define SDW_SHIM_WAKEEN 0x190 +#define SDW_SHIM_WAKESTS 0x192 + +#define SDW_SHIM_LCTL_SPA BIT(0) +#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0) +#define SDW_SHIM_LCTL_CPA BIT(8) +#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8) + +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) +#define SDW_SHIM_SYNC_SYNCCPU BIT(15) +#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) +#define SDW_SHIM_SYNC_CMDSYNC BIT(16) +#define SDW_SHIM_SYNC_SYNCGO BIT(24) + +#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8) + +#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0) +#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4) +#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8) +#define SDW_SHIM_PCMSYCM_DIR BIT(15) + +#define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8) +#define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13) + +#define SDW_SHIM_IOCTL_MIF BIT(0) +#define SDW_SHIM_IOCTL_CO BIT(1) +#define SDW_SHIM_IOCTL_COE BIT(2) +#define SDW_SHIM_IOCTL_DO BIT(3) +#define SDW_SHIM_IOCTL_DOE BIT(4) +#define SDW_SHIM_IOCTL_BKE BIT(5) +#define SDW_SHIM_IOCTL_WPDD BIT(6) +#define SDW_SHIM_IOCTL_CIBD BIT(8) +#define SDW_SHIM_IOCTL_DIBD BIT(9) + +#define SDW_SHIM_CTMCTL_DACTQE BIT(0) +#define SDW_SHIM_CTMCTL_DODS BIT(1) +#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3) + +#define SDW_SHIM_WAKEEN_ENABLE BIT(0) +#define SDW_SHIM_WAKESTS_STATUS BIT(0) + +/* Intel ALH Register definitions */ +#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x))) +#define SDW_ALH_NUM_STREAMS 64 + +#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3 +#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0) +#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16) + /** * struct sdw_intel_stream_params_data: configuration passed during * the @params_stream callback, e.g. 
for interaction with DSP @@ -58,7 +137,7 @@ struct sdw_intel_acpi_info { u32 link_mask; }; -struct sdw_intel_link_res; +struct sdw_intel_link_dev; /* Intel clock-stop/pm_runtime quirk definitions */ @@ -109,13 +188,15 @@ struct sdw_intel_slave_id { * Controller * @num_slaves: total number of devices exposed across all enabled links * @handle: ACPI parent handle - * @links: information for each link (controller-specific and kept + * @ldev: information for each link (controller-specific and kept * opaque here) * @ids: array of slave_id, representing Slaves exposed across all enabled * links * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. * @shim_mask: flags to track initialization of SHIM shared registers + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_ctx { int count; @@ -123,11 +204,13 @@ struct sdw_intel_ctx { u32 link_mask; int num_slaves; acpi_handle handle; - struct sdw_intel_link_res *links; + struct sdw_intel_link_dev **ldev; struct sdw_intel_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ u32 shim_mask; + u32 shim_base; + u32 alh_base; }; /** @@ -146,6 +229,8 @@ struct sdw_intel_ctx { * machine-specific quirks are handled in the DSP driver. * @clock_stop_quirks: mask array of possible behaviors requested by the * DSP driver. The quirks are common for all links for now. + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_res { int count; @@ -157,6 +242,8 @@ struct sdw_intel_res { struct device *dev; u32 link_mask; u32 clock_stop_quirks; + u32 shim_base; + u32 alh_base; }; /* diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 433c20e2f46e..21449067aedb 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h @@ -2,7 +2,7 @@ #ifndef LINUX_SPI_MAX7301_H #define LINUX_SPI_MAX7301_H -#include <linux/gpio.h> +#include <linux/gpio/driver.h> /* * Some registers must be read back to modify. diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 31f00c7f4f59..eaab121ee575 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -2,8 +2,10 @@ /* * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs */ -#ifndef __linux_pxa2xx_spi_h -#define __linux_pxa2xx_spi_h +#ifndef __LINUX_SPI_PXA2XX_SPI_H +#define __LINUX_SPI_PXA2XX_SPI_H + +#include <linux/types.h> #include <linux/pxa2xx_ssp.h> @@ -12,7 +14,10 @@ struct dma_chan; -/* device.platform_data for SSP controller devices */ +/* + * The platform data for SSP controller devices + * (resides in device.platform_data). + */ struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; @@ -28,8 +33,11 @@ struct pxa2xx_spi_controller { struct ssp_device ssp; }; -/* spi_board_info.controller_data for SPI slave devices, - * copied to spi_device.platform_data ... mostly for dma tuning +/* + * The controller specific data for SPI slave devices + * (resides in spi_board_info.controller_data), + * copied to spi_device.platform_data ... mostly for + * DMA tuning. 
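A hypothetical board-file sketch of this controller_data path, using the pxa2xx_spi_chip fields declared just below (all names and values are illustrative):

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

static struct pxa2xx_spi_chip my_codec_chip = {
	.tx_threshold	= 8,
	.rx_threshold	= 8,
	.dma_burst_size	= 8,
};

static struct spi_board_info my_board_info __initdata = {
	.modalias	 = "my-codec",
	.max_speed_hz	 = 1000000,
	.bus_num	 = 1,
	.chip_select	 = 0,
	/* copied to spi_device.platform_data when the device is created */
	.controller_data = &my_codec_chip,
};
/* registered once at boot: spi_register_board_info(&my_board_info, 1); */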
*/ struct pxa2xx_spi_chip { u8 tx_threshold; @@ -49,4 +57,5 @@ struct pxa2xx_spi_chip { extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); #endif -#endif + +#endif /* __LINUX_SPI_PXA2XX_SPI_H */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 2b65c9edc34e..85e2ff7b840d 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -250,6 +250,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem) * the currently mapped area), and the caller of * spi_mem_dirmap_write() is responsible for calling it again in * this case. + * @poll_status: poll memory device status until (status & mask) == match or + * when the timeout has expired. It fills the data buffer with + * the last status value. * * This interface should be implemented by SPI controllers providing an * high-level interface to execute SPI memory operation, which is usually the @@ -274,6 +277,12 @@ struct spi_controller_mem_ops { u64 offs, size_t len, void *buf); ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, const void *buf); + int (*poll_status)(struct spi_mem *mem, + const struct spi_mem_op *op, + u16 mask, u16 match, + unsigned long initial_delay_us, + unsigned long polling_rate_us, + unsigned long timeout_ms); }; /** @@ -369,6 +378,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, void devm_spi_mem_dirmap_destroy(struct device *dev, struct spi_mem_dirmap_desc *desc); +int spi_mem_poll_status(struct spi_mem *mem, + const struct spi_mem_op *op, + u16 mask, u16 match, + unsigned long initial_delay_us, + unsigned long polling_delay_us, + u16 timeout_ms); + int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 74239d65c7fd..8371bca13729 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer - * + * @cs_setup: delay to be introduced by the controller after CS is asserted + * @cs_hold: delay to be introduced by the controller before CS is deasserted + * @cs_inactive: delay to be introduced by the controller after CS is + * deasserted. If @cs_change_delay is used from @spi_transfer, then the + * two delays will be added up. 
* @statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave @@ -188,6 +192,10 @@ struct spi_device { int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ struct spi_delay word_delay; /* inter-word delay */ + /* CS delays */ + struct spi_delay cs_setup; + struct spi_delay cs_hold; + struct spi_delay cs_inactive; /* the statistics */ struct spi_statistics statistics; @@ -299,6 +307,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); + /* use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) @@ -337,6 +347,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver * @slave: indicates that this is an SPI slave controller + * @devm_allocated: whether the allocation of this struct is devres-managed * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @max_message_size: function that returns the max message size for @@ -410,11 +421,6 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_setup: delay to be introduced by the controller after CS is asserted - * @cs_hold: delay to be introduced by the controller before CS is deasserted - * @cs_inactive: delay to be introduced by the controller after CS is - * deasserted. If @cs_change_delay is used from @spi_transfer, then the - * two delays will be added up. * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per * CS number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods @@ -509,7 +515,7 @@ struct spi_controller { #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - /* flag indicating this is a non-devres managed controller */ + /* flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; /* flag indicating this is an SPI slave controller */ @@ -548,8 +554,7 @@ struct spi_controller { * to configure specific CS timing through spi_set_cs_timing() after * spi_setup(). */ - int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup, - struct spi_delay *hold, struct spi_delay *inactive); + int (*set_cs_timing)(struct spi_device *spi); /* bidirectional bulk transfers * @@ -586,6 +591,7 @@ struct spi_controller { bool (*can_dma)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer); + struct device *dma_map_dev; /* * These hooks are for drivers that want to use the generic @@ -635,11 +641,6 @@ struct spi_controller { /* Optimized handlers for SPI memory-like operations. 
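One such memory-like operation is the status polling added above. A hypothetical caller of spi_mem_poll_status(), modeled on flash write-in-progress polling (the 0x05 opcode and the WIP bit position are assumptions about the device, not part of the API):

#include <linux/spi/spi-mem.h>

static int wait_flash_ready(struct spi_mem *mem)
{
	u8 status;
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),	/* Read Status Register */
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, &status, 1));

	/* wait up to 1s for WIP (bit 0) to read back as 0, polling every 10us */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 10, 1000);
}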
*/ const struct spi_controller_mem_ops *mem_ops; - /* CS delays */ - struct spi_delay cs_setup; - struct spi_delay cs_hold; - struct spi_delay cs_inactive; - /* gpio chip select */ int *cs_gpios; struct gpio_desc **cs_gpiods; @@ -1108,11 +1109,6 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } -extern int spi_set_cs_timing(struct spi_device *spi, - struct spi_delay *setup, - struct spi_delay *hold, - struct spi_delay *inactive); - extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 79897841a2cc..45310ea1b1d7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -12,6 +12,8 @@ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * + * linux/spinlock_types_raw: + * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -31,6 +33,8 @@ * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * + * linux/spinlock_types_raw: + * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -/* Include rwlock functions */ +#ifndef CONFIG_PREEMPT_RT +/* Include rwlock functions for !RT */ #include <linux/rwlock.h> +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include <linux/spinlock_api_up.h> #endif +/* Non PREEMPT_RT kernel, map to raw spinlocks: */ +#ifndef CONFIG_PREEMPT_RT + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +#else /* !CONFIG_PREEMPT_RT */ +# include <linux/spinlock_rt.h> +#endif /* CONFIG_PREEMPT_RT */ + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 19a9be9d97ee..6b8e1a0b137b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } +/* PREEMPT_RT has its own rwlock implementation */ +#ifndef CONFIG_PREEMPT_RT #include <linux/rwlock_api_smp.h> +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 000000000000..835aedaf68ac --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu); +#else +static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu) +{ +} +#endif + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, false); \ +} while (0) + +#define local_spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, true); \ +} while (0) + +extern void rt_spin_lock(spinlock_t *lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); +extern void rt_spin_unlock(spinlock_t *lock); +extern void rt_spin_lock_unlock(spinlock_t *lock); +extern int rt_spin_trylock_bh(spinlock_t *lock); +extern int rt_spin_trylock(spinlock_t *lock); + +static __always_inline void spin_lock(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#ifdef CONFIG_LOCKDEP +# define __spin_lock_nested(lock, subclass) \ + rt_spin_lock_nested(lock, subclass) + +# define __spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __spin_lock_nested(lock, subclass); \ + } while (0) + +#else + /* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. + */ +# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(((void)(subclass), (lock)), flags) +#endif + +#define spin_lock_nested(lock, subclass) \ + __spin_lock_nested(lock, subclass) + +#define spin_lock_nest_lock(lock, nest_lock) \ + __spin_lock_nest_lock(lock, nest_lock) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + __spin_lock_irqsave_nested(lock, flags, subclass) + +static __always_inline void spin_lock_bh(spinlock_t *lock) +{ + /* Investigate: Drop bh when blocking ? 
*/ + local_bh_disable(); + rt_spin_lock(lock); +} + +static __always_inline void spin_lock_irq(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static __always_inline void spin_unlock(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_bh(spinlock_t *lock) +{ + rt_spin_unlock(lock); + local_bh_enable(); +} + +static __always_inline void spin_unlock_irq(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, + unsigned long flags) +{ + rt_spin_unlock(lock); +} + +#define spin_trylock(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock_bh(lock) \ + __cond_lock(lock, rt_spin_trylock_bh(lock)) + +#define spin_trylock_irq(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define __spin_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = spin_trylock(lock); \ + __locked; \ +}) + +#define spin_trylock_irqsave(lock, flags) \ + __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) + +#define spin_is_contended(lock) (((void)(lock), 0)) + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_base_is_locked(&lock->lock); +} + +#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock)) + +#include <linux/rwlock_rt.h> + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index b981caafe8bf..2dfa35ffec76 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,65 +9,11 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include <asm/spinlock_types.h> -#else -# include <linux/spinlock_types_up.h> -#endif - -#include <linux/lockdep_types.h> +#include <linux/spinlock_types_raw.h> -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RAW_SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SPIN, \ - } -# define SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RAW_SPIN_DEP_MAP_INIT(lockname) -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - RAW_SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +#ifndef CONFIG_PREEMPT_RT +/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ typedef struct spinlock { union { struct raw_spinlock rlock; @@ -96,6 +42,35 @@ typedef struct spinlock { #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +/* PREEMPT_RT kernels map spinlock to rt_mutex */ +#include 
<linux/rtmutex.h> + +typedef struct spinlock { + struct rt_mutex_base lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#define __SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) \ + } + +#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + LOCAL_SPIN_DEP_MAP_INIT(name) \ + } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 000000000000..91cb36b65a17 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,73 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#include <linux/types.h> + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep_types.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SPIN, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } + +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + .lock_type = LD_LOCK_PERCPU, \ + } +#else +# define RAW_SPIN_DEP_MAP_INIT(lockname) +# define SPIN_DEP_MAP_INIT(lockname) +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +{ \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + RAW_SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index a0895bbf71ce..e6011a9975af 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); +#ifdef CONFIG_SRCU +void srcu_init(void); +#else /* #ifdef CONFIG_SRCU */ +static inline void srcu_init(void) { } +#endif /* #else #ifdef CONFIG_SRCU */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC /** diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 0e0cf4d6a72a..6cfaa0a9a9b9 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) int idx; idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; - WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); + 
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); return idx; } @@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, { int idx; - idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; + idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, - READ_ONCE(ssp->srcu_lock_nesting[!idx]), - READ_ONCE(ssp->srcu_lock_nesting[idx])); + data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])), + data_race(READ_ONCE(ssp->srcu_lock_nesting[idx]))); } #endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 9cfcc8a756ae..cb1f4351e8ba 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -82,9 +82,7 @@ struct srcu_struct { /* callback for the barrier */ /* operation. */ struct delayed_work work; -#ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ }; /* Values for state variable (bottom bits of ->srcu_gp_seq). */ diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 0d5a2691e7e9..f9b53acb4e02 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h index 3f8bc973d67d..19253bfacd1a 100644 --- a/include/linux/ssb/ssb_driver_extif.h +++ b/include/linux/ssb/ssb_driver_extif.h @@ -197,7 +197,7 @@ struct ssb_extif { static inline bool ssb_extif_available(struct ssb_extif *extif) { - return 0; + return false; } static inline diff --git a/include/linux/stat.h b/include/linux/stat.h index fff27e603814..7df06931f25d 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -34,6 +34,10 @@ struct kstat { STATX_ATTR_ENCRYPTED | \ STATX_ATTR_VERITY \ )/* Attrs corresponding to FS_*_FL flags */ +#define KSTAT_ATTR_VFS_FLAGS \ + (STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND \ + ) /* Attrs corresponding to S_* flags that are enforced by the VFS */ u64 ino; dev_t dev; dev_t rdev; diff --git a/include/linux/static_call.h b/include/linux/static_call.h index fc94faa53b5b..3e56a9751c06 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -17,11 +17,17 @@ * DECLARE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL_NULL(name, typename); + * DEFINE_STATIC_CALL_RET0(name, typename); + * + * __static_call_return0; + * * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); * static_call_query(name); * + * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}() + * * Usage example: * * # Start with the following functions (with identical prototypes): @@ -96,6 +102,33 @@ * To query which function is currently set to be called, use: * * func = static_call_query(name); + * + * + * DEFINE_STATIC_CALL_RET0 / __static_call_return0: + * + * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the + * conditional void function call, DEFINE_STATIC_CALL_RET0 / + * __static_call_return0 optimize the do nothing return 0 function. + * + * This feature is strictly UB per the C standard (since it casts a function + * pointer to a different signature) and relies on the architecture ABI to + * make things work. 
In particular it relies on Caller Stack-cleanup and the + * whole return register being clobbered for short return values. All normal + * CDECL style ABIs conform. + * + * In particular the x86_64 implementation replaces the 5 byte CALL + * instruction at the callsite with a 5 byte clear of the RAX register, + * completely eliding any function call overhead. + * + * Notably argument setup is unconditional. + * + * + * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP(): + * + * The difference is that the _TRAMP variant tries to only export the + * trampoline with the result that a module can use static_call{,_cond}() but + * not static_call_update(). + * */ #include <linux/types.h> diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h new file mode 100644 index 000000000000..c8dc7f4f390c --- /dev/null +++ b/include/linux/stdarg.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#ifndef _LINUX_STDARG_H +#define _LINUX_STDARG_H + +typedef __builtin_va_list va_list; +#define va_start(v, l) __builtin_va_start(v, l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v, T) __builtin_va_arg(v, T) +#define va_copy(d, s) __builtin_va_copy(d, s) + +#endif diff --git a/include/linux/stm.h b/include/linux/stm.h index c6f577ab6f21..3b22689512be 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -57,7 +57,7 @@ struct stm_device; * * Normally, an STM device will have a range of masters available to software * and the rest being statically assigned to various hardware trace sources. - * The former is defined by the the range [@sw_start..@sw_end] of the device + * The former is defined by the range [@sw_start..@sw_end] of the device * description. That is, the lowest master that can be allocated to software * writers is @sw_start and data from this writer will appear as @sw_start * master in the STP stream.
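/*
 * Editor's sketch, not part of the patch: the DEFINE_STATIC_CALL_RET0 /
 * __static_call_return0 machinery documented in static_call.h above, in
 * use. All sketch_* names are hypothetical.
 */
#include <linux/static_call.h>

static int sketch_slow_path(int arg)
{
	return arg * 2;
}

/* Starts out as "return 0"; on x86_64 each callsite is a 5-byte clear of
 * RAX, so the disabled case pays no call overhead at all. The function is
 * named here only to give the call its type. */
DEFINE_STATIC_CALL_RET0(sketch_call, sketch_slow_path);

static int sketch_hot_path(int arg)
{
	return static_call(sketch_call)(arg);
}

static void sketch_enable(void)
{
	/* Patch every callsite to call sketch_slow_path() directly. */
	static_call_update(sketch_call, &sketch_slow_path);
}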
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 0db36360ef21..a6f03b36fc4f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,7 +115,9 @@ struct stmmac_axi { #define EST_GCL 1024 struct stmmac_est { + struct mutex lock; int enable; + u32 btr_reserve[2]; u32 btr_offset[2]; u32 btr[2]; u32 ctr[2]; @@ -172,6 +174,18 @@ struct stmmac_fpe_cfg { enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ }; +struct stmmac_safety_feature_cfg { + u32 tsoee; + u32 mrxpee; + u32 mestee; + u32 mrxee; + u32 mtxee; + u32 epsi; + u32 edpp; + u32 prtyen; + u32 tmouten; +}; + struct plat_stmmacenet_data { int bus_id; int phy_addr; @@ -184,6 +198,7 @@ struct plat_stmmacenet_data { struct stmmac_dma_cfg *dma_cfg; struct stmmac_est *est; struct stmmac_fpe_cfg *fpe_cfg; + struct stmmac_safety_feature_cfg *safety_feat_cfg; int clk_csr; int has_gmac; int enh_desc; @@ -210,6 +225,7 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*serdes_powerup)(struct net_device *ndev, void *priv); void (*serdes_powerdown)(struct net_device *ndev, void *priv); + void (*speed_mode_2500)(struct net_device *ndev, void *priv); void (*ptp_clk_freq_config)(void *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); @@ -223,8 +239,10 @@ struct plat_stmmacenet_data { struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; unsigned int clk_ref_rate; + unsigned int mult_fact_100ns; s32 ptp_max_adj; struct reset_control *stmmac_rst; + struct reset_control *stmmac_ahb_rst; struct stmmac_axi *axi; int has_gmac4; bool has_sun8i; @@ -249,5 +267,6 @@ struct plat_stmmacenet_data { int msi_sfty_ue_vec; int msi_rx_base_vec; int msi_tx_base_vec; + bool use_phy_wol; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index 9521d8cab18e..5e96d656be7a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -2,12 +2,11 @@ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ - #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ #include <linux/errno.h> /* for E2BIG */ -#include <stdarg.h> +#include <linux/stdarg.h> #include <uapi/linux/string.h> extern char *strndup_user(const char __user *, long); @@ -184,12 +183,6 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int kstrtobool(const char *s, bool *res); -static inline int strtobool(const char *s, bool *res) -{ - return kstrtobool(s, res); -} - int match_string(const char * const *array, size_t n, const char *string); int __sysfs_match_string(const char * const *array, size_t n, const char *s); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index fa06dcdc481e..68189c4a2eb1 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_HELPERS_H_ #define _LINUX_STRING_HELPERS_H_ +#include <linux/bits.h> #include <linux/ctype.h> #include <linux/types.h> @@ -18,13 +19,15 @@ enum string_size_units { void string_get_size(u64 size, u64 blk_size, enum string_size_units units, char *buf, int len); -#define UNESCAPE_SPACE 0x01 -#define UNESCAPE_OCTAL 0x02 -#define UNESCAPE_HEX 0x04 -#define UNESCAPE_SPECIAL 0x08 +#define UNESCAPE_SPACE BIT(0) +#define UNESCAPE_OCTAL BIT(1) +#define UNESCAPE_HEX BIT(2) +#define UNESCAPE_SPECIAL BIT(3) #define UNESCAPE_ANY \ 
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) +#define UNESCAPE_ALL_MASK GENMASK(3, 0) + int string_unescape(char *src, char *dst, size_t size, unsigned int flags); static inline int string_unescape_inplace(char *buf, unsigned int flags) @@ -42,22 +45,24 @@ static inline int string_unescape_any_inplace(char *buf) return string_unescape_any(buf, buf, 0); } -#define ESCAPE_SPACE 0x01 -#define ESCAPE_SPECIAL 0x02 -#define ESCAPE_NULL 0x04 -#define ESCAPE_OCTAL 0x08 +#define ESCAPE_SPACE BIT(0) +#define ESCAPE_SPECIAL BIT(1) +#define ESCAPE_NULL BIT(2) +#define ESCAPE_OCTAL BIT(3) #define ESCAPE_ANY \ (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL) -#define ESCAPE_NP 0x10 +#define ESCAPE_NP BIT(4) #define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) -#define ESCAPE_HEX 0x20 +#define ESCAPE_HEX BIT(5) +#define ESCAPE_NA BIT(6) +#define ESCAPE_NAP BIT(7) +#define ESCAPE_APPEND BIT(8) + +#define ESCAPE_ALL_MASK GENMASK(8, 0) int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned int flags, const char *only); -int string_escape_mem_ascii(const char *src, size_t isz, char *dst, - size_t osz); - static inline int string_escape_mem_any_np(const char *src, size_t isz, char *dst, size_t osz, const char *only) { diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index d0965e2997b0..b134b2b3371c 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -14,6 +14,7 @@ #include <linux/kref.h> #include <linux/slab.h> #include <linux/atomic.h> +#include <linux/kstrtox.h> #include <linux/proc_fs.h> /* diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 02e7a5863d28..a4661646adc9 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -14,6 +14,7 @@ #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> +#include <linux/refcount.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/sched.h> @@ -29,12 +30,13 @@ #include <linux/sunrpc/xprtmultipath.h> struct rpc_inode; +struct rpc_sysfs_client; /* * The high-level client handle */ struct rpc_clnt { - atomic_t cl_count; /* Number of references */ + refcount_t cl_count; /* Number of references */ unsigned int cl_clid; /* client id */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ @@ -71,6 +73,7 @@ struct rpc_clnt { #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif + struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */ /* cl_work is only needed after cl_xpi is no longer used, * and that are of similar size */ @@ -79,6 +82,7 @@ struct rpc_clnt { struct work_struct cl_work; }; const struct cred *cl_cred; + unsigned int cl_max_connect; /* max number of transports not to the same IP */ }; /* @@ -133,6 +137,7 @@ struct rpc_create_args { char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ const struct cred *cred; + unsigned int max_connect; }; struct rpc_add_xprt_test { diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 938c2bf29db8..02117ed0fa2e 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -20,6 +20,7 @@ enum rpc_auth_flavors { RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, + RPC_AUTH_TLS = 7, RPC_AUTH_MAXFLAVOR = 8, /* pseudoflavors: */ RPC_AUTH_GSS_KRB5 = 390003, diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index df696efdd675..a237b8dbf608 100644 --- 
a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -121,6 +121,7 @@ struct rpc_task_setup { */ #define RPC_TASK_ASYNC 0x0001 /* is an async task */ #define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ +#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ @@ -139,6 +140,7 @@ struct rpc_task_setup { #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) +#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e91d51ea028b..064c96157d1f 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -19,6 +19,7 @@ #include <linux/sunrpc/svcauth.h> #include <linux/wait.h> #include <linux/mm.h> +#include <linux/pagevec.h> /* statistics for svc_pool structures */ struct svc_pool_stats { @@ -256,6 +257,7 @@ struct svc_rqst { struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ + struct pagevec rq_pvec; struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; @@ -275,13 +277,13 @@ struct svc_rqst { #define RQ_VICTIM (5) /* about to be shut down */ #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ -#define RQ_AUTHERR (8) /* Request status is auth error */ unsigned long rq_flags; /* flags field */ ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ void * rq_auth_data; /* flavor-specific data */ + __be32 rq_auth_stat; /* authentication status */ int rq_auth_slack; /* extra space xdr code * should leave in head * for krb5i, krb5p. 
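/*
 * Editor's sketch, not part of the patch: with the rq_auth_stat field
 * added above, a flavour's ->accept() callback (see the svcauth.h hunk
 * further down, which drops the old __be32 *authp out-parameter) reports
 * the RPC auth status through the request itself. The flavour logic here
 * is hypothetical; the SVC_* verdicts and rpc_autherr_*/rpc_auth_ok codes
 * are the existing sunrpc ones.
 */
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/xdr.h>

static int sketch_accept(struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];

	if (argv->iov_len < 2 * sizeof(__be32)) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* ... flavour-specific credential parsing would go here ... */

	rqstp->rq_auth_stat = rpc_auth_ok;
	return SVC_OK;
}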
@@ -502,6 +504,8 @@ struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node); +void svc_rqst_replace_page(struct svc_rqst *rqstp, + struct page *page); void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); unsigned int svc_pool_map_get(void); @@ -523,6 +527,7 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +const char * svc_proc_name(const struct svc_rqst *rqstp); int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); @@ -532,7 +537,6 @@ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); -__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err); __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *procinfo); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3184465de3a0..24aa159d29a7 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -90,9 +90,9 @@ struct svcxprt_rdma { struct ib_pd *sc_pd; spinlock_t sc_send_lock; - struct list_head sc_send_ctxts; + struct llist_head sc_send_ctxts; spinlock_t sc_rw_ctxt_lock; - struct list_head sc_rw_ctxts; + struct llist_head sc_rw_ctxts; u32 sc_pending_recvs; u32 sc_recv_batch; @@ -150,7 +150,7 @@ struct svc_rdma_recv_ctxt { }; struct svc_rdma_send_ctxt { - struct list_head sc_list; + struct llist_node sc_node; struct rpc_rdma_cid sc_cid; struct ib_send_wr sc_send_wr; @@ -207,6 +207,7 @@ extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, struct svc_rdma_recv_ctxt *rctxt, int status); +extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index b0003866a249..6d9cc9080aca 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -127,7 +127,7 @@ struct auth_ops { char * name; struct module *owner; int flavour; - int (*accept)(struct svc_rqst *rq, __be32 *authp); + int (*accept)(struct svc_rqst *rq); int (*release)(struct svc_rqst *rq); void (*domain_release)(struct auth_domain *); int (*set_client)(struct svc_rqst *rq); @@ -149,7 +149,7 @@ struct auth_ops { struct svc_xprt; -extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); +extern int svc_authenticate(struct svc_rqst *rqstp); extern int svc_authorise(struct svc_rqst *rqstp); extern int svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index a965cbc136ad..b519609af1d0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -95,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) #define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) #define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) +#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS) #define rpc_call 
cpu_to_be32(RPC_CALL) #define rpc_reply cpu_to_be32(RPC_REPLY) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 61b622e334ee..955ea4d7af0b 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -53,6 +53,7 @@ enum rpc_display_format_t { struct rpc_task; struct rpc_xprt; +struct xprt_class; struct seq_file; struct svc_serv; struct net; @@ -182,9 +183,11 @@ enum xprt_transports { XPRT_TRANSPORT_LOCAL = 257, }; +struct rpc_sysfs_xprt; struct rpc_xprt { struct kref kref; /* Reference count */ const struct rpc_xprt_ops *ops; /* transport methods */ + unsigned int id; /* transport id */ const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ @@ -285,9 +288,11 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ - atomic_t inject_disconnect; #endif struct rcu_head rcu; + const struct xprt_class *xprt_class; + struct rpc_sysfs_xprt *xprt_sysfs; + bool main; /*mark if this is the 1st transport */ }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) @@ -370,6 +375,7 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, void xprt_free(struct rpc_xprt *); void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); +void xprt_cleanup_ids(void); static inline int xprt_enable_swap(struct rpc_xprt *xprt) @@ -408,6 +414,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); +void xprt_release_write(struct rpc_xprt *, struct rpc_task *); /* * Reserved bit positions in xprt->state @@ -419,9 +426,12 @@ void xprt_unlock_connect(struct rpc_xprt *, void *); #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) +#define XPRT_OFFLINE (7) +#define XPRT_REMOVE (8) #define XPRT_CONGESTED (9) #define XPRT_CWND_WAIT (10) #define XPRT_WRITE_SPACE (11) +#define XPRT_SND_IS_COOKIE (12) static inline void xprt_set_connected(struct rpc_xprt *xprt) { @@ -492,21 +502,4 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) return test_and_set_bit(XPRT_BINDING, &xprt->state); } -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -extern unsigned int rpc_inject_disconnect; -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ - if (!rpc_inject_disconnect) - return; - if (atomic_dec_return(&xprt->inject_disconnect)) - return; - atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); - xprt->ops->inject_disconnect(xprt); -} -#else -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ -} -#endif - #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index c6cce3fbf29d..bbb8a5fa0816 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -10,12 +10,15 @@ #define _NET_SUNRPC_XPRTMULTIPATH_H struct rpc_xprt_iter_ops; +struct rpc_sysfs_xprt_switch; struct rpc_xprt_switch { spinlock_t xps_lock; struct kref xps_kref; + unsigned int xps_id; unsigned int xps_nxprts; unsigned int xps_nactive; + unsigned int xps_nunique_destaddr_xprts; atomic_long_t xps_queuelen; struct list_head xps_xprt_list; @@ -23,6 +26,7 @@ struct rpc_xprt_switch { const struct rpc_xprt_iter_ops *xps_iter_ops; + struct rpc_sysfs_xprt_switch *xps_sysfs; struct rcu_head xps_rcu; }; @@ -71,4 
+75,7 @@ extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, const struct sockaddr *sap); + +extern void xprt_multipath_cleanup_ids(void); + #endif diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 3c1423ee74b4..8c2a712cb242 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -10,6 +10,7 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); +unsigned short get_srcport(struct rpc_xprt *); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index 0806796eabcb..068e1982ad37 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -6,7 +6,7 @@ * managing access and communication to and from the SSAM EC, as well as main * communication structures and definitions. * - * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com> */ #ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H @@ -796,6 +796,20 @@ enum ssam_event_mask { SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02) /** + * enum ssam_event_notifier_flags - Flags for event notifiers. + * @SSAM_EVENT_NOTIFIER_OBSERVER: + * The corresponding notifier acts as observer. Registering a notifier + * with this flag set will not attempt to enable any event. Equally, + * unregistering will not attempt to disable any event. Note that a + * notifier with this flag may not even correspond to a certain event at + * all, only to a specific event target category. Event matching will not + * be influenced by this flag. + */ +enum ssam_event_notifier_flags { + SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0), +}; + +/** * struct ssam_event_notifier - Notifier block for SSAM events. * @base: The base notifier block with callback function and priority. * @event: The event for which this block will receive notifications. @@ -803,6 +817,7 @@ enum ssam_event_mask { * @event.id: ID specifying the event. * @event.mask: Flags determining how events are matched to the notifier. * @event.flags: Flags used for enabling the event. + * @flags: Notifier flags (see &enum ssam_event_notifier_flags). */ struct ssam_event_notifier { struct ssam_notifier_block base; @@ -813,6 +828,8 @@ struct ssam_event_notifier { enum ssam_event_mask mask; u8 flags; } event; + + unsigned long flags; }; int ssam_notifier_register(struct ssam_controller *ctrl, @@ -821,4 +838,12 @@ int ssam_notifier_register(struct ssam_controller *ctrl, int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n); +int ssam_controller_event_enable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags); + +int ssam_controller_event_disable(struct ssam_controller *ctrl, + struct ssam_event_registry reg, + struct ssam_event_id id, u8 flags); + #endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */ diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 6ff9c58b3e17..f636c5310321 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -7,7 +7,7 @@ * Provides support for non-platform/non-ACPI SSAM clients via dedicated * subsystem. 
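/*
 * Editor's sketch, not part of the patch: registering a notifier with the
 * new SSAM_EVENT_NOTIFIER_OBSERVER flag from controller.h above. The
 * callback signature and the registry/mask constants follow the
 * surface_aggregator headers; picking the battery target category is an
 * illustrative assumption.
 */
#include <linux/printk.h>
#include <linux/surface_aggregator/controller.h>

static u32 sketch_observe(struct ssam_event_notifier *nf,
			  const struct ssam_event *event)
{
	pr_info("ssam: tc=%#04x cid=%#04x\n",
		event->target_category, event->command_id);
	return 0;	/* not consumed, keep propagating */
}

static struct ssam_event_notifier sketch_observer = {
	.base.fn = sketch_observe,
	.event.reg = SSAM_EVENT_REGISTRY_SAM,
	.event.id.target_category = SSAM_SSH_TC_BAT,
	.event.mask = SSAM_EVENT_MASK_TARGET,
	/* Observer: match events, but never enable/disable them in the EC. */
	.flags = SSAM_EVENT_NOTIFIER_OBSERVER,
};

/* After ssam_notifier_register(ctrl, &sketch_observer), matching events
 * are delivered without touching their enable state; some other consumer,
 * or an explicit ssam_controller_event_enable() call, turns them on. */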
* - * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com> */ #ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h index 64276fbfa1d5..c3de43edcffa 100644 --- a/include/linux/surface_aggregator/serial_hub.h +++ b/include/linux/surface_aggregator/serial_hub.h @@ -6,7 +6,7 @@ * Surface System Aggregator Module (SSAM). Provides the interface for basic * packet- and request-based communication with the SSAM EC via SSH. * - * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com> + * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com> */ #ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H diff --git a/include/linux/swap.h b/include/linux/swap.h index 144727041e78..ba52f3a3478e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -62,12 +62,17 @@ static inline int current_is_kswapd(void) * migrate part of a process memory to device memory. * * When a page is migrated from CPU to device, we set the CPU page table entry - * to a special SWP_DEVICE_* entry. + * to a special SWP_DEVICE_{READ|WRITE} entry. + * + * When a page is mapped by the device for exclusive access we set the CPU page + * table entries to special SWP_DEVICE_EXCLUSIVE_* entries. */ #ifdef CONFIG_DEVICE_PRIVATE -#define SWP_DEVICE_NUM 2 +#define SWP_DEVICE_NUM 4 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) +#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) +#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3) #else #define SWP_DEVICE_NUM 0 #endif @@ -177,7 +182,6 @@ enum { SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ - SWP_VALID = (1 << 13), /* swap is valid to be operated on? */ /* add others here before... */ SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; @@ -240,6 +244,7 @@ struct swap_cluster_list { * The in-memory structure used to track swap areas. */ struct swap_info_struct { + struct percpu_ref users; /* indicate and keep swap device valid. 
*/ unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ @@ -260,6 +265,7 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ + struct completion comp; /* seldom referenced */ #ifdef CONFIG_FRONTSWAP unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ atomic_t frontswap_pages; /* frontswap pages in-use counter */ @@ -402,7 +408,7 @@ static inline bool node_reclaim_enabled(void) extern void check_move_unevictable_pages(struct pagevec *pvec); -extern int kswapd_run(int nid); +extern void kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_SWAP @@ -445,6 +451,7 @@ extern void __delete_from_swap_cache(struct page *page, extern void delete_from_swap_cache(struct page *); extern void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end); +extern void free_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -511,7 +518,7 @@ sector_t swap_page_sector(struct page *page); static inline void put_swap_device(struct swap_info_struct *si) { - rcu_read_unlock(); + percpu_ref_put(&si->users); } #else /* CONFIG_SWAP */ @@ -526,7 +533,20 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) return NULL; } -#define swap_address_space(entry) (NULL) +static inline struct swap_info_struct *get_swap_device(swp_entry_t entry) +{ + return NULL; +} + +static inline void put_swap_device(struct swap_info_struct *si) +{ +} + +static inline struct address_space *swap_address_space(swp_entry_t entry) +{ + return NULL; +} + #define get_nr_swap_pages() 0L #define total_swap_pages 0L #define total_swapcache_pages() 0UL @@ -541,12 +561,16 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); +static inline void free_swap_cache(struct page *page) +{ +} + static inline void show_swap_cache_info(void) { } -#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));}) -#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));}) +/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ +#define free_swap_and_cache(e) is_pfn_swap_entry(e) static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { @@ -697,7 +721,13 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); +extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); +static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) +{ + if (mem_cgroup_disabled()) + return; + __cgroup_throttle_swaprate(page, gfp_mask); +} #else static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) { @@ -706,8 +736,22 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) #ifdef CONFIG_MEMCG_SWAP extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); -extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); -extern void 
mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); +extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); +static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) +{ + if (mem_cgroup_disabled()) + return 0; + return __mem_cgroup_try_charge_swap(page, entry); +} + +extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); +static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge_swap(entry, nr_pages); +} + extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct page *page); #else diff --git a/include/linux/swapops.h b/include/linux/swapops.h index d9b7c9132c2f..d356ab4047f7 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -23,6 +23,16 @@ #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) +/* Clear all flags but only keep swp_entry_t related information */ +static inline pte_t pte_swp_clear_flags(pte_t pte) +{ + if (pte_swp_soft_dirty(pte)) + pte = pte_swp_clear_soft_dirty(pte); + if (pte_swp_uffd_wp(pte)) + pte = pte_swp_clear_uffd_wp(pte); + return pte; +} + /* * Store a type+offset into a swp_entry_t in an arch-independent format */ @@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) { swp_entry_t arch_entry; - if (pte_swp_soft_dirty(pte)) - pte = pte_swp_clear_soft_dirty(pte); - if (pte_swp_uffd_wp(pte)) - pte = pte_swp_clear_uffd_wp(pte); + pte = pte_swp_clear_flags(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } @@ -100,10 +107,14 @@ static inline void *swp_to_radix_entry(swp_entry_t entry) } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) -static inline swp_entry_t make_device_private_entry(struct page *page, bool write) +static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) +{ + return swp_entry(SWP_DEVICE_READ, offset); +} + +static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) { - return swp_entry(write ? 
SWP_DEVICE_WRITE : SWP_DEVICE_READ, - page_to_pfn(page)); + return swp_entry(SWP_DEVICE_WRITE, offset); } static inline bool is_device_private_entry(swp_entry_t entry) @@ -112,33 +123,40 @@ static inline bool is_device_private_entry(swp_entry_t entry) return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE; } -static inline void make_device_private_entry_read(swp_entry_t *entry) +static inline bool is_writable_device_private_entry(swp_entry_t entry) { - *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry)); + return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } -static inline bool is_write_device_private_entry(swp_entry_t entry) +static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) { - return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); + return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset); } -static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) { - return swp_offset(entry); + return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset); } -static inline struct page *device_private_entry_to_page(swp_entry_t entry) +static inline bool is_device_exclusive_entry(swp_entry_t entry) { - return pfn_to_page(swp_offset(entry)); + return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ || + swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE; +} + +static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) +{ + return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE); } #else /* CONFIG_DEVICE_PRIVATE */ -static inline swp_entry_t make_device_private_entry(struct page *page, bool write) +static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { return swp_entry(0, 0); } -static inline void make_device_private_entry_read(swp_entry_t *entry) +static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) { + return swp_entry(0, 0); } static inline bool is_device_private_entry(swp_entry_t entry) @@ -146,61 +164,52 @@ static inline bool is_device_private_entry(swp_entry_t entry) return false; } -static inline bool is_write_device_private_entry(swp_entry_t entry) +static inline bool is_writable_device_private_entry(swp_entry_t entry) { return false; } -static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) { - return 0; + return swp_entry(0, 0); } -static inline struct page *device_private_entry_to_page(swp_entry_t entry) +static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) { - return NULL; + return swp_entry(0, 0); } -#endif /* CONFIG_DEVICE_PRIVATE */ -#ifdef CONFIG_MIGRATION -static inline swp_entry_t make_migration_entry(struct page *page, int write) +static inline bool is_device_exclusive_entry(swp_entry_t entry) { - BUG_ON(!PageLocked(compound_head(page))); + return false; +} - return swp_entry(write ? 
SWP_MIGRATION_WRITE : SWP_MIGRATION_READ, - page_to_pfn(page)); +static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) +{ + return false; } +#endif /* CONFIG_DEVICE_PRIVATE */ +#ifdef CONFIG_MIGRATION static inline int is_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_READ || swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline int is_write_migration_entry(swp_entry_t entry) +static inline int is_writable_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); } -static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) -{ - return swp_offset(entry); -} - -static inline struct page *migration_entry_to_page(swp_entry_t entry) +static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { - struct page *p = pfn_to_page(swp_offset(entry)); - /* - * Any use of migration entries may only occur while the - * corresponding page is locked - */ - BUG_ON(!PageLocked(compound_head(p))); - return p; + return swp_entry(SWP_MIGRATION_READ, offset); } -static inline void make_migration_entry_read(swp_entry_t *entry) +static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) { - *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); + return swp_entry(SWP_MIGRATION_WRITE, offset); } extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, @@ -210,37 +219,58 @@ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, extern void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte); #else - -#define make_migration_entry(page, write) swp_entry(0, 0) -static inline int is_migration_entry(swp_entry_t swp) +static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { - return 0; + return swp_entry(0, 0); } -static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) +static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) { - return 0; + return swp_entry(0, 0); } -static inline struct page *migration_entry_to_page(swp_entry_t entry) +static inline int is_migration_entry(swp_entry_t swp) { - return NULL; + return 0; } -static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte) { } -static inline int is_write_migration_entry(swp_entry_t entry) +static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; } #endif +static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry) +{ + struct page *p = pfn_to_page(swp_offset(entry)); + + /* + * Any use of migration entries may only occur while the + * corresponding page is locked + */ + BUG_ON(is_migration_entry(entry) && !PageLocked(p)); + + return p; +} + +/* + * A pfn swap entry is a special type of swap entry that always has a pfn stored + * in the swap offset. They are used to represent unaddressable device memory + * and to restrict access to a page undergoing migration. 
+ */ +static inline bool is_pfn_swap_entry(swp_entry_t entry) +{ + return is_migration_entry(entry) || is_device_private_entry(entry) || + is_device_exclusive_entry(entry); +} + struct page_vma_mapped_walk; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -258,6 +288,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) if (pmd_swp_soft_dirty(pmd)) pmd = pmd_swp_clear_soft_dirty(pmd); + if (pmd_swp_uffd_wp(pmd)) + pmd = pmd_swp_clear_uffd_wp(pmd); arch_entry = __pmd_to_swp_entry(pmd); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } @@ -323,6 +355,11 @@ static inline int is_hwpoison_entry(swp_entry_t entry) return swp_type(entry) == SWP_HWPOISON; } +static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry) +{ + return swp_offset(entry); +} + static inline void num_poisoned_pages_inc(void) { atomic_long_inc(&num_poisoned_pages); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 216854a5e513..b0cb2a9973f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -2,6 +2,7 @@ #ifndef __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H +#include <linux/device.h> #include <linux/dma-direction.h> #include <linux/init.h> #include <linux/types.h> @@ -72,7 +73,8 @@ extern enum swiotlb_force swiotlb_force; * range check to see if the memory was in fact allocated by this * API. * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and - * @end. This is command line adjustable via setup_io_tlb_npages. + * @end. For default swiotlb, this is command line adjustable via + * setup_io_tlb_npages. * @used: The number of used IO TLB block. * @list: The free list describing the number of free entries available * from each index. @@ -83,6 +85,8 @@ extern enum swiotlb_force swiotlb_force; * unmap calls. * @debugfs: The dentry to debugfs. 
* @late_alloc: %true if allocated using the page allocator + * @force_bounce: %true if swiotlb bouncing is forced + * @for_alloc: %true if the pool is used for memory allocation */ struct io_tlb_mem { phys_addr_t start; @@ -93,29 +97,42 @@ struct io_tlb_mem { spinlock_t lock; struct dentry *debugfs; bool late_alloc; + bool force_bounce; + bool for_alloc; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; unsigned int list; - } slots[]; + } *slots; }; -extern struct io_tlb_mem *io_tlb_default_mem; +extern struct io_tlb_mem io_tlb_default_mem; -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; return mem && paddr >= mem->start && paddr < mem->end; } +static inline bool is_swiotlb_force_bounce(struct device *dev) +{ + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + + return mem && mem->force_bounce; +} + void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); -bool is_swiotlb_active(void); +bool is_swiotlb_active(struct device *dev); void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) +{ + return false; +} +static inline bool is_swiotlb_force_bounce(struct device *dev) { return false; } @@ -131,7 +148,7 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev) return SIZE_MAX; } -static inline bool is_swiotlb_active(void) +static inline bool is_swiotlb_active(struct device *dev) { return false; } @@ -144,4 +161,28 @@ static inline void swiotlb_adjust_size(unsigned long size) extern void swiotlb_print_info(void); extern void swiotlb_set_max_segment(unsigned int); +#ifdef CONFIG_DMA_RESTRICTED_POOL +struct page *swiotlb_alloc(struct device *dev, size_t size); +bool swiotlb_free(struct device *dev, struct page *page, size_t size); + +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return dev->dma_io_tlb_mem->for_alloc; +} +#else +static inline struct page *swiotlb_alloc(struct device *dev, size_t size) +{ + return NULL; +} +static inline bool swiotlb_free(struct device *dev, struct page *page, + size_t size) +{ + return false; +} +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return false; +} +#endif /* CONFIG_DMA_RESTRICTED_POOL */ + #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 050511e8f1f8..252243c7783d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -485,8 +485,8 @@ asmlinkage long sys_pipe2(int __user *fildes, int flags); /* fs/quota.c */ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr); -asmlinkage long sys_quotactl_path(unsigned int cmd, const char __user *mountpoint, - qid_t id, void __user *addr); +asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id, + void __user *addr); /* fs/readdir.c */ asmlinkage long sys_getdents64(unsigned int fd, @@ -915,6 +915,7 @@ asmlinkage long sys_mincore(unsigned long start, size_t len, asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, size_t vlen, int behavior, unsigned int flags); +asmlinkage long 
sys_process_mrelease(int pidfd, unsigned int flags); asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags); @@ -1050,6 +1051,7 @@ asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr _ asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, const void __user *rule_attr, __u32 flags); asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); +asmlinkage long sys_memfd_secret(unsigned int flags); /* * Architecture-specific system calls @@ -1157,7 +1159,6 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); asmlinkage long sys_vfork(void); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_send(int, void __user *, size_t, unsigned); -asmlinkage long sys_bdflush(int func, long data); asmlinkage long sys_oldumount(char __user *name); asmlinkage long sys_uselib(const char __user *library); asmlinkage long sys_sysfs(int option, @@ -1372,6 +1373,9 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops, + const struct timespec64 *timeout, + struct ipc_namespace *ns); int __sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index d99ca99837de..1fa2b69c6fc3 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -48,6 +48,8 @@ typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dobool(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h new file mode 100644 index 000000000000..b0dcfa26d07b --- /dev/null +++ b/include/linux/sysfb.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_SYSFB_H +#define _LINUX_SYSFB_H + +/* + * Generic System Framebuffers on x86 + * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/platform_data/simplefb.h> +#include <linux/screen_info.h> + +enum { + M_I17, /* 17-Inch iMac */ + M_I20, /* 20-Inch iMac */ + M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ + M_I24, /* 24-Inch iMac */ + M_I24_8_1, /* 24-Inch iMac, 8,1th gen */ + M_I24_10_1, /* 24-Inch iMac, 10,1th gen */ + M_I27_11_1, /* 27-Inch iMac, 11,1th gen */ + M_MINI, /* Mac Mini */ + M_MINI_3_1, /* Mac Mini, 3,1th gen */ + M_MINI_4_1, /* Mac Mini, 4,1th gen */ + M_MB, /* MacBook */ + M_MB_2, /* MacBook, 2nd rev. */ + M_MB_3, /* MacBook, 3rd rev. */ + M_MB_5_1, /* MacBook, 5th rev. */ + M_MB_6_1, /* MacBook, 6th rev. */ + M_MB_7_1, /* MacBook, 7th rev. 
*/ + M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ + M_MBA, /* MacBook Air */ + M_MBA_3, /* Macbook Air, 3rd rev */ + M_MBP, /* MacBook Pro */ + M_MBP_2, /* MacBook Pro 2nd gen */ + M_MBP_2_2, /* MacBook Pro 2,2nd gen */ + M_MBP_SR, /* MacBook Pro (Santa Rosa) */ + M_MBP_4, /* MacBook Pro, 4th gen */ + M_MBP_5_1, /* MacBook Pro, 5,1th gen */ + M_MBP_5_2, /* MacBook Pro, 5,2th gen */ + M_MBP_5_3, /* MacBook Pro, 5,3rd gen */ + M_MBP_6_1, /* MacBook Pro, 6,1th gen */ + M_MBP_6_2, /* MacBook Pro, 6,2th gen */ + M_MBP_7_1, /* MacBook Pro, 7,1th gen */ + M_MBP_8_2, /* MacBook Pro, 8,2nd gen */ + M_UNKNOWN /* placeholder */ +}; + +struct efifb_dmi_info { + char *optname; + unsigned long base; + int stride; + int width; + int height; + int flags; +}; + +#ifdef CONFIG_EFI + +extern struct efifb_dmi_info efifb_dmi_list[]; +void sysfb_apply_efi_quirks(struct platform_device *pd); + +#else /* CONFIG_EFI */ + +static inline void sysfb_apply_efi_quirks(struct platform_device *pd) +{ +} + +#endif /* CONFIG_EFI */ + +#ifdef CONFIG_SYSFB_SIMPLEFB + +bool sysfb_parse_mode(const struct screen_info *si, + struct simplefb_platform_data *mode); +int sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode); + +#else /* CONFIG_SYSFB_SIMPLEFB */ + +static inline bool sysfb_parse_mode(const struct screen_info *si, + struct simplefb_platform_data *mode) +{ + return false; +} + +static inline int sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) +{ + return -EINVAL; +} + +#endif /* CONFIG_SYSFB_SIMPLEFB */ + +#endif /* _LINUX_SYSFB_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index d76a1ddf83a3..e3f1e8ac1f85 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -162,6 +162,12 @@ static const struct attribute_group _name##_group = { \ }; \ __ATTRIBUTE_GROUPS(_name) +#define BIN_ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group _name##_group = { \ + .bin_attrs = _name##_attrs, \ +}; \ +__ATTRIBUTE_GROUPS(_name) + struct file; struct vm_area_struct; struct address_space; @@ -170,7 +176,7 @@ struct bin_attribute { struct attribute attr; size_t size; void *private; - struct address_space *mapping; + struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 54269e47ac9a..3ebfea0781f1 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -27,6 +27,7 @@ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ +#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev); * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** * tee_shm_register() - Register shared memory buffer diff --git a/include/linux/thermal.h b/include/linux/thermal.h index d296f3b88fb9..c314893970b3 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -285,7 +285,7 @@ struct thermal_zone_params { }; /** - * struct thermal_zone_of_device_ops - scallbacks for handling DT based
zones + * struct thermal_zone_of_device_ops - callbacks for handling DT based zones * * Mandatory: * @get_temp: a pointer to a function that reads the sensor temperature. @@ -404,12 +404,13 @@ static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } static inline struct thermal_cooling_device * -thermal_cooling_device_register(char *type, void *devdata, +thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, - char *type, void *devdata, const struct thermal_cooling_device_ops *ops) + const char *type, void *devdata, + const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 157762db9d4b..0999f6317978 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -9,6 +9,7 @@ #define _LINUX_THREAD_INFO_H #include <linux/types.h> +#include <linux/limits.h> #include <linux/bug.h> #include <linux/restart_block.h> #include <linux/errno.h> diff --git a/include/linux/threads.h b/include/linux/threads.h index 18d5a74bcc3d..c34173e6c5f1 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -38,7 +38,7 @@ * Define a minimum number of pids per cpu. Heuristically based * on original pid max of 32k for 32 cpus. Also, increase the * minimum settable value for pid_max on the running system based - * on similar defaults. See kernel/pid.c:pidmap_init() for details. + * on similar defaults. See kernel/pid.c:pid_idr_init() for details. */ #define PIDS_PER_CPU_DEFAULT 1024 #define PIDS_PER_CPU_MIN 8 diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index e7c96c37174f..124e13cb1469 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -468,6 +468,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) * @interrupt_work: Work scheduled to handle ring interrupt when no * MSI-X is used. * @hop_count: Number of rings (end point hops) supported by NHI. + * @quirks: NHI specific quirks if any */ struct tb_nhi { spinlock_t lock; @@ -480,6 +481,7 @@ struct tb_nhi { bool going_away; struct work_struct interrupt_work; u32 hop_count; + unsigned long quirks; }; /** diff --git a/include/linux/tick.h b/include/linux/tick.h index 7340613c7eff..bfd571f18cfd 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -11,6 +11,7 @@ #include <linux/context_tracking_state.h> #include <linux/cpumask.h> #include <linux/sched.h> +#include <linux/rcupdate.h> #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void __init tick_init(void); @@ -185,13 +186,17 @@ static inline bool tick_nohz_full_enabled(void) return tick_nohz_full_running; } -static inline bool tick_nohz_full_cpu(int cpu) -{ - if (!tick_nohz_full_enabled()) - return false; - - return cpumask_test_cpu(cpu, tick_nohz_full_mask); -} +/* + * Check if a CPU is part of the nohz_full subset. Arrange for evaluating + * the cpu expression (typically smp_processor_id()) _after_ the static + * key. 
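The comment above is the rationale for turning tick_nohz_full_cpu() from an inline function into the macro defined just below: a function argument is evaluated before the body runs, while the statement expression defers it. A hedged sketch of the call pattern this enables (the wrapper function here is hypothetical; tick_nohz_user_enter_prepare(), added at the end of this hunk, has exactly this shape):

	/* Sketch: smp_processor_id() is only computed once the
	 * tick_nohz_full_enabled() static-key test has passed, so CPUs
	 * on kernels not running nohz_full skip both operations.
	 */
	static void my_enter_user(void)
	{
		if (tick_nohz_full_cpu(smp_processor_id()))
			rcu_nocb_flush_deferred_wakeup();
	}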
+ */ +#define tick_nohz_full_cpu(_cpu) ({ \ + bool __ret = false; \ + if (tick_nohz_full_enabled()) \ + __ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \ + __ret; \ +}) static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { @@ -207,7 +212,7 @@ extern void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit); -extern void tick_nohz_dep_set_signal(struct signal_struct *signal, +extern void tick_nohz_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit); @@ -252,11 +257,11 @@ static inline void tick_dep_clear_task(struct task_struct *tsk, if (tick_nohz_full_enabled()) tick_nohz_dep_clear_task(tsk, bit); } -static inline void tick_dep_set_signal(struct signal_struct *signal, +static inline void tick_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) - tick_nohz_dep_set_signal(signal, bit); + tick_nohz_dep_set_signal(tsk, bit); } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) @@ -284,7 +289,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { } -static inline void tick_dep_set_signal(struct signal_struct *signal, +static inline void tick_dep_set_signal(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) { } @@ -300,4 +305,10 @@ static inline void tick_nohz_task_switch(void) __tick_nohz_task_switch(); } +static inline void tick_nohz_user_enter_prepare(void) +{ + if (tick_nohz_full_cpu(smp_processor_id())) + rcu_nocb_flush_deferred_wakeup(); +} + #endif diff --git a/include/linux/time64.h b/include/linux/time64.h index 5117cb5b5656..81b9686a2079 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -25,7 +25,9 @@ struct itimerspec64 { #define TIME64_MIN (-TIME64_MAX - 1) #define KTIME_MAX ((s64)~((u64)1 << 63)) +#define KTIME_MIN (-KTIME_MAX - 1) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC) /* * Limits for settimeofday(): @@ -124,10 +126,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts) */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { - /* Prevent multiplication overflow */ - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + /* Prevent multiplication overflow / underflow */ + if (ts->tv_sec >= KTIME_SEC_MAX) return KTIME_MAX; + if (ts->tv_sec <= KTIME_SEC_MIN) + return KTIME_MIN; + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } diff --git a/include/linux/timer.h b/include/linux/timer.h index 4118a97e62fb..fda13c9d1256 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) -extern bool timer_curr_running(struct timer_list *timer); - extern void init_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); diff --git a/include/linux/trace.h b/include/linux/trace.h index be1e130ed87c..bf169612ffe1 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -41,6 +41,13 @@ int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct 
trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); int trace_array_destroy(struct trace_array *tr); + +/* For osnoise tracer */ +int osnoise_arch_register(void); +void osnoise_arch_unregister(void); +void osnoise_trace_irq_entry(int id); +void osnoise_trace_irq_exit(int id, const char *desc); + #endif /* CONFIG_TRACING */ #endif /* _LINUX_TRACE_H */ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ad413b382a3c..3e475eeb5a99 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -310,8 +310,10 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, + TRACE_EVENT_FL_DYNAMIC_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, + TRACE_EVENT_FL_EPROBE_BIT, }; /* @@ -321,8 +323,10 @@ enum { * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file * TRACEPOINT - Event is a tracepoint + * DYNAMIC - Event is a dynamic event (created at run time) * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe + * EPROBE - Event is an event probe */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -330,8 +334,10 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), + TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), + TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -347,7 +353,14 @@ struct trace_event_call { struct trace_event event; char *print_fmt; struct event_filter *filter; - void *mod; + /* + * Static events can disappear with modules, + * whereas dynamic ones need their own ref count. + */ + union { + void *module; + atomic_t refcnt; + }; void *data; /* See the TRACE_EVENT_FL_* flags above */ @@ -363,6 +376,42 @@ struct trace_event_call { #endif }; +#ifdef CONFIG_DYNAMIC_EVENTS +bool trace_event_dyn_try_get_ref(struct trace_event_call *call); +void trace_event_dyn_put_ref(struct trace_event_call *call); +bool trace_event_dyn_busy(struct trace_event_call *call); +#else +static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call) +{ + /* Without DYNAMIC_EVENTS configured, nothing should be calling this */ + return false; +} +static inline void trace_event_dyn_put_ref(struct trace_event_call *call) +{ +} +static inline bool trace_event_dyn_busy(struct trace_event_call *call) +{ + /* Nothing should call this without DYNAMIC_EVENTS configured.
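The union that replaces void *mod above means an event is pinned differently depending on TRACE_EVENT_FL_DYNAMIC; the trace_event_try_get_ref()/trace_event_put_ref() wrappers just below hide that distinction. A minimal sketch of the intended pairing (the attach/detach functions are hypothetical):

	/* Sketch: pin the event while a probe is attached.  Static
	 * events take a module reference; DYNAMIC events bump their
	 * own refcnt instead.
	 */
	static int my_attach(struct trace_event_call *call)
	{
		if (!trace_event_try_get_ref(call))
			return -ENODEV;		/* event is being deleted */
		/* ... install probe on the event ... */
		return 0;
	}

	static void my_detach(struct trace_event_call *call)
	{
		/* ... remove probe ... */
		trace_event_put_ref(call);
	}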
*/ + return true; +} +#endif + +static inline bool trace_event_try_get_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + return trace_event_dyn_try_get_ref(call); + else + return try_module_get(call->module); +} + +static inline void trace_event_put_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + trace_event_dyn_put_ref(call); + else + module_put(call->module); +} + #ifdef CONFIG_PERF_EVENTS static inline bool bpf_prog_array_valid(struct trace_event_call *call) { @@ -634,6 +683,7 @@ enum event_trigger_type { ETT_EVENT_ENABLE = (1 << 3), ETT_EVENT_HIST = (1 << 4), ETT_HIST_ENABLE = (1 << 5), + ETT_EVENT_EPROBE = (1 << 6), }; extern int filter_match_preds(struct event_filter *filter, void *rec); @@ -675,7 +725,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); -int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); +int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); @@ -692,7 +742,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c } static inline int -perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) +perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie) { return -EOPNOTSUPP; } @@ -803,6 +853,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); +void perf_event_free_bpf_prog(struct perf_event *event); + void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 13f65420f188..28031b15f878 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -41,7 +41,17 @@ extern int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, int prio); extern int +tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data, + int prio); +extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); +static inline int +tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe, + void *data) +{ + return tracepoint_probe_register_prio_may_exist(tp, probe, data, + TRACEPOINT_DEFAULT_PRIO); +} extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), void *priv); @@ -465,7 +475,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_prid) is equivalent to a standard declaration: + * * __field(pid_t, prev_pid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * diff --git a/include/linux/tty.h b/include/linux/tty.h index e5d6b1f28823..168e57e40bbb 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -6,11 +6,12 @@ #include <linux/major.h> #include 
<linux/termios.h> #include <linux/workqueue.h> +#include <linux/tty_buffer.h> #include <linux/tty_driver.h> #include <linux/tty_ldisc.h> +#include <linux/tty_port.h> #include <linux/mutex.h> #include <linux/tty_flags.h> -#include <linux/seq_file.h> #include <uapi/linux/tty.h> #include <linux/rwsem.h> #include <linux/llist.h> @@ -31,54 +32,6 @@ */ #define __DISABLED_CHAR '\0' -struct tty_buffer { - union { - struct tty_buffer *next; - struct llist_node free; - }; - int used; - int size; - int commit; - int read; - int flags; - /* Data points here */ - unsigned long data[]; -}; - -/* Values for .flags field of tty_buffer */ -#define TTYB_NORMAL 1 /* buffer has no flags buffer */ - -static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) -{ - return ((unsigned char *)b->data) + ofs; -} - -static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) -{ - return (char *)char_buf_ptr(b, ofs) + b->size; -} - -struct tty_bufhead { - struct tty_buffer *head; /* Queue head */ - struct work_struct work; - struct mutex lock; - atomic_t priority; - struct tty_buffer sentinel; - struct llist_head free; /* Free queue head */ - atomic_t mem_used; /* In-use buffers excluding free list */ - int mem_limit; - struct tty_buffer *tail; /* Active buffer */ -}; -/* - * When a break, frame error, or parity error happens, these codes are - * stuffed into the flags buffer. - */ -#define TTY_NORMAL 0 -#define TTY_BREAK 1 -#define TTY_FRAME 2 -#define TTY_PARITY 3 -#define TTY_OVERRUN 4 - #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) @@ -164,99 +117,29 @@ struct tty_bufhead { struct device; struct signal_struct; +struct tty_operations; -/* - * Port level information. Each device keeps its own port level information - * so provide a common structure for those ports wanting to use common support - * routines. +/** + * struct tty_struct - state associated with a tty while open * - * The tty port has a different lifetime to the tty so must be kept apart. - * In addition be careful as tty -> port mappings are valid for the life - * of the tty object but in many cases port -> tty mappings are valid only - * until a hangup so don't use the wrong path. - */ - -struct tty_port; - -struct tty_port_operations { - /* Return 1 if the carrier is raised */ - int (*carrier_raised)(struct tty_port *port); - /* Control the DTR line */ - void (*dtr_rts)(struct tty_port *port, int raise); - /* Called when the last close completes or a hangup finishes - IFF the port was initialized. Do not use to free resources. 
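These port-level hooks (moved to the new tty_port.h later in this patch) pair up over a port's lifetime: activate() runs from tty_port_open() and shutdown() undoes it on the final close or hangup, both under the port mutex. A hedged sketch of a minimal implementation (the my_hw_* helpers are hypothetical):

	/* Sketch: a minimal activate/shutdown pair for a tty_port.
	 * Both callbacks run under port->mutex, so they need no extra
	 * serialization against each other.
	 */
	static int my_activate(struct tty_port *port, struct tty_struct *tty)
	{
		return my_hw_power_on(port);	/* 0 on success */
	}

	static void my_shutdown(struct tty_port *port)
	{
		my_hw_power_off(port);	/* do not free resources here */
	}

	static const struct tty_port_operations my_port_ops = {
		.activate = my_activate,
		.shutdown = my_shutdown,
	};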
Called - under the port mutex to serialize against activate/shutdowns */ - void (*shutdown)(struct tty_port *port); - /* Called under the port mutex from tty_port_open, serialized using - the port mutex */ - /* FIXME: long term getting the tty argument *out* of this would be - good for consoles */ - int (*activate)(struct tty_port *port, struct tty_struct *tty); - /* Called on the final put of a port */ - void (*destruct)(struct tty_port *port); -}; - -struct tty_port_client_operations { - int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); - void (*write_wakeup)(struct tty_port *port); -}; - -extern const struct tty_port_client_operations tty_port_default_client_ops; - -struct tty_port { - struct tty_bufhead buf; /* Locked internally */ - struct tty_struct *tty; /* Back pointer */ - struct tty_struct *itty; /* internal back ptr */ - const struct tty_port_operations *ops; /* Port operations */ - const struct tty_port_client_operations *client_ops; /* Port client operations */ - spinlock_t lock; /* Lock protecting tty field */ - int blocked_open; /* Waiting to open */ - int count; /* Usage count */ - wait_queue_head_t open_wait; /* Open waiters */ - wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned long flags; /* User TTY flags ASYNC_ */ - unsigned long iflags; /* Internal flags TTY_PORT_ */ - unsigned char console:1; /* port is a console */ - struct mutex mutex; /* Locking */ - struct mutex buf_mutex; /* Buffer alloc lock */ - unsigned char *xmit_buf; /* Optional buffer */ - unsigned int close_delay; /* Close port delay */ - unsigned int closing_wait; /* Delay for output */ - int drain_delay; /* Set to zero if no pure time - based drain is needed else - set to size of fifo */ - struct kref kref; /* Ref counter */ - void *client_data; -}; - -/* tty_port::iflags bits -- use atomic bit ops */ -#define TTY_PORT_INITIALIZED 0 /* device is initialized */ -#define TTY_PORT_SUSPENDED 1 /* device is suspended */ -#define TTY_PORT_ACTIVE 2 /* device is open */ - -/* - * uart drivers: use the uart_port::status field and the UPSTAT_* defines - * for s/w-based flow control steering and carrier detection status - */ -#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ -#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ -#define TTY_PORT_KOPENED 5 /* device exclusively opened by - kernel */ - -/* - * Where all of the state associated with a tty is kept while the tty - * is open. Since the termios state should be kept even if the tty - * has been closed --- for things like the baud rate, etc --- it is - * not stored here, but rather a pointer to the real state is stored - * here. Possible the winsize structure should have the same - * treatment, but (1) the default 80x24 is usually right and (2) it's - * most often used by a windowing system, which will set the correct - * size each time the window is created or resized anyway. - * - TYT, 9/14/92 + * @flow.lock: lock for flow members + * @flow.stopped: tty stopped/started by tty_stop/tty_start + * @flow.tco_stopped: tty stopped/started by TCOOFF/TCOON ioctls (it has + * precedence over @flow.stopped) + * @flow.unused: alignment for Alpha, so that no members other than @flow.* are + * modified by the same 64b word store. The @flow's __aligned is + * there for the very same reason. + * @ctrl.lock: lock for ctrl members + * @ctrl.pgrp: process group of this tty (setpgrp(2)) + * @ctrl.session: session of this tty (setsid(2)).
Writes are protected by both + * @ctrl.lock and legacy mutex, readers must use at least one of + * them. + * @ctrl.pktstatus: packet mode status (bitwise OR of TIOCPKT_* constants) + * @ctrl.packet: packet mode enabled + * + * All of the state associated with a tty while the tty is open. Persistent + * storage for tty devices is referenced here as @port in struct tty_port. */ - -struct tty_operations; - struct tty_struct { int magic; struct kref kref; @@ -274,27 +157,30 @@ struct tty_struct { struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; - spinlock_t ctrl_lock; - spinlock_t flow_lock; /* Termios values are protected by the termios rwsem */ struct ktermios termios, termios_locked; char name[64]; - struct pid *pgrp; /* Protected by ctrl lock */ - /* - * Writes protected by both ctrl lock and legacy mutex, readers must use - * at least one of them. - */ - struct pid *session; unsigned long flags; int count; struct winsize winsize; /* winsize_mutex */ - unsigned long stopped:1, /* flow_lock */ - flow_stopped:1, - unused:BITS_PER_LONG - 2; + + struct { + spinlock_t lock; + bool stopped; + bool tco_stopped; + unsigned long unused[0]; + } __aligned(sizeof(unsigned long)) flow; + + struct { + spinlock_t lock; + struct pid *pgrp; + struct pid *session; + unsigned char pktstatus; + bool packet; + unsigned long unused[0]; + } __aligned(sizeof(unsigned long)) ctrl; + int hw_stopped; - unsigned long ctrl_status:8, /* ctrl_lock */ - packet:1, - unused_ctrl:BITS_PER_LONG - 9; unsigned int receive_room; /* Bytes free for queue */ int flow_change; @@ -434,22 +320,12 @@ extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern void stop_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); -extern int tty_register_driver(struct tty_driver *driver); -extern void tty_unregister_driver(struct tty_driver *driver); -extern struct device *tty_register_device(struct tty_driver *driver, - unsigned index, struct device *dev); -extern struct device *tty_register_device_attr(struct tty_driver *driver, - unsigned index, struct device *device, - void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern void tty_write_message(struct tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); -extern int tty_chars_in_buffer(struct tty_struct *tty); -extern int tty_write_room(struct tty_struct *tty); +extern unsigned int tty_chars_in_buffer(struct tty_struct *tty); +extern unsigned int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); -extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); extern int tty_throttle_safe(struct tty_struct *tty); extern int tty_unthrottle_safe(struct tty_struct *tty); @@ -484,17 +360,14 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) return tty_termios_baud_rate(&tty->termios); } +unsigned char tty_get_char_size(unsigned int cflag); +unsigned char tty_get_frame_size(unsigned int cflag); + extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); -extern struct tty_ldisc 
*tty_ldisc_ref(struct tty_struct *); -extern void tty_ldisc_deref(struct tty_ldisc *); -extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); -extern const struct seq_operations tty_ldiscs_seq_ops; - extern void tty_wakeup(struct tty_struct *tty); -extern void tty_ldisc_flush(struct tty_struct *tty); extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); @@ -508,128 +381,6 @@ extern int tty_standard_install(struct tty_driver *driver, extern struct mutex tty_mutex; -extern void tty_port_init(struct tty_port *port); -extern void tty_port_link_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern struct device *tty_port_register_device(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern struct device *tty_port_register_device_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_port_unregister_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern int tty_port_alloc_xmit_buf(struct tty_port *port); -extern void tty_port_free_xmit_buf(struct tty_port *port); -extern void tty_port_destroy(struct tty_port *port); -extern void tty_port_put(struct tty_port *port); - -static inline struct tty_port *tty_port_get(struct tty_port *port) -{ - if (port && kref_get_unless_zero(&port->kref)) - return port; - return NULL; -} - -/* If the cts flow control is enabled, return true. 
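tty_port_cts_enabled() and the tty_port_set_*() accessors that follow operate on atomic iflags bits; a typical consumer flips them from a set_termios path. A hedged sketch (the function is hypothetical; CRTSCTS and CLOCAL are the standard termios flags):

	/* Sketch: track hardware flow control and carrier checking in
	 * the port's iflags so the generic helpers can consult them.
	 */
	static void my_set_termios(struct tty_port *port, const struct ktermios *t)
	{
		tty_port_set_cts_flow(port, (t->c_cflag & CRTSCTS) != 0);
		tty_port_set_check_carrier(port, (t->c_cflag & CLOCAL) == 0);
	}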
*/ -static inline bool tty_port_cts_enabled(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); -} - -static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); -} - -static inline bool tty_port_active(const struct tty_port *port) -{ - return test_bit(TTY_PORT_ACTIVE, &port->iflags); -} - -static inline void tty_port_set_active(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); -} - -static inline bool tty_port_check_carrier(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CHECK_CD, &port->iflags); -} - -static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); -} - -static inline bool tty_port_suspended(const struct tty_port *port) -{ - return test_bit(TTY_PORT_SUSPENDED, &port->iflags); -} - -static inline void tty_port_set_suspended(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); -} - -static inline bool tty_port_initialized(const struct tty_port *port) -{ - return test_bit(TTY_PORT_INITIALIZED, &port->iflags); -} - -static inline void tty_port_set_initialized(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); -} - -static inline bool tty_port_kopened(const struct tty_port *port) -{ - return test_bit(TTY_PORT_KOPENED, &port->iflags); -} - -static inline void tty_port_set_kopened(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_KOPENED, &port->iflags, val); -} - -extern struct tty_struct *tty_port_tty_get(struct tty_port *port); -extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); -extern int tty_port_carrier_raised(struct tty_port *port); -extern void tty_port_raise_dtr_rts(struct tty_port *port); -extern void tty_port_lower_dtr_rts(struct tty_port *port); -extern void tty_port_hangup(struct tty_port *port); -extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); -extern void tty_port_tty_wakeup(struct tty_port *port); -extern int tty_port_block_til_ready(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_close_start(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); -extern void tty_port_close(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, - struct tty_struct *tty); -extern int tty_port_open(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -static inline int tty_port_users(struct tty_port *port) -{ - return port->count + port->blocked_open; -} - -extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); -extern int tty_unregister_ldisc(int disc); -extern int tty_set_ldisc(struct tty_struct *tty, int disc); -extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, - char *f, int count); - /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); #ifdef CONFIG_TTY @@ -677,12 +428,4 @@ extern void tty_lock_slave(struct tty_struct *tty); extern void tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); -#ifdef CONFIG_PROC_FS -extern void proc_tty_register_driver(struct tty_driver *); -extern void proc_tty_unregister_driver(struct tty_driver *); -#else -static inline void 
proc_tty_register_driver(struct tty_driver *d) {} -static inline void proc_tty_unregister_driver(struct tty_driver *d) {} -#endif - #endif diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h new file mode 100644 index 000000000000..3b9d77604291 --- /dev/null +++ b/include/linux/tty_buffer.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_BUFFER_H +#define _LINUX_TTY_BUFFER_H + +#include <linux/atomic.h> +#include <linux/llist.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + /* Data points here */ + unsigned long data[]; +}; + +/* Values for .flags field of tty_buffer */ +#define TTYB_NORMAL 1 /* buffer has no flags buffer */ + +static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) +{ + return ((unsigned char *)b->data) + ofs; +} + +static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) +{ + return (char *)char_buf_ptr(b, ofs) + b->size; +} + +struct tty_bufhead { + struct tty_buffer *head; /* Queue head */ + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; /* Free queue head */ + atomic_t mem_used; /* In-use buffers excluding free list */ + int mem_limit; + struct tty_buffer *tail; /* Active buffer */ +}; + +/* + * When a break, frame error, or parity error happens, these codes are + * stuffed into the flags buffer. + */ +#define TTY_NORMAL 0 +#define TTY_BREAK 1 +#define TTY_FRAME 2 +#define TTY_PARITY 3 +#define TTY_OVERRUN 4 + +#endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 2f719b471d52..c20431d8def8 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -89,7 +89,7 @@ * * Note: Do not call this function directly, call tty_driver_flush_chars * - * int (*write_room)(struct tty_struct *tty); + * unsigned int (*write_room)(struct tty_struct *tty); * * This routine returns the numbers of characters the tty driver * will accept for queuing to be written. This number is subject @@ -136,7 +136,7 @@ * the line discipline are close to full, and it should somehow * signal that no more characters should be sent to the tty. * - * Optional: Always invoke via tty_throttle(), called under the + * Optional: Always invoke via tty_throttle_safe(), called under the * termios lock. * * void (*unthrottle)(struct tty_struct * tty); @@ -153,7 +153,7 @@ * This routine notifies the tty driver that it should stop * outputting characters to the tty device. * - * Called with ->flow_lock held. Serialized with start() method. + * Called with ->flow.lock held. Serialized with start() method. * * Optional: * @@ -164,7 +164,7 @@ * This routine notifies the tty driver that it resume sending * characters to the tty device. * - * Called with ->flow_lock held. Serialized with stop() method. + * Called with ->flow.lock held. Serialized with stop() method. 
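write_room() and chars_in_buffer() change from int to unsigned int in this series, so drivers can no longer return negative error codes from them; they simply report counts. A hedged sketch of a driver method under the new signature (struct my_port and its fields are hypothetical):

	/* Sketch: report free transmit space; with the unsigned return
	 * type, "no room" is expressed as 0 rather than an error code.
	 */
	static unsigned int my_write_room(struct tty_struct *tty)
	{
		struct my_port *p = tty->driver_data;

		return p->tx_size - p->tx_used;
	}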
* * Optional: * @@ -233,6 +233,7 @@ #include <linux/export.h> #include <linux/fs.h> +#include <linux/kref.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/termios.h> @@ -256,8 +257,8 @@ struct tty_operations { const unsigned char *buf, int count); int (*put_char)(struct tty_struct *tty, unsigned char ch); void (*flush_chars)(struct tty_struct *tty); - int (*write_room)(struct tty_struct *tty); - int (*chars_in_buffer)(struct tty_struct *tty); + unsigned int (*write_room)(struct tty_struct *tty); + unsigned int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, @@ -328,9 +329,6 @@ extern struct list_head tty_drivers; extern struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); -extern void put_tty_driver(struct tty_driver *driver); -extern void tty_set_operations(struct tty_driver *driver, - const struct tty_operations *op); extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); @@ -339,24 +337,18 @@ extern void tty_driver_kref_put(struct tty_driver *driver); #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) -/* - * DEPRECATED Do not use this in new code, use tty_alloc_driver instead. - * (And change the return value checks.) - */ -static inline struct tty_driver *alloc_tty_driver(unsigned int lines) -{ - struct tty_driver *ret = tty_alloc_driver(lines, 0); - if (IS_ERR(ret)) - return NULL; - return ret; -} - static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } +static inline void tty_set_operations(struct tty_driver *driver, + const struct tty_operations *op) +{ + driver->ops = op; +} + /* tty driver magic number */ #define TTY_DRIVER_MAGIC 0x5402 @@ -434,4 +426,21 @@ static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 +int tty_register_driver(struct tty_driver *driver); +void tty_unregister_driver(struct tty_driver *driver); +struct device *tty_register_device(struct tty_driver *driver, unsigned index, + struct device *dev); +struct device *tty_register_device_attr(struct tty_driver *driver, + unsigned index, struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_unregister_device(struct tty_driver *driver, unsigned index); + +#ifdef CONFIG_PROC_FS +void proc_tty_register_driver(struct tty_driver *); +void proc_tty_unregister_driver(struct tty_driver *); +#else +static inline void proc_tty_register_driver(struct tty_driver *d) {} +static inline void proc_tty_unregister_driver(struct tty_driver *d) {} +#endif + #endif /* #ifdef _LINUX_TTY_DRIVER_H */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 767f62086bd9..32284992b31a 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -2,8 +2,13 @@ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H +#include <linux/tty_buffer.h> +#include <linux/tty_port.h> + +struct tty_ldisc; + extern int tty_buffer_set_limit(struct tty_port *port, int limit); -extern int tty_buffer_space_avail(struct tty_port *port); +extern unsigned int tty_buffer_space_avail(struct tty_port *port); extern int tty_buffer_request_room(struct tty_port *port, size_t size); extern int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char 
*flags, size_t size); @@ -37,6 +42,9 @@ static inline int tty_insert_flip_string(struct tty_port *port, return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size); } +int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, + const char *f, int count); + extern void tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 31284b55bd4f..b1d812e902aa 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -2,6 +2,8 @@ #ifndef _LINUX_TTY_LDISC_H #define _LINUX_TTY_LDISC_H +struct tty_struct; + /* * This structure defines the interface between the tty line discipline * implementation and the tty routines. The following routines can be @@ -126,6 +128,9 @@ #include <linux/fs.h> #include <linux/wait.h> #include <linux/atomic.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/seq_file.h> /* * the semaphore definition @@ -201,15 +206,13 @@ struct tty_ldisc_ops { * The following routines are called from below. */ void (*receive_buf)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); + const char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, - char *fp, int count); + const char *fp, int count); struct module *owner; - - int refcount; }; struct tty_ldisc { @@ -222,4 +225,16 @@ struct tty_ldisc { #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) +extern const struct seq_operations tty_ldiscs_seq_ops; + +struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); +void tty_ldisc_deref(struct tty_ldisc *); +struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); + +void tty_ldisc_flush(struct tty_struct *tty); + +int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); +void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); +int tty_set_ldisc(struct tty_struct *tty, int disc); + #endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h new file mode 100644 index 000000000000..6e86e9e118b6 --- /dev/null +++ b/include/linux/tty_port.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_PORT_H +#define _LINUX_TTY_PORT_H + +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/tty_buffer.h> +#include <linux/wait.h> + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. + * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. + */ + +struct attribute_group; +struct tty_driver; +struct tty_port; +struct tty_struct; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ + void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. Do not use to free resources. 
Called + under the port mutex to serialize against activate/shutdowns */ + void (*shutdown)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); +}; + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *port); +}; + +extern const struct tty_port_client_operations tty_port_default_client_ops; + +struct tty_port { + struct tty_bufhead buf; /* Locked internally */ + struct tty_struct *tty; /* Back pointer */ + struct tty_struct *itty; /* internal back ptr */ + const struct tty_port_operations *ops; /* Port operations */ + const struct tty_port_client_operations *client_ops; /* Port client operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ + int count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ + unsigned char console:1; /* port is a console */ + struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ + unsigned char *xmit_buf; /* Optional buffer */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ + struct kref kref; /* Ref counter */ + void *client_data; +}; + +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ +#define TTY_PORT_KOPENED 5 /* device exclusively opened by + kernel */ + +void tty_port_init(struct tty_port *port); +void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, + unsigned index); +struct device *tty_port_register_device(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); +int tty_port_alloc_xmit_buf(struct tty_port *port); +void tty_port_free_xmit_buf(struct tty_port *port); +void tty_port_destroy(struct tty_port *port); +void tty_port_put(struct tty_port *port); + +static inline struct tty_port 
*tty_port_get(struct tty_port *port) +{ + if (port && kref_get_unless_zero(&port->kref)) + return port; + return NULL; +} + +/* If the cts flow control is enabled, return true. */ +static inline bool tty_port_cts_enabled(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); +} + +static inline bool tty_port_active(const struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); +} + +static inline bool tty_port_check_carrier(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); +} + +static inline bool tty_port_suspended(const struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); +} + +static inline bool tty_port_initialized(const struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); +} + +static inline bool tty_port_kopened(const struct tty_port *port) +{ + return test_bit(TTY_PORT_KOPENED, &port->iflags); +} + +static inline void tty_port_set_kopened(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_KOPENED, &port->iflags, val); +} + +struct tty_struct *tty_port_tty_get(struct tty_port *port); +void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +int tty_port_carrier_raised(struct tty_port *port); +void tty_port_raise_dtr_rts(struct tty_port *port); +void tty_port_lower_dtr_rts(struct tty_port *port); +void tty_port_hangup(struct tty_port *port); +void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +void tty_port_tty_wakeup(struct tty_port *port); +int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); +void tty_port_close(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_install(struct tty_port *port, struct tty_driver *driver, + struct tty_struct *tty); +int tty_port_open(struct tty_port *port, struct tty_struct *tty, + struct file *filp); + +static inline int tty_port_users(struct tty_port *port) +{ + return port->count + port->blocked_open; +} + +#endif diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h index 20d310331eb5..46b15e2aaefb 100644 --- a/include/linux/typecheck.h +++ b/include/linux/typecheck.h @@ -22,4 +22,13 @@ (void)__tmp; \ }) +/* + * Check at compile time that something is a pointer type. 
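The typecheck_pointer() macro defined just below rejects non-pointer arguments at build time: it declares a dummy variable of the argument's type and applies sizeof to its dereference, which only compiles when the type can be dereferenced. A hedged usage sketch (the demo function is hypothetical):

	/* Sketch: the first use compiles, the commented-out one would not. */
	static void typecheck_pointer_demo(int *p, long n)
	{
		typecheck_pointer(p);	/* fine: p is a pointer */
		/* typecheck_pointer(n); -> build error: *n is invalid */
	}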
+ */ +#define typecheck_pointer(x) \ +({ typeof(x) __dummy; \ + (void)sizeof(*__dummy); \ + 1; \ +}) + #endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c05e903cef02..ac0394087f7d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -200,16 +200,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) n = _copy_to_user(to, from, n); return n; } -#ifdef CONFIG_COMPAT -static __always_inline unsigned long __must_check -copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - might_fault(); - if (access_ok(to, n) && access_ok(from, n)) - n = raw_copy_in_user(to, from, n); - return n; -} -#endif #ifndef copy_mc_to_kernel /* diff --git a/include/linux/uio.h b/include/linux/uio.h index d3ec87706d75..5265024e8b90 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -19,21 +19,17 @@ struct kvec { enum iter_type { /* iter types */ - ITER_IOVEC = 4, - ITER_KVEC = 8, - ITER_BVEC = 16, - ITER_PIPE = 32, - ITER_DISCARD = 64, - ITER_XARRAY = 128, + ITER_IOVEC, + ITER_KVEC, + ITER_BVEC, + ITER_PIPE, + ITER_XARRAY, + ITER_DISCARD, }; struct iov_iter { - /* - * Bit 0 is the read/write bit, set if we're writing. - * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and - * the caller isn't expecting to drop a page reference when done. - */ - unsigned int type; + u8 iter_type; + bool data_source; size_t iov_offset; size_t count; union { @@ -51,11 +47,12 @@ struct iov_iter { }; loff_t xarray_start; }; + size_t truncated; }; static inline enum iter_type iov_iter_type(const struct iov_iter *i) { - return i->type & ~(READ | WRITE); + return i->iter_type; } static inline bool iter_is_iovec(const struct iov_iter *i) @@ -90,7 +87,7 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i) static inline unsigned char iov_iter_rw(const struct iov_iter *i) { - return i->type & (READ | WRITE); + return i->data_source ? 
WRITE : READ; } /* @@ -119,11 +116,11 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) }; } -size_t iov_iter_copy_from_user_atomic(struct page *page, - struct iov_iter *i, unsigned long offset, size_t bytes); +size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, + size_t bytes, struct iov_iter *i); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); +int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); @@ -132,9 +129,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); -bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); -bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); static __always_inline __must_check size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) @@ -157,10 +152,11 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return false; - else - return _copy_from_iter_full(addr, bytes, i); + size_t copied = copy_from_iter(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; } static __always_inline __must_check @@ -175,10 +171,11 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return false; - else - return _copy_from_iter_full_nocache(addr, bytes, i); + size_t copied = copy_from_iter_nocache(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; } #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE @@ -258,8 +255,10 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) * conversion in assignement is by definition greater than all * values of size_t, including old i->count. 
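copy_from_iter_full() and its nocache and checksum variants now share one open-coded pattern: do the copy, and on a short result rewind the iterator with iov_iter_revert() so callers get all-or-nothing behaviour. A hedged sketch of the same idiom in a caller (struct my_hdr is hypothetical):

	/* Sketch: all-or-nothing header read from an iov_iter. */
	static bool my_pull_header(struct my_hdr *hdr, struct iov_iter *from)
	{
		size_t copied = copy_from_iter(hdr, sizeof(*hdr), from);

		if (likely(copied == sizeof(*hdr)))
			return true;
		iov_iter_revert(from, copied);	/* undo the partial copy */
		return false;
	}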
*/ - if (i->count > count) + if (i->count > count) { + i->truncated += i->count - count; i->count = count; + } } /* @@ -268,6 +267,7 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) */ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { + i->truncated -= count - i->count; i->count = count; } @@ -278,7 +278,17 @@ struct csum_state { size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); + +static __always_inline __must_check +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, + __wsum *csum, struct iov_iter *i) +{ + size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; +} size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, struct iov_iter *i); @@ -294,8 +304,4 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec, int import_single_range(int type, void __user *buf, size_t len, struct iovec *iov, struct iov_iter *i); -int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, - int (*f)(struct kvec *vec, void *context), - void *context); - #endif diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h deleted file mode 100644 index 167aa849c0ce..000000000000 --- a/include/linux/unaligned/access_ok.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_ACCESS_OK_H -#define _LINUX_UNALIGNED_ACCESS_OK_H - -#include <linux/kernel.h> -#include <asm/byteorder.h> - -static __always_inline u16 get_unaligned_le16(const void *p) -{ - return le16_to_cpup((__le16 *)p); -} - -static __always_inline u32 get_unaligned_le32(const void *p) -{ - return le32_to_cpup((__le32 *)p); -} - -static __always_inline u64 get_unaligned_le64(const void *p) -{ - return le64_to_cpup((__le64 *)p); -} - -static __always_inline u16 get_unaligned_be16(const void *p) -{ - return be16_to_cpup((__be16 *)p); -} - -static __always_inline u32 get_unaligned_be32(const void *p) -{ - return be32_to_cpup((__be32 *)p); -} - -static __always_inline u64 get_unaligned_be64(const void *p) -{ - return be64_to_cpup((__be64 *)p); -} - -static __always_inline void put_unaligned_le16(u16 val, void *p) -{ - *((__le16 *)p) = cpu_to_le16(val); -} - -static __always_inline void put_unaligned_le32(u32 val, void *p) -{ - *((__le32 *)p) = cpu_to_le32(val); -} - -static __always_inline void put_unaligned_le64(u64 val, void *p) -{ - *((__le64 *)p) = cpu_to_le64(val); -} - -static __always_inline void put_unaligned_be16(u16 val, void *p) -{ - *((__be16 *)p) = cpu_to_be16(val); -} - -static __always_inline void put_unaligned_be32(u32 val, void *p) -{ - *((__be32 *)p) = cpu_to_be32(val); -} - -static __always_inline void put_unaligned_be64(u64 val, void *p) -{ - *((__be64 *)p) = cpu_to_be64(val); -} - -#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h deleted file mode 100644 index c43ff5918c8a..000000000000 --- a/include/linux/unaligned/be_byteshift.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H -#define _LINUX_UNALIGNED_BE_BYTESHIFT_H - -#include <linux/types.h> - -static inline u16 __get_unaligned_be16(const u8 *p) -{ 
- return p[0] << 8 | p[1]; -} - -static inline u32 __get_unaligned_be32(const u8 *p) -{ - return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; -} - -static inline u64 __get_unaligned_be64(const u8 *p) -{ - return (u64)__get_unaligned_be32(p) << 32 | - __get_unaligned_be32(p + 4); -} - -static inline void __put_unaligned_be16(u16 val, u8 *p) -{ - *p++ = val >> 8; - *p++ = val; -} - -static inline void __put_unaligned_be32(u32 val, u8 *p) -{ - __put_unaligned_be16(val >> 16, p); - __put_unaligned_be16(val, p + 2); -} - -static inline void __put_unaligned_be64(u64 val, u8 *p) -{ - __put_unaligned_be32(val >> 32, p); - __put_unaligned_be32(val, p + 4); -} - -static inline u16 get_unaligned_be16(const void *p) -{ - return __get_unaligned_be16(p); -} - -static inline u32 get_unaligned_be32(const void *p) -{ - return __get_unaligned_be32(p); -} - -static inline u64 get_unaligned_be64(const void *p) -{ - return __get_unaligned_be64(p); -} - -static inline void put_unaligned_be16(u16 val, void *p) -{ - __put_unaligned_be16(val, p); -} - -static inline void put_unaligned_be32(u32 val, void *p) -{ - __put_unaligned_be32(val, p); -} - -static inline void put_unaligned_be64(u64 val, void *p) -{ - __put_unaligned_be64(val, p); -} - -#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h deleted file mode 100644 index 7164214a4ba1..000000000000 --- a/include/linux/unaligned/be_memmove.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H -#define _LINUX_UNALIGNED_BE_MEMMOVE_H - -#include <linux/unaligned/memmove.h> - -static inline u16 get_unaligned_be16(const void *p) -{ - return __get_unaligned_memmove16((const u8 *)p); -} - -static inline u32 get_unaligned_be32(const void *p) -{ - return __get_unaligned_memmove32((const u8 *)p); -} - -static inline u64 get_unaligned_be64(const void *p) -{ - return __get_unaligned_memmove64((const u8 *)p); -} - -static inline void put_unaligned_be16(u16 val, void *p) -{ - __put_unaligned_memmove16(val, p); -} - -static inline void put_unaligned_be32(u32 val, void *p) -{ - __put_unaligned_memmove32(val, p); -} - -static inline void put_unaligned_be64(u64 val, void *p) -{ - __put_unaligned_memmove64(val, p); -} - -#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h deleted file mode 100644 index 15ea503a13fc..000000000000 --- a/include/linux/unaligned/be_struct.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_BE_STRUCT_H -#define _LINUX_UNALIGNED_BE_STRUCT_H - -#include <linux/unaligned/packed_struct.h> - -static inline u16 get_unaligned_be16(const void *p) -{ - return __get_unaligned_cpu16((const u8 *)p); -} - -static inline u32 get_unaligned_be32(const void *p) -{ - return __get_unaligned_cpu32((const u8 *)p); -} - -static inline u64 get_unaligned_be64(const void *p) -{ - return __get_unaligned_cpu64((const u8 *)p); -} - -static inline void put_unaligned_be16(u16 val, void *p) -{ - __put_unaligned_cpu16(val, p); -} - -static inline void put_unaligned_be32(u32 val, void *p) -{ - __put_unaligned_cpu32(val, p); -} - -static inline void put_unaligned_be64(u64 val, void *p) -{ - __put_unaligned_cpu64(val, p); -} - -#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h deleted file mode 100644 index 303289492859..000000000000 --- 
a/include/linux/unaligned/generic.h +++ /dev/null @@ -1,115 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_GENERIC_H -#define _LINUX_UNALIGNED_GENERIC_H - -#include <linux/types.h> - -/* - * Cause a link-time error if we try an unaligned access other than - * 1,2,4 or 8 bytes long - */ -extern void __bad_unaligned_access_size(void); - -#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ - __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ - __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ - __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ - __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ - __bad_unaligned_access_size())))); \ - })) - -#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ - __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ - __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ - __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ - __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ - __bad_unaligned_access_size())))); \ - })) - -#define __put_unaligned_le(val, ptr) ({ \ - void *__gu_p = (ptr); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(u8 *)__gu_p = (__force u8)(val); \ - break; \ - case 2: \ - put_unaligned_le16((__force u16)(val), __gu_p); \ - break; \ - case 4: \ - put_unaligned_le32((__force u32)(val), __gu_p); \ - break; \ - case 8: \ - put_unaligned_le64((__force u64)(val), __gu_p); \ - break; \ - default: \ - __bad_unaligned_access_size(); \ - break; \ - } \ - (void)0; }) - -#define __put_unaligned_be(val, ptr) ({ \ - void *__gu_p = (ptr); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(u8 *)__gu_p = (__force u8)(val); \ - break; \ - case 2: \ - put_unaligned_be16((__force u16)(val), __gu_p); \ - break; \ - case 4: \ - put_unaligned_be32((__force u32)(val), __gu_p); \ - break; \ - case 8: \ - put_unaligned_be64((__force u64)(val), __gu_p); \ - break; \ - default: \ - __bad_unaligned_access_size(); \ - break; \ - } \ - (void)0; }) - -static inline u32 __get_unaligned_be24(const u8 *p) -{ - return p[0] << 16 | p[1] << 8 | p[2]; -} - -static inline u32 get_unaligned_be24(const void *p) -{ - return __get_unaligned_be24(p); -} - -static inline u32 __get_unaligned_le24(const u8 *p) -{ - return p[0] | p[1] << 8 | p[2] << 16; -} - -static inline u32 get_unaligned_le24(const void *p) -{ - return __get_unaligned_le24(p); -} - -static inline void __put_unaligned_be24(const u32 val, u8 *p) -{ - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; -} - -static inline void put_unaligned_be24(const u32 val, void *p) -{ - __put_unaligned_be24(val, p); -} - -static inline void __put_unaligned_le24(const u32 val, u8 *p) -{ - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; -} - -static inline void put_unaligned_le24(const u32 val, void *p) -{ - __put_unaligned_le24(val, p); -} - -#endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h deleted file mode 100644 index 2248dcb0df76..000000000000 --- a/include/linux/unaligned/le_byteshift.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H -#define _LINUX_UNALIGNED_LE_BYTESHIFT_H - -#include <linux/types.h> - -static inline u16 __get_unaligned_le16(const u8 *p) -{ - return p[0] | p[1] << 8; -} - -static inline u32 __get_unaligned_le32(const u8 *p) -{ - return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; -} - -static inline 
u64 __get_unaligned_le64(const u8 *p) -{ - return (u64)__get_unaligned_le32(p + 4) << 32 | - __get_unaligned_le32(p); -} - -static inline void __put_unaligned_le16(u16 val, u8 *p) -{ - *p++ = val; - *p++ = val >> 8; -} - -static inline void __put_unaligned_le32(u32 val, u8 *p) -{ - __put_unaligned_le16(val >> 16, p + 2); - __put_unaligned_le16(val, p); -} - -static inline void __put_unaligned_le64(u64 val, u8 *p) -{ - __put_unaligned_le32(val >> 32, p + 4); - __put_unaligned_le32(val, p); -} - -static inline u16 get_unaligned_le16(const void *p) -{ - return __get_unaligned_le16(p); -} - -static inline u32 get_unaligned_le32(const void *p) -{ - return __get_unaligned_le32(p); -} - -static inline u64 get_unaligned_le64(const void *p) -{ - return __get_unaligned_le64(p); -} - -static inline void put_unaligned_le16(u16 val, void *p) -{ - __put_unaligned_le16(val, p); -} - -static inline void put_unaligned_le32(u32 val, void *p) -{ - __put_unaligned_le32(val, p); -} - -static inline void put_unaligned_le64(u64 val, void *p) -{ - __put_unaligned_le64(val, p); -} - -#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h deleted file mode 100644 index 9202e864d026..000000000000 --- a/include/linux/unaligned/le_memmove.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H -#define _LINUX_UNALIGNED_LE_MEMMOVE_H - -#include <linux/unaligned/memmove.h> - -static inline u16 get_unaligned_le16(const void *p) -{ - return __get_unaligned_memmove16((const u8 *)p); -} - -static inline u32 get_unaligned_le32(const void *p) -{ - return __get_unaligned_memmove32((const u8 *)p); -} - -static inline u64 get_unaligned_le64(const void *p) -{ - return __get_unaligned_memmove64((const u8 *)p); -} - -static inline void put_unaligned_le16(u16 val, void *p) -{ - __put_unaligned_memmove16(val, p); -} - -static inline void put_unaligned_le32(u32 val, void *p) -{ - __put_unaligned_memmove32(val, p); -} - -static inline void put_unaligned_le64(u64 val, void *p) -{ - __put_unaligned_memmove64(val, p); -} - -#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h deleted file mode 100644 index 9977987883a6..000000000000 --- a/include/linux/unaligned/le_struct.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_LE_STRUCT_H -#define _LINUX_UNALIGNED_LE_STRUCT_H - -#include <linux/unaligned/packed_struct.h> - -static inline u16 get_unaligned_le16(const void *p) -{ - return __get_unaligned_cpu16((const u8 *)p); -} - -static inline u32 get_unaligned_le32(const void *p) -{ - return __get_unaligned_cpu32((const u8 *)p); -} - -static inline u64 get_unaligned_le64(const void *p) -{ - return __get_unaligned_cpu64((const u8 *)p); -} - -static inline void put_unaligned_le16(u16 val, void *p) -{ - __put_unaligned_cpu16(val, p); -} - -static inline void put_unaligned_le32(u32 val, void *p) -{ - __put_unaligned_cpu32(val, p); -} - -static inline void put_unaligned_le64(u64 val, void *p) -{ - __put_unaligned_cpu64(val, p); -} - -#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h deleted file mode 100644 index ac71b53bc6dc..000000000000 --- a/include/linux/unaligned/memmove.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_UNALIGNED_MEMMOVE_H -#define _LINUX_UNALIGNED_MEMMOVE_H 
- -#include <linux/kernel.h> -#include <linux/string.h> - -/* Use memmove here, so gcc does not insert a __builtin_memcpy. */ - -static inline u16 __get_unaligned_memmove16(const void *p) -{ - u16 tmp; - memmove(&tmp, p, 2); - return tmp; -} - -static inline u32 __get_unaligned_memmove32(const void *p) -{ - u32 tmp; - memmove(&tmp, p, 4); - return tmp; -} - -static inline u64 __get_unaligned_memmove64(const void *p) -{ - u64 tmp; - memmove(&tmp, p, 8); - return tmp; -} - -static inline void __put_unaligned_memmove16(u16 val, void *p) -{ - memmove(p, &val, 2); -} - -static inline void __put_unaligned_memmove32(u32 val, void *p) -{ - memmove(p, &val, 4); -} - -static inline void __put_unaligned_memmove64(u64 val, void *p) -{ - memmove(p, &val, 8); -} - -#endif /* _LINUX_UNALIGNED_MEMMOVE_H */ diff --git a/include/linux/units.h b/include/linux/units.h index dcc30a53fa93..681fc652e3d7 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,9 +4,29 @@ #include <linux/math.h> -#define MILLIWATT_PER_WATT 1000L -#define MICROWATT_PER_MILLIWATT 1000L -#define MICROWATT_PER_WATT 1000000L +/* Metric prefixes in accordance with Système international (d'unités) */ +#define PETA 1000000000000000ULL +#define TERA 1000000000000ULL +#define GIGA 1000000000UL +#define MEGA 1000000UL +#define KILO 1000UL +#define HECTO 100UL +#define DECA 10UL +#define DECI 10UL +#define CENTI 100UL +#define MILLI 1000UL +#define MICRO 1000000UL +#define NANO 1000000000UL +#define PICO 1000000000000ULL +#define FEMTO 1000000000000000ULL + +#define HZ_PER_KHZ 1000UL +#define KHZ_PER_MHZ 1000UL +#define HZ_PER_MHZ 1000000UL + +#define MILLIWATT_PER_WATT 1000UL +#define MICROWATT_PER_MILLIWATT 1000UL +#define MICROWATT_PER_WATT 1000000UL #define ABSOLUTE_ZERO_MILLICELSIUS -273150 diff --git a/include/linux/usb.h b/include/linux/usb.h index eaae24217e8a..7ccaa76a9a96 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -473,12 +473,6 @@ struct usb_dev_state; struct usb_tt; -enum usb_device_removable { - USB_DEVICE_REMOVABLE_UNKNOWN = 0, - USB_DEVICE_REMOVABLE, - USB_DEVICE_FIXED, -}; - enum usb_port_connect_type { USB_PORT_CONNECT_TYPE_UNKNOWN = 0, USB_PORT_CONNECT_TYPE_HOT_PLUG, @@ -703,7 +697,6 @@ struct usb_device { #endif struct wusb_dev *wusb_dev; int slot_id; - enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; @@ -1485,7 +1478,7 @@ typedef void (*usb_complete_t)(struct urb *); * * Note that transfer_buffer must still be set if the controller * does not support DMA (as indicated by hcd_uses_dma()) and when talking - * to root hub. If you have to trasfer between highmem zone and the device + * to root hub. If you have to transfer between highmem zone and the device * on such controller, create a bounce buffer or bail out with an error. * If transfer_buffer cannot be set (is in highmem) and the controller is DMA * capable, assign NULL to it, so that usbmon knows not to use the value. 
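The units.h hunk above replaces the three bare watt-conversion literals with a full table of named SI-prefix factors. A minimal sketch (not part of the patch; the helper names and the 24 MHz value are invented for illustration) of how conversions are expected to be spelled with the new constants:

#include <linux/types.h>
#include <linux/units.h>

/* A 24 MHz reference clock expressed in Hz via the named factor. */
static inline u64 example_clock_hz(void)
{
	return 24ULL * HZ_PER_MHZ;
}

/* Report a power reading taken in microwatts as whole milliwatts. */
static inline u32 example_uw_to_mw(u32 microwatt)
{
	return microwatt / MICROWATT_PER_MILLIWATT;
}

Spelling the factor as HZ_PER_MHZ or MICROWATT_PER_MILLIWATT makes the direction of the conversion self-documenting, which is the point of promoting these constants into a shared header.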
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index ead8c9a47c6a..8fc2abd7aecb 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -156,6 +156,20 @@ struct uac2_feature_unit_descriptor { __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); +#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac2_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __le32 bmaControls[ch + 1]; \ + __u8 iFeature; \ +} __packed + /* 4.7.2.10 Effect Unit Descriptor */ struct uac2_effect_unit_descriptor { diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h index 9b895f93d8de..9f5a51f79ba5 100644 --- a/include/linux/usb/cdc-wdm.h +++ b/include/linux/usb/cdc-wdm.h @@ -12,11 +12,12 @@ #ifndef __LINUX_USB_CDC_WDM_H #define __LINUX_USB_CDC_WDM_H +#include <linux/wwan.h> #include <uapi/linux/usb/cdc-wdm.h> extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, - int bufsize, + int bufsize, enum wwan_port_type type, int (*manage_power)(struct usb_interface *, int)); #endif /* __LINUX_USB_CDC_WDM_H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index c71150f2c639..9d2762279286 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -271,7 +271,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, * @bConfigurationValue: Copied into configuration descriptor. * @iConfiguration: Copied into configuration descriptor. * @bmAttributes: Copied into configuration descriptor. - * @MaxPower: Power consumtion in mA. Used to compute bMaxPower in the + * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the * configuration descriptor after considering the bus speed. * @cdev: assigned by @usb_add_config() before calling @bind(); this is * the device associated with this configuration. 
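The DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR() helper added to audio-v2.h above stamps out a fixed-channel-count variant of the otherwise variable-length feature unit descriptor. A sketch of the intended use in a gadget function driver, assuming the usual subtype constants from <linux/usb/audio.h>; the entity IDs and the stereo layout are made up:

#include <linux/build_bug.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>

/* Emits "struct uac2_feature_unit_descriptor_2" for a stereo (2ch) unit. */
DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(2);

static struct uac2_feature_unit_descriptor_2 stereo_fu_desc = {
	.bLength		= UAC2_DT_FEATURE_UNIT_SIZE(2),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
	.bUnitID		= 5,	/* hypothetical entity IDs */
	.bSourceID		= 4,
	/* bmaControls[0] is the master channel, [1..2] the logical channels */
};

/* The size macro matches the generated layout: 6 + (2 + 1) * 4 = 18 bytes. */
static_assert(sizeof(stereo_fu_desc) == UAC2_DT_FEATURE_UNIT_SIZE(2));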
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 78e006355557..c892c5bc6638 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -45,6 +45,7 @@ struct ehci_caps { #define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ +#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */ u32 hcc_params; /* HCCPARAMS - offset 0x8 */ /* EHCI 1.1 addendum */ @@ -126,8 +127,9 @@ struct ehci_regs { u32 configured_flag; #define FLAG_CF (1<<0) /* true: we'll support "high speed" */ - /* PORTSC: offset 0x44 */ - u32 port_status[0]; /* up to N_PORTS */ + union { + /* PORTSC: offset 0x44 */ + u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ #define PORTSC_SUSPEND_STS_ACK 0 #define PORTSC_SUSPEND_STS_NYET 1 @@ -164,28 +166,35 @@ struct ehci_regs { #define PORT_CSC (1<<1) /* connect status change */ #define PORT_CONNECT (1<<0) /* device connected */ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) - - u32 reserved3[9]; - - /* USBMODE: offset 0x68 */ - u32 usbmode; /* USB Device mode */ + struct { + u32 reserved3[9]; + /* USBMODE: offset 0x68 */ + u32 usbmode; /* USB Device mode */ + }; #define USBMODE_SDIS (1<<3) /* Stream disable */ #define USBMODE_BE (1<<2) /* BE/LE endianness select */ #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ - - u32 reserved4[6]; + }; /* Moorestown has some non-standard registers, partially due to the fact that * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to * PORTSCx */ - /* HOSTPC: offset 0x84 */ - u32 hostpc[0]; /* HOSTPC extension */ + union { + struct { + u32 reserved4; + /* HOSTPC: offset 0x84 */ + u32 hostpc[HCS_N_PORTS_MAX]; #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ + }; + + /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */ + u32 brcm_insnreg[4]; + }; - u32 reserved5[17]; + u32 reserved5[2]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index ee04ef214ce8..10fe57cf40be 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -197,7 +197,7 @@ struct usb_ep_caps { * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" * @ops: Function pointers used to access hardware-specific operations. * @ep_list:the gadget's ep_list holds all of its endpoints - * @caps:The structure describing types and directions supported by endoint. + * @caps:The structure describing types and directions supported by endpoint. * @enabled: The current endpoint enabled/disabled state. * @claimed: True if this endpoint is claimed by a function. * @maxpacket:The maximum packet size used on this endpoint. 
The initial @@ -325,9 +325,11 @@ struct usb_gadget_ops { void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); void (*udc_set_ssp_rate)(struct usb_gadget *gadget, enum usb_ssp_rate rate); + void (*udc_async_callbacks)(struct usb_gadget *gadget, bool enable); struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); + int (*check_config)(struct usb_gadget *gadget); }; /** @@ -490,7 +492,7 @@ extern char *usb_get_gadget_udc_name(void); */ static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) { - int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc); return round_up(len, max_packet_size); } @@ -607,6 +609,7 @@ int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); +int usb_gadget_check_config(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } @@ -630,6 +633,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } +static inline int usb_gadget_check_config(struct usb_gadget *gadget) +{ return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 96281cd50ff6..548a028f2dab 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -59,7 +59,7 @@ * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. - * This framework is a layer over that, and should be more sharable. + * This framework is a layer over that, and should be more shareable. */ /*-------------------------------------------------------------------------*/ @@ -299,7 +299,7 @@ struct hc_driver { * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, - * such as alignment contraints. If these are not specified, the + * such as alignment constraints. 
If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions in your HCD * implementation) @@ -409,7 +409,10 @@ struct hc_driver { int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); - + /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */ +#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 + int (*submit_single_step_set_feature)(struct usb_hcd *, + struct urb *, int); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) @@ -474,6 +477,14 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); +#ifdef CONFIG_USB_HCD_TEST_MODE +extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port); +#else +static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) +{ + return 0; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ #ifdef CONFIG_USB_PCI struct pci_dev; diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h deleted file mode 100644 index b75ded28db81..000000000000 --- a/include/linux/usb/isp1760.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * board initialization should put one of these into dev->platform_data - * and place the isp1760 onto platform_bus named "isp1760-hcd". - */ - -#ifndef __LINUX_USB_ISP1760_H -#define __LINUX_USB_ISP1760_H - -struct isp1760_platform_data { - unsigned is_isp1761:1; /* Chip is ISP1761 */ - unsigned bus_width_16:1; /* 16/32-bit data bus width */ - unsigned port1_otg:1; /* Port 1 supports OTG */ - unsigned analog_oc:1; /* Analog overcurrent */ - unsigned dack_polarity_high:1; /* DACK active high */ - unsigned dreq_polarity_high:1; /* DREQ active high */ -}; - -#endif /* __LINUX_USB_ISP1760_H */ diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index e78eb577d0fa..784659d4dc99 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -98,7 +98,7 @@ enum otg_fsm_timer { * @b_bus_req: TRUE during the time that the Application running on the * B-device wants to use the bus * - * Auxilary inputs (OTG v1.3 only. Obsolete now.) + * Auxiliary inputs (OTG v1.3 only. Obsolete now.) 
* @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD * @b_bus_suspend: TRUE when the A-device detects that the B-device has put * the bus into suspend @@ -153,7 +153,7 @@ struct otg_fsm { int a_bus_req; int b_bus_req; - /* Auxilary inputs */ + /* Auxiliary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; @@ -177,7 +177,7 @@ struct otg_fsm { int a_bus_req_inf; int a_clr_err_inf; int b_bus_req_inf; - /* Auxilary informative variables */ + /* Auxiliary informative variables */ int a_suspend_req_inf; /* Timeout indicator for timers */ @@ -196,6 +196,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool hnp_work_inited; bool state_changed; }; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 69f1b6328532..6475f880be37 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -125,8 +125,9 @@ enum usb_dr_mode { * @dev: Pointer to the given device * * The function gets phy interface string from property 'dr_mode', - * and returns the correspondig enum usb_dr_mode + * and returns the corresponding enum usb_dr_mode */ extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); +extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev); #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index bf00259493e0..96b7ff66f074 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */ #define PD_T_SOURCE_ACTIVITY 45 #define PD_T_SINK_ACTIVITY 135 -#define PD_T_SINK_WAIT_CAP 240 +#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */ #define PD_T_PS_TRANSITION 500 #define PD_T_SRC_TRANSITION 35 #define PD_T_DRP_SNK 40 diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h index 0eb83ce19597..b517ebc8f0ff 100644 --- a/include/linux/usb/pd_ext_sdb.h +++ b/include/linux/usb/pd_ext_sdb.h @@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields { #define USB_PD_EXT_SDB_EVENT_OVP BIT(3) #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4) -#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \ - USB_PD_EXT_SDB_EVENT_OTP | \ - USB_PD_EXT_SDB_EVENT_OVP) - #endif /* __LINUX_USB_PD_EXT_SDB_H */ diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 5e4c497f54d6..eeb7c2157c72 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -32,7 +32,7 @@ #define USB_QUIRK_DELAY_INIT BIT(6) /* - * For high speed and super speed interupt endpoints, the USB 2.0 and + * For high speed and super speed interrupt endpoints, the USB 2.0 and * USB 3.0 spec require the interval in microframes * (1 microframe = 125 microseconds) to be calculated as * interval = 2 ^ (bInterval-1). 
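The comment being fixed in the quirks.h hunk above encodes the high-speed/SuperSpeed interrupt-endpoint rule: the polling period is 2^(bInterval-1) microframes, one microframe being 125 microseconds. A standalone check of that arithmetic (plain userspace C, not kernel code) over the valid bInterval range 1..16:

#include <stdio.h>

int main(void)
{
	/* interval = 2 ^ (bInterval - 1) microframes, 125 us each */
	for (unsigned int binterval = 1; binterval <= 16; binterval++) {
		unsigned long long uframes = 1ULL << (binterval - 1);

		printf("bInterval=%2u -> %6llu microframes (%llu us)\n",
		       binterval, uframes, uframes * 125);
	}
	return 0;
}

So bInterval=1 polls every microframe (125 us) and bInterval=16 tops out at 32768 microframes, roughly 4.1 seconds, which is why devices that misreport bInterval as a raw frame count need the quirk flag described here.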
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index 0164fed31b06..031f148ab373 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -65,6 +65,7 @@ void usb_role_switch_unregister(struct usb_role_switch *sw); void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data); void *usb_role_switch_get_drvdata(struct usb_role_switch *sw); +const char *usb_role_string(enum usb_role role); #else static inline int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) @@ -109,6 +110,11 @@ static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw) return NULL; } +static inline const char *usb_role_string(enum usb_role role) +{ + return "unknown"; +} + #endif #endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 8c63fa9bfc74..16ea5a4cc586 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -276,7 +276,7 @@ struct usb_serial_driver { int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ - int (*write_room)(struct tty_struct *tty); + unsigned int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); @@ -284,7 +284,7 @@ struct usb_serial_driver { void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); - int (*chars_in_buffer)(struct tty_struct *tty); + unsigned int (*chars_in_buffer)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, long timeout); bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); @@ -347,8 +347,8 @@ int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *por const unsigned char *buf, int count); void usb_serial_generic_close(struct usb_serial_port *port); int usb_serial_generic_resume(struct usb_serial *serial); -int usb_serial_generic_write_room(struct tty_struct *tty); -int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +unsigned int usb_serial_generic_write_room(struct tty_struct *tty); +unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); void usb_serial_generic_read_bulk_callback(struct urb *urb); void usb_serial_generic_write_bulk_callback(struct urb *urb); @@ -395,7 +395,7 @@ static inline void usb_serial_debug_data(struct device *dev, } /* - * Macro for reporting errors in write path to avoid inifinite loop + * Macro for reporting errors in write path to avoid infinite loop * when port is used as a console. */ #define dev_err_console(usport, fmt, ...) \ diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 42fcfbe10590..bffc8d3e14ad 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -66,6 +66,8 @@ enum tcpm_transmit_type { * For example, some tcpcs may include BC1.2 charger detection * and use that in this case. * @set_cc: Called to set value of CC pins + * @apply_rc: Optional; Needed to move TCPCI based chipset to APPLY_RC state + * as stated by the TCPCI specification. 
* @get_cc: Called to read current CC pin values * @set_polarity: * Called to set polarity @@ -120,6 +122,8 @@ struct tcpc_dev { int (*get_vbus)(struct tcpc_dev *dev); int (*get_current_limit)(struct tcpc_dev *dev); int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc); + int (*apply_rc)(struct tcpc_dev *dev, enum typec_cc_status cc, + enum typec_cc_polarity polarity); int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1, enum typec_cc_status *cc2); int (*set_polarity)(struct tcpc_dev *dev, diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index fc4c7edb2e8a..cfb916cccd31 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -97,7 +97,7 @@ enum { #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) -/* Helper for setting/getting the pin assignement value to the configuration */ +/* Helper for setting/getting the pin assignment value to the configuration */ #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 1d08dbbcfe32..eb70cabe6e7f 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -54,9 +54,15 @@ enum ucount_type { UCOUNT_FANOTIFY_GROUPS, UCOUNT_FANOTIFY_MARKS, #endif + UCOUNT_RLIMIT_NPROC, + UCOUNT_RLIMIT_MSGQUEUE, + UCOUNT_RLIMIT_SIGPENDING, + UCOUNT_RLIMIT_MEMLOCK, UCOUNT_COUNTS, }; +#define MAX_PER_NAMESPACE_UCOUNTS UCOUNT_RLIMIT_NPROC + struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; @@ -92,23 +98,42 @@ struct user_namespace { struct ctl_table_header *sysctls; #endif struct ucounts *ucounts; - int ucount_max[UCOUNT_COUNTS]; + long ucount_max[UCOUNT_COUNTS]; } __randomize_layout; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; - int count; - atomic_t ucount[UCOUNT_COUNTS]; + atomic_t count; + atomic_long_t ucount[UCOUNT_COUNTS]; }; extern struct user_namespace init_user_ns; +extern struct ucounts init_ucounts; bool setup_userns_sysctls(struct user_namespace *ns); void retire_userns_sysctls(struct user_namespace *ns); struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); void dec_ucount(struct ucounts *ucounts, enum ucount_type type); +struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid); +struct ucounts * __must_check get_ucounts(struct ucounts *ucounts); +void put_ucounts(struct ucounts *ucounts); + +static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type) +{ + return atomic_long_read(&ucounts->ucount[type]); +} + +long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v); +bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v); +bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max); + +static inline void set_rlimit_ucount_max(struct user_namespace *ns, + enum ucount_type type, unsigned long max) +{ + ns->ucount_max[type] = max <= LONG_MAX ? 
max : LONG_MAX; +} #ifdef CONFIG_USER_NS diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 794d1538b8ba..33cea484d1ad 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -53,18 +53,23 @@ enum mcopy_atomic_mode { MCOPY_ATOMIC_CONTINUE, }; +extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, struct page *page, + bool newly_allocated, bool wp_copy); + extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool *mmap_changing, __u64 mode); + atomic_t *mmap_changing, __u64 mode); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, - bool *mmap_changing); + atomic_t *mmap_changing); extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, - unsigned long len, bool *mmap_changing); + unsigned long len, atomic_t *mmap_changing); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, - bool enable_wp, bool *mmap_changing); + bool enable_wp, atomic_t *mmap_changing); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index f311d227aa1b..3972ab765de1 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -28,13 +28,34 @@ struct vdpa_notification_area { }; /** - * struct vdpa_vq_state - vDPA vq_state definition + * struct vdpa_vq_state_split - vDPA split virtqueue state * @avail_index: available index */ -struct vdpa_vq_state { +struct vdpa_vq_state_split { u16 avail_index; }; +/** + * struct vdpa_vq_state_packed - vDPA packed virtqueue state + * @last_avail_counter: last driver ring wrap counter observed by device + * @last_avail_idx: device available index + * @last_used_counter: device ring wrap counter + * @last_used_idx: used index + */ +struct vdpa_vq_state_packed { + u16 last_avail_counter:1; + u16 last_avail_idx:15; + u16 last_used_counter:1; + u16 last_used_idx:15; +}; + +struct vdpa_vq_state { + union { + struct vdpa_vq_state_split split; + struct vdpa_vq_state_packed packed; + }; +}; + struct vdpa_mgmt_dev; /** @@ -44,6 +65,7 @@ struct vdpa_mgmt_dev; * @config: the configuration ops for this device. * @index: device index * @features_valid: were features initialized? for legacy guests + * @use_va: indicate whether virtual address must be used by this device * @nvqs: maximum number of supported virtqueues * @mdev: management device pointer; caller must setup when registering device as part * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). @@ -54,6 +76,7 @@ struct vdpa_device { const struct vdpa_config_ops *config; unsigned int index; bool features_valid; + bool use_va; int nvqs; struct vdpa_mgmt_dev *mdev; }; @@ -69,6 +92,16 @@ struct vdpa_iova_range { }; /** + * Corresponding file area for device memory mapping + * @file: vma->vm_file for the mapping + * @offset: mapping offset in the vm_file + */ +struct vdpa_map_file { + struct file *file; + u64 offset; +}; + +/** * struct vdpa_config_ops - operations for configuring a vDPA device. 
* Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following @@ -110,7 +143,7 @@ struct vdpa_iova_range { * @vdev: vdpa device * @idx: virtqueue index * @state: pointer to returned state (last_avail_idx) - * @get_vq_notification: Get the notification area for a virtqueue + * @get_vq_notification: Get the notification area for a virtqueue * @vdev: vdpa device * @idx: virtqueue index * Returns the notifcation area @@ -150,6 +183,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @reset: Reset device + * @vdev: vdpa device + * Returns integer: success (0) or error (< 0) * @get_config_size: Get the size of the configuration space * @vdev: vdpa device * Returns size_t: configuration size @@ -234,6 +270,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + int (*reset)(struct vdpa_device *vdev); size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); @@ -245,7 +282,7 @@ struct vdpa_config_ops { /* DMA ops */ int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb); int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size, - u64 pa, u32 perm); + u64 pa, u32 perm, void *opaque); int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size); /* Free device resources */ @@ -254,14 +291,27 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - size_t size, const char *name); + size_t size, const char *name, + bool use_va); -#define vdpa_alloc_device(dev_struct, member, parent, config, name) \ +/** + * vdpa_alloc_device - allocate and initilaize a vDPA device + * + * @dev_struct: the type of the parent structure + * @member: the name of struct vdpa_device within the @dev_struct + * @parent: the parent device + * @config: the bus operations that is supported by this device + * @name: name of the vdpa device + * @use_va: indicate whether virtual address must be used by this device + * + * Return allocated data structure or ERR_PTR upon error + */ +#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va) \ container_of(__vdpa_alloc_device( \ parent, config, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ - dev_struct, member)), name), \ + dev_struct, member)), name, use_va), \ dev_struct, member) int vdpa_register_device(struct vdpa_device *vdev, int nvqs); @@ -316,27 +366,27 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) return vdev->dma_dev; } -static inline void vdpa_reset(struct vdpa_device *vdev) +static inline int vdpa_reset(struct vdpa_device *vdev) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; vdev->features_valid = false; - ops->set_status(vdev, 0); + return ops->reset(vdev); } static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; vdev->features_valid = true; - return ops->set_features(vdev, features); + return ops->set_features(vdev, features); } - -static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, - void *buf, unsigned int len) +static inline void vdpa_get_config(struct vdpa_device 
*vdev, + unsigned int offset, void *buf, + unsigned int len) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; /* * Config accesses aren't supposed to trigger before features are set. diff --git a/include/linux/vfio.h b/include/linux/vfio.h index a2c5b30e1763..b53a9557884a 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -15,13 +15,28 @@ #include <linux/poll.h> #include <uapi/linux/vfio.h> +/* + * VFIO devices can be placed in a set, this allows all devices to share this + * structure and the VFIO core will provide a lock that is held around + * open_device()/close_device() for all devices in the set. + */ +struct vfio_device_set { + void *set_id; + struct mutex lock; + struct list_head device_list; + unsigned int device_count; +}; + struct vfio_device { struct device *dev; const struct vfio_device_ops *ops; struct vfio_group *group; + struct vfio_device_set *dev_set; + struct list_head dev_set_list; /* Members below here are private, not for driver use */ refcount_t refcount; + unsigned int open_count; struct completion comp; struct list_head group_next; }; @@ -29,8 +44,8 @@ struct vfio_device { /** * struct vfio_device_ops - VFIO bus driver device callbacks * - * @open: Called when userspace creates new file descriptor for device - * @release: Called when userspace releases file descriptor for device + * @open_device: Called when the first file descriptor is opened for this device + * @close_device: Opposite of open_device * @read: Perform read(2) on device file descriptor * @write: Perform write(2) on device file descriptor * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* @@ -43,8 +58,8 @@ struct vfio_device { */ struct vfio_device_ops { char *name; - int (*open)(struct vfio_device *vdev); - void (*release)(struct vfio_device *vdev); + int (*open_device)(struct vfio_device *vdev); + void (*close_device)(struct vfio_device *vdev); ssize_t (*read)(struct vfio_device *vdev, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(struct vfio_device *vdev, const char __user *buf, @@ -61,11 +76,14 @@ extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); void vfio_init_group_dev(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); +void vfio_uninit_group_dev(struct vfio_device *device); int vfio_register_group_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); +int vfio_assign_device_set(struct vfio_device *device, void *set_id); + /* events for the backend driver notify callback */ enum vfio_iommu_notify_type { VFIO_IOMMU_CONTAINER_CLOSE = 0, diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h new file mode 100644 index 000000000000..ef9a44b6cf5d --- /dev/null +++ b/include/linux/vfio_pci_core.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/vfio.h> +#include <linux/irqbypass.h> +#include <linux/types.h> +#include <linux/uuid.h> +#include <linux/notifier.h> + +#ifndef VFIO_PCI_CORE_H +#define VFIO_PCI_CORE_H + +#define VFIO_PCI_OFFSET_SHIFT 40 + +#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +/* Special capability IDs predefined access */ +#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ +#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ + +/* Cap maximum number of ioeventfds per device (arbitrary) */ +#define VFIO_PCI_IOEVENTFD_MAX 1000 + +struct vfio_pci_ioeventfd { + struct list_head next; + struct vfio_pci_core_device *vdev; + struct virqfd *virqfd; + void __iomem *addr; + uint64_t data; + loff_t pos; + int bar; + int count; + bool test_mem; +}; + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +struct vfio_pci_core_device; +struct vfio_pci_region; + +struct vfio_pci_regops { + ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + void (*release)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region); + int (*mmap)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region, + struct vm_area_struct *vma); + int (*add_capability)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region, + struct vfio_info_cap *caps); +}; + +struct vfio_pci_region { + u32 type; + u32 subtype; + const struct vfio_pci_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_vf_token { + struct mutex lock; + uuid_t uuid; + int users; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +struct vfio_pci_core_device { + struct vfio_device vdev; + struct pci_dev *pdev; + void __iomem *barmap[PCI_STD_NUM_BARS]; + bool bar_mmap_supported[PCI_STD_NUM_BARS]; + u8 *pci_config_map; + u8 *vconfig; + struct perm_bits *msi_perm; + spinlock_t irqlock; + struct mutex igate; + struct vfio_pci_irq_ctx *ctx; + int num_ctx; + int irq_type; + int num_regions; + struct vfio_pci_region *region; + u8 msi_qmax; + u8 msix_bar; + u16 msix_size; + u32 msix_offset; + u32 rbar[7]; + bool pci_2_3; + bool virq_disabled; + bool reset_works; + bool extended_caps; + bool bardirty; + bool has_vga; + bool needs_reset; + bool nointx; + bool needs_pm_restore; + struct pci_saved_state *pci_saved_state; + struct pci_saved_state *pm_save; + int ioeventfds_nr; + struct eventfd_ctx *err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + struct vfio_pci_vf_token *vf_token; + struct notifier_block nb; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; +}; + +#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) +#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX) +#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX) +#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) +#define 
irq_is(vdev, type) (vdev->irq_type == type) + +extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); +extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); + +extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, void *data); + +extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, + uint64_t data, int count, int fd); + +extern int vfio_pci_init_perm_bits(void); +extern void vfio_pci_uninit_perm_bits(void); + +extern int vfio_config_init(struct vfio_pci_core_device *vdev); +extern void vfio_config_free(struct vfio_pci_core_device *vdev); + +extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); + +extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, + pci_power_t state); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, + u16 cmd); + +#ifdef CONFIG_VFIO_PCI_IGD +extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); +#else +static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_S390 +extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps); +#else +static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps) +{ + return -ENODEV; +} +#endif + +/* Will be exported for vfio pci drivers usage */ +void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga, + bool is_disable_idle_d3); +void vfio_pci_core_close_device(struct vfio_device *core_vdev); +void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, + struct pci_dev *pdev, + const struct vfio_device_ops *vfio_pci_ops); +int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev); +void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev); +void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev); +int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn); +extern const struct pci_error_handlers vfio_pci_core_err_handlers; +long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg); +ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos); +ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, + size_t count, loff_t *ppos); +int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma); +void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count); +int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); +int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); +void 
vfio_pci_core_disable(struct vfio_pci_core_device *vdev); +void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); + +static inline bool vfio_pci_is_vga(struct pci_dev *pdev) +{ + return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; +} + +#endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index dc6ddce92066..b4b9137f9792 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -33,6 +33,8 @@ #include <video/vga.h> +struct pci_dev; + /* Legacy VGA regions */ #define VGA_RSRC_NONE 0x00 #define VGA_RSRC_LEGACY_IO 0x01 @@ -42,42 +44,45 @@ #define VGA_RSRC_NORMAL_IO 0x04 #define VGA_RSRC_NORMAL_MEM 0x08 -/* Passing that instead of a pci_dev to use the system "default" - * device, that is the one used by vgacon. Archs will probably - * have to provide their own vga_default_device(); - */ -#define VGA_DEFAULT_DEVICE (NULL) - -struct pci_dev; - -/* For use by clients */ - -/** - * vga_set_legacy_decoding - * - * @pdev: pci device of the VGA card - * @decodes: bit mask of what legacy regions the card decodes - * - * Indicates to the arbiter if the card decodes legacy VGA IOs, - * legacy VGA Memory, both, or none. All cards default to both, - * the card driver (fbdev for example) should tell the arbiter - * if it has disabled legacy decoding, so the card can be left - * out of the arbitration process (and can be safe to take - * interrupts at any time. - */ -#if defined(CONFIG_VGA_ARB) -extern void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes); -#else +#ifdef CONFIG_VGA_ARB +void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); +int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); +void vga_put(struct pci_dev *pdev, unsigned int rsrc); +struct pci_dev *vga_default_device(void); +void vga_set_default_device(struct pci_dev *pdev); +int vga_remove_vgacon(struct pci_dev *pdev); +int vga_client_register(struct pci_dev *pdev, + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)); +#else /* CONFIG_VGA_ARB */ static inline void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes) { }; -#endif - -#if defined(CONFIG_VGA_ARB) -extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); -#else -static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } -#endif + unsigned int decodes) +{ +}; +static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, + int interruptible) +{ + return 0; +} +static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) +{ +} +static inline struct pci_dev *vga_default_device(void) +{ + return NULL; +} +static inline void vga_set_default_device(struct pci_dev *pdev) +{ +} +static inline int vga_remove_vgacon(struct pci_dev *pdev) +{ + return 0; +} +static inline int vga_client_register(struct pci_dev *pdev, + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)) +{ + return 0; +} +#endif /* CONFIG_VGA_ARB */ /** * vga_get_interruptible @@ -109,48 +114,9 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, return vga_get(pdev, rsrc, 0); } -#if defined(CONFIG_VGA_ARB) -extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); -#else -static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) +static inline void vga_client_unregister(struct pci_dev *pdev) { + vga_client_register(pdev, NULL); } -#endif - - -#ifdef CONFIG_VGA_ARB -extern struct pci_dev *vga_default_device(void); -extern void vga_set_default_device(struct pci_dev 
*pdev); -extern int vga_remove_vgacon(struct pci_dev *pdev); -#else -static inline struct pci_dev *vga_default_device(void) { return NULL; } -static inline void vga_set_default_device(struct pci_dev *pdev) { } -static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } -#endif - -/* - * Architectures should define this if they have several - * independent PCI domains that can afford concurrent VGA - * decoding - */ -#ifndef __ARCH_HAS_VGA_CONFLICT -static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) -{ - return 1; -} -#endif - -#if defined(CONFIG_VGA_ARB) -int vga_client_register(struct pci_dev *pdev, void *cookie, - void (*irq_set_state)(void *cookie, bool state), - unsigned int (*set_vga_decode)(void *cookie, bool state)); -#else -static inline int vga_client_register(struct pci_dev *pdev, void *cookie, - void (*irq_set_state)(void *cookie, bool state), - unsigned int (*set_vga_decode)(void *cookie, bool state)) -{ - return 0; -} -#endif #endif /* LINUX_VGA_H */ diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h index 6b09b786a762..2d0e2f52f938 100644 --- a/include/linux/vhost_iotlb.h +++ b/include/linux/vhost_iotlb.h @@ -17,6 +17,7 @@ struct vhost_iotlb_map { u32 perm; u32 flags_padding; u64 __subtree_last; + void *opaque; }; #define VHOST_IOTLB_FLAG_RETIRE 0x1 @@ -29,6 +30,8 @@ struct vhost_iotlb { unsigned int flags; }; +int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last, + u64 addr, unsigned int perm, void *opaque); int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last, u64 addr, unsigned int perm); void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index b1894e0323fa..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -110,6 +110,7 @@ struct virtio_device { bool config_enabled; bool config_change_pending; spinlock_t config_lock; + spinlock_t vqs_list_lock; /* Protects VQs list access */ struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 6a95b58fd0f4..eb2bd9b4077d 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -79,6 +79,7 @@ static inline void vp_iowrite64_twopart(u64 val, } u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); +u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev); void vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features); u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index dc636b727179..35d7eedb5e8e 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -36,6 +36,7 @@ struct virtio_vsock_sock { u32 rx_bytes; u32 buf_alloc; struct list_head rx_queue; + u32 msg_count; }; struct virtio_vsock_pkt { @@ -80,8 +81,17 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); +int +virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk, + struct msghdr *msg, + size_t len); +ssize_t +virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk, + struct msghdr *msg, + int flags); s64 virtio_transport_stream_has_data(struct vsock_sock *vsk); s64 virtio_transport_stream_has_space(struct vsock_sock *vsk); +u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk); int 
virtio_transport_do_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk); diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ae0dd1948c2b..a185cc75ff52 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -33,6 +33,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGREUSE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, + PGDEMOTE_KSWAPD, + PGDEMOTE_DIRECT, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 4d668abb6391..671d402c3778 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -29,7 +29,7 @@ struct notifier_block; /* in notifier.h */ #define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ /* - * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. + * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC. * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that @@ -104,6 +104,21 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) } #endif +#ifndef arch_vmap_pte_range_map_size +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, + u64 pfn, unsigned int max_page_shift) +{ + return PAGE_SIZE; +} +#endif + +#ifndef arch_vmap_pte_supported_shift +static inline int arch_vmap_pte_supported_shift(unsigned long size) +{ + return PAGE_SHIFT; +} +#endif + /* * Highlevel APIs for driver use */ @@ -135,6 +150,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, const void *caller); void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller); +void *vmalloc_no_huge(unsigned long size); extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); @@ -209,9 +225,6 @@ static inline bool is_vm_area_hugepages(const void *addr) } #ifdef CONFIG_MMU -int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift); void vunmap_range(unsigned long addr, unsigned long end); static inline void set_vm_flush_reset_perms(void *addr) { @@ -231,7 +244,7 @@ static inline void set_vm_flush_reset_perms(void *addr) extern long vread(char *buf, char *addr, unsigned long count); /* - * Internals. Dont't use.. + * Internals. Don't use.. 
*/ extern struct list_head vmap_area_list; extern __init void vm_area_add_early(struct vm_struct *vm); diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 6d28bc433c1c..6a2f51ebbfd3 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -37,7 +37,7 @@ extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); -extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); +extern struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr); extern int vmpressure_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3299cd69e4ca..d6a6cf53b127 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -138,34 +138,27 @@ static inline void vm_events_fold_cpu(int cpu) * Zone and node-based page accounting with per cpu differentials. */ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; -extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; +extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #ifdef CONFIG_NUMA -static inline void zone_numa_state_add(long x, struct zone *zone, - enum numa_stat_item item) +static inline void zone_numa_event_add(long x, struct zone *zone, + enum numa_stat_item item) { - atomic_long_add(x, &zone->vm_numa_stat[item]); - atomic_long_add(x, &vm_numa_stat[item]); + atomic_long_add(x, &zone->vm_numa_event[item]); + atomic_long_add(x, &vm_numa_event[item]); } -static inline unsigned long global_numa_state(enum numa_stat_item item) +static inline unsigned long zone_numa_event_state(struct zone *zone, + enum numa_stat_item item) { - long x = atomic_long_read(&vm_numa_stat[item]); - - return x; + return atomic_long_read(&zone->vm_numa_event[item]); } -static inline unsigned long zone_numa_state_snapshot(struct zone *zone, - enum numa_stat_item item) +static inline unsigned long +global_numa_event_state(enum numa_stat_item item) { - long x = atomic_long_read(&zone->vm_numa_stat[item]); - int cpu; - - for_each_online_cpu(cpu) - x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; - - return x; + return atomic_long_read(&vm_numa_event[item]); } #endif /* CONFIG_NUMA */ @@ -236,7 +229,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, #ifdef CONFIG_SMP int cpu; for_each_online_cpu(cpu) - x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; + x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item]; if (x < 0) x = 0; @@ -245,18 +238,38 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, } #ifdef CONFIG_NUMA -extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); +/* See __count_vm_event comment on why raw_cpu_inc is used. 
*/ +static inline void +__count_numa_event(struct zone *zone, enum numa_stat_item item) +{ + struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; + + raw_cpu_inc(pzstats->vm_numa_event[item]); +} + +static inline void +__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta) +{ + struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; + + raw_cpu_add(pzstats->vm_numa_event[item], delta); +} + extern unsigned long sum_zone_node_page_state(int node, enum zone_stat_item item); -extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); +extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); extern unsigned long node_page_state_pages(struct pglist_data *pgdat, enum node_stat_item item); +extern void fold_vm_numa_events(void); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) #define node_page_state_pages(node, item) global_node_page_state_pages(item) +static inline void fold_vm_numa_events(void) +{ +} #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP @@ -291,7 +304,7 @@ struct ctl_table; int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, loff_t *ppos); -void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); +void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *); int calculate_pressure_threshold(struct zone *zone); int calculate_normal_threshold(struct zone *zone); @@ -399,7 +412,7 @@ static inline void cpu_vm_stats_fold(int cpu) { } static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, - struct per_cpu_pageset *pset) { } + struct per_cpu_zonestat *pzstats) { } #endif /* CONFIG_SMP */ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, @@ -428,7 +441,7 @@ static inline const char *numa_stat_name(enum numa_stat_item item) static inline const char *node_stat_name(enum node_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + item]; } @@ -440,7 +453,7 @@ static inline const char *lru_list_name(enum lru_list lru) static inline const char *writeback_stat_name(enum writeback_stat_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + NR_VM_NODE_STAT_ITEMS + item]; } @@ -449,7 +462,7 @@ static inline const char *writeback_stat_name(enum writeback_stat_item item) static inline const char *vm_event_name(enum vm_event_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + - NR_VM_NUMA_STAT_ITEMS + + NR_VM_NUMA_EVENT_ITEMS + NR_VM_NODE_STAT_ITEMS + NR_VM_WRITEBACK_STAT_ITEMS + item]; diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 84db7b8f912f..212892cf9822 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,6 +14,7 @@ #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> +#include <linux/spinlock.h> #if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/dma-direction.h> #include <linux/vhost_iotlb.h> diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 94e7a315479c..b5ab452fca5b 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -148,27 +148,26 @@ void hide_boot_cursor(bool hide); /* keyboard provided interfaces */ int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); -int vt_do_kdskbmode(int 
console, unsigned int arg); -int vt_do_kdskbmeta(int console, unsigned int arg); +int vt_do_kdskbmode(unsigned int console, unsigned int arg); +int vt_do_kdskbmeta(unsigned int console, unsigned int arg); int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm); int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, - int console); + unsigned int console); int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm); -int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); -int vt_do_kdgkbmode(int console); -int vt_do_kdgkbmeta(int console); -void vt_reset_unicode(int console); +int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm); +int vt_do_kdgkbmode(unsigned int console); +int vt_do_kdgkbmeta(unsigned int console); +void vt_reset_unicode(unsigned int console); int vt_get_shift_state(void); -void vt_reset_keyboard(int console); -int vt_get_leds(int console, int flag); -int vt_get_kbd_mode_bit(int console, int bit); -void vt_set_kbd_mode_bit(int console, int bit); -void vt_clr_kbd_mode_bit(int console, int bit); -void vt_set_led_state(int console, int leds); -void vt_set_led_state(int console, int leds); -void vt_kbd_con_start(int console); -void vt_kbd_con_stop(int console); +void vt_reset_keyboard(unsigned int console); +int vt_get_leds(unsigned int console, int flag); +int vt_get_kbd_mode_bit(unsigned int console, int bit); +void vt_set_kbd_mode_bit(unsigned int console, int bit); +void vt_clr_kbd_mode_bit(unsigned int console, int bit); +void vt_set_led_state(unsigned int console, int leds); +void vt_kbd_con_start(unsigned int console); +void vt_kbd_con_stop(unsigned int console); void vc_scrolldelta_helper(struct vc_data *c, int lines, unsigned int rolled_over, void *_base, unsigned int size); diff --git a/include/linux/wait.h b/include/linux/wait.h index fe10e8570a52..93dab0e9580f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -56,7 +56,7 @@ struct task_struct; #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .head = { &(name).head, &(name).head } } + .head = LIST_HEAD_INIT(name.head) } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) @@ -1136,7 +1136,7 @@ do { \ * Waitqueues which are removed from the waitqueue_head at wakeup time */ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); -void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); +bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 9b19e6bb68b5..99660197a36c 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -107,6 +107,7 @@ struct watchdog_device { unsigned int max_hw_heartbeat_ms; struct notifier_block reboot_nb; struct notifier_block restart_nb; + struct notifier_block pm_nb; void *driver_data; struct watchdog_core_data *wd_data; unsigned long status; @@ -116,6 +117,7 @@ struct watchdog_device { #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* 
True if HW watchdog running */ #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ +#define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */ struct list_head deferred; }; @@ -156,6 +158,12 @@ static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); } +/* Use the following function to stop the wdog ping worker when suspending */ +static inline void watchdog_stop_ping_on_suspend(struct watchdog_device *wdd) +{ + set_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { @@ -209,6 +217,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, unsigned int timeout_parm, struct device *dev); extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_dev_suspend(struct watchdog_device *wdd); +int watchdog_dev_resume(struct watchdog_device *wdd); int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d15a7730ee18..2ebef6b1a3d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -29,7 +29,7 @@ void delayed_work_timer_fn(struct timer_list *t); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ - WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ + WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -42,7 +42,7 @@ enum { WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, - WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, + WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -51,19 +51,14 @@ enum { WORK_STRUCT_STATIC = 0, #endif - /* - * The last color is no color used for works which don't - * participate in workqueue flushing. - */ - WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, - WORK_NO_COLOR = WORK_NR_COLORS, + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), /* not bound to any CPU, prefer the local CPU */ WORK_CPU_UNBOUND = NR_CPUS, /* * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. - * This makes pwqs aligned to 256 bytes and allows 15 workqueue + * This makes pwqs aligned to 256 bytes and allows 16 workqueue * flush colors. */ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + @@ -324,7 +319,7 @@ enum { * to execute and tries to keep idle cores idle to conserve power; * however, for example, a per-cpu work item scheduled from an * interrupt handler on an idle CPU will force the scheduler to - * excute the work item on that CPU breaking the idleness, which in + * execute the work item on that CPU breaking the idleness, which in * turn may lead to more scheduling choices which are sub-optimal * in terms of power consumption. 
* diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 8e5c5bb16e2d..d1f65adf6a26 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -218,9 +218,10 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, void wbc_detach_inode(struct writeback_control *wbc); void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, size_t bytes); -int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages, +int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, enum wb_reason reason, struct wb_completion *done); void cgroup_writeback_umount(void); +bool cleanup_offline_cgwb(struct bdi_writeback *wb); /** * inode_attach_wb - associate an inode with its wb @@ -335,14 +336,9 @@ static inline void cgroup_writeback_umount(void) /* * mm/page-writeback.c */ -#ifdef CONFIG_BLOCK void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); -void laptop_mode_sync(struct work_struct *work); void laptop_mode_timer_fn(struct timer_list *t); -#else -static inline void laptop_sync_completion(void) { } -#endif bool node_dirty_ok(struct pglist_data *pgdat); int wb_domain_init(struct wb_domain *dom, gfp_t gfp); #ifdef CONFIG_CGROUP_WRITEBACK @@ -360,7 +356,6 @@ extern unsigned int dirty_writeback_interval; extern unsigned int dirty_expire_interval; extern unsigned int dirtytime_expire_interval; extern int vm_highmem_is_dirtyable; -extern int block_dump; extern int laptop_mode; int dirty_background_ratio_handler(struct ctl_table *table, int write, @@ -379,7 +374,7 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); -void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); +void wb_update_bandwidth(struct bdi_writeback *wb); void balance_dirty_pages_ratelimited(struct address_space *mapping); bool wb_over_bg_thresh(struct bdi_writeback *wb); diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index b77f39f319ad..29db736af86d 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -18,6 +18,24 @@ #define __LINUX_WW_MUTEX_H #include <linux/mutex.h> +#include <linux/rtmutex.h> + +#if defined(CONFIG_DEBUG_MUTEXES) || \ + (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES)) +#define DEBUG_WW_MUTEXES +#endif + +#ifndef CONFIG_PREEMPT_RT +#define WW_MUTEX_BASE mutex +#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) mutex_trylock(l) +#define ww_mutex_base_is_locked(b) mutex_is_locked((b)) +#else +#define WW_MUTEX_BASE rt_mutex +#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) rt_mutex_trylock(l) +#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex) +#endif struct ww_class { atomic_long_t stamp; @@ -28,16 +46,24 @@ struct ww_class { unsigned int is_wait_die; }; +struct ww_mutex { + struct WW_MUTEX_BASE base; + struct ww_acquire_ctx *ctx; +#ifdef DEBUG_WW_MUTEXES + struct ww_class *ww_class; +#endif +}; + struct ww_acquire_ctx { struct task_struct *task; unsigned long stamp; unsigned int acquired; unsigned short wounded; unsigned short is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES unsigned int done_acquire; struct ww_class *ww_class; - struct ww_mutex *contending_lock; + void *contending_lock; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; 
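The ww_mutex rework above routes the base lock type through WW_MUTEX_BASE (a plain mutex, or an rt_mutex under CONFIG_PREEMPT_RT) and widens the debug checks from CONFIG_DEBUG_MUTEXES to the new DEBUG_WW_MUTEXES. The acquire-context usage pattern is untouched by this; for context, here is a minimal sketch of that pattern. The class and the two locks are hypothetical, and the -EDEADLK backoff dance follows the documented usage in Documentation/locking/ww-mutex-design.rst:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_class);	/* hypothetical class */

static void my_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &my_class);

	/* First lock: nothing is held yet, so no backoff can be requested. */
	ww_mutex_lock(a, &ctx);
	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		struct ww_mutex *tmp;

		/*
		 * Wounded by an older context: drop what we hold, sleep
		 * on the contended lock without deadlock detection, and
		 * retry with the acquisition order flipped.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}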
@@ -74,9 +100,9 @@ struct ww_acquire_ctx { static inline void ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) { - __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); + ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); lock->ctx = NULL; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lock->ww_class = ww_class; #endif } @@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, ctx->acquired = 0; ctx->wounded = false; ctx->is_wait_die = ww_class->is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES ctx->ww_class = ww_class; ctx->done_acquire = 0; ctx->contending_lock = NULL; @@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, */ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lockdep_assert_held(ctx); DEBUG_LOCKS_WARN_ON(ctx->done_acquire); @@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) #ifdef CONFIG_DEBUG_LOCK_ALLOC mutex_release(&ctx->dep_map, _THIS_IP_); #endif -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(ctx->acquired); if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) /* @@ -269,7 +295,7 @@ static inline void ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { int ret; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif ret = ww_mutex_lock(lock, ctx); @@ -305,7 +331,7 @@ static inline int __must_check ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif return ww_mutex_lock_interruptible(lock, ctx); @@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock); */ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) { - return mutex_trylock(&lock->base); + return ww_mutex_base_trylock(&lock->base); } /*** @@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) */ static inline void ww_mutex_destroy(struct ww_mutex *lock) { +#ifndef CONFIG_PREEMPT_RT mutex_destroy(&lock->base); +#endif } /** @@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock) */ static inline bool ww_mutex_is_locked(struct ww_mutex *lock) { - return mutex_is_locked(&lock->base); + return ww_mutex_base_is_locked(&lock->base); } #endif diff --git a/include/linux/wwan.h b/include/linux/wwan.h index aa05a253dcf9..9fac819f92e3 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -6,7 +6,10 @@ #include <linux/device.h> #include <linux/kernel.h> +#include <linux/poll.h> #include <linux/skbuff.h> +#include <linux/netlink.h> +#include <linux/netdevice.h> /** * enum wwan_port_type - WWAN port types @@ -15,7 +18,10 @@ * @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control * @WWAN_PORT_QCDM: Qcom Modem diagnostic interface * @WWAN_PORT_FIREHOSE: XML based command protocol - * @WWAN_PORT_MAX: Number of supported port types + * + * @WWAN_PORT_MAX: Highest supported port types + * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type + * @__WWAN_PORT_MAX: Internal use */ enum wwan_port_type { WWAN_PORT_AT, @@ -23,7 +29,12 @@ enum wwan_port_type { WWAN_PORT_QMI, WWAN_PORT_QCDM, WWAN_PORT_FIREHOSE, - WWAN_PORT_MAX, + + /* Add new port types above this line */ + + __WWAN_PORT_MAX, + WWAN_PORT_MAX = __WWAN_PORT_MAX - 1, + WWAN_PORT_UNKNOWN, }; struct 
wwan_port; @@ -31,15 +42,23 @@ struct wwan_port; /** struct wwan_port_ops - The WWAN port operations * @start: The routine for starting the WWAN port device. * @stop: The routine for stopping the WWAN port device. - * @tx: The routine that sends WWAN port protocol data to the device. + * @tx: Non-blocking routine that sends WWAN port protocol data to the device. + * @tx_blocking: Optional blocking routine that sends WWAN port protocol data + * to the device. + * @tx_poll: Optional routine that sets additional TX poll flags. * * The wwan_port_ops structure contains a list of low-level operations - * that control a WWAN port device. All functions are mandatory. + * that control a WWAN port device. All functions are mandatory unless specified. */ struct wwan_port_ops { int (*start)(struct wwan_port *port); void (*stop)(struct wwan_port *port); int (*tx)(struct wwan_port *port, struct sk_buff *skb); + + /* Optional operations */ + int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb); + __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp, + poll_table *wait); }; /** @@ -108,4 +127,48 @@ void wwan_port_txon(struct wwan_port *port); */ void *wwan_port_get_drvdata(struct wwan_port *port); +/** + * struct wwan_netdev_priv - WWAN core network device private data + * @link_id: WWAN device data link id + * @drv_priv: driver private data area, size is determined in &wwan_ops + */ +struct wwan_netdev_priv { + u32 link_id; + + /* must be last */ + u8 drv_priv[] __aligned(sizeof(void *)); +}; + +static inline void *wwan_netdev_drvpriv(struct net_device *dev) +{ + return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv; +} + +/* + * Used to indicate that the WWAN core should not create a default network + * link. + */ +#define WWAN_NO_DEFAULT_LINK U32_MAX + +/** + * struct wwan_ops - WWAN device ops + * @priv_size: size of private netdev data area + * @setup: set up a new netdev + * @newlink: register the new netdev + * @dellink: remove the given netdev + */ +struct wwan_ops { + unsigned int priv_size; + void (*setup)(struct net_device *dev); + int (*newlink)(void *ctxt, struct net_device *dev, + u32 if_id, struct netlink_ext_ack *extack); + void (*dellink)(void *ctxt, struct net_device *dev, + struct list_head *head); +}; + +int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, + void *ctxt, u32 def_link_id); + +void wwan_unregister_ops(struct device *parent); + #endif /* __WWAN_H */ diff --git a/include/linux/zbud.h b/include/linux/zbud.h deleted file mode 100644 index b1eaf6e31735..000000000000 --- a/include/linux/zbud.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ZBUD_H_ -#define _ZBUD_H_ - -#include <linux/types.h> - -struct zbud_pool; - -struct zbud_ops { - int (*evict)(struct zbud_pool *pool, unsigned long handle); -}; - -struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops); -void zbud_destroy_pool(struct zbud_pool *pool); -int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle); -void zbud_free(struct zbud_pool *pool, unsigned long handle); -int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries); -void *zbud_map(struct zbud_pool *pool, unsigned long handle); -void zbud_unmap(struct zbud_pool *pool, unsigned long handle); -u64 zbud_get_pool_size(struct zbud_pool *pool); - -#endif /* _ZBUD_H_ */ diff --git a/include/linux/zorro.h b/include/linux/zorro.h index e2e4de188d84..db7416ed6057 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ 
-29,7 +29,6 @@ struct zorro_dev { struct ExpansionRom rom; zorro_id id; - struct zorro_driver *driver; /* which driver has allocated this device */ struct device dev; /* Generic device interface */ u16 slotaddr; u16 slotsize; diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index 143568d64b20..4b57bbba588a 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -338,7 +338,7 @@ do { \ FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI); \ break; \ } \ - /* FALLTHRU */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h index d37cb74b769c..b0a535d6893a 100644 --- a/include/media/dvb-usb-ids.h +++ b/include/media/dvb-usb-ids.h @@ -394,6 +394,8 @@ #define USB_PID_MYGICA_T230C 0xc689 #define USB_PID_MYGICA_T230C2 0xc68a #define USB_PID_MYGICA_T230C_LITE 0xc699 +#define USB_PID_MYGICA_T230C2_LITE 0xc69a +#define USB_PID_MYGICA_T230A 0x689a #define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 #define USB_PID_ELGATO_EYETV_DTT 0x0021 #define USB_PID_ELGATO_EYETV_DTT_2 0x003f diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h index b4cb2ef02f17..781371bff2ad 100644 --- a/include/media/hevc-ctrls.h +++ b/include/media/hevc-ctrls.h @@ -19,6 +19,7 @@ #define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_CODEC_BASE + 1008) #define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_CODEC_BASE + 1009) #define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_BASE + 1010) +#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_BASE + 1012) #define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_CODEC_BASE + 1015) #define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_CODEC_BASE + 1016) @@ -26,6 +27,7 @@ #define V4L2_CTRL_TYPE_HEVC_SPS 0x0120 #define V4L2_CTRL_TYPE_HEVC_PPS 0x0121 #define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122 +#define V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS 0x0124 enum v4l2_mpeg_video_hevc_decode_mode { V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, @@ -75,13 +77,12 @@ struct v4l2_ctrl_hevc_sps { __u8 num_short_term_ref_pic_sets; __u8 num_long_term_ref_pics_sps; __u8 chroma_format_idc; - - __u8 padding; + __u8 sps_max_sub_layers_minus1; __u64 flags; }; -#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0) +#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0) #define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) #define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) #define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) @@ -100,10 +101,14 @@ struct v4l2_ctrl_hevc_sps { #define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) #define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) #define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19) +#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20) struct v4l2_ctrl_hevc_pps { /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ __u8 num_extra_slice_header_bits; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; __s8 init_qp_minus26; __u8 diff_cu_qp_delta_depth; __s8 pps_cb_qp_offset; @@ -160,6 +165,7 @@ struct v4l2_hevc_pred_weight_table { #define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6) #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7) #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9) struct v4l2_ctrl_hevc_slice_params { __u32 bit_size; @@ -190,23 +196,46 @@ struct v4l2_ctrl_hevc_slice_params { __u8 pic_struct; /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ - __u8 num_active_dpb_entries; + __u32 slice_segment_addr; __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 num_rps_poc_st_curr_before; - __u8 num_rps_poc_st_curr_after; - __u8 num_rps_poc_lt_curr; - __u8 padding; - /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ - struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */ struct v4l2_hevc_pred_weight_table pred_weight_table; __u64 flags; }; +#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1 +#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2 +#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4 + +struct v4l2_ctrl_hevc_decode_params { + __s32 pic_order_cnt_val; + __u8 num_active_dpb_entries; + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 num_poc_st_curr_before; + __u8 num_poc_st_curr_after; + __u8 num_poc_lt_curr; + __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u64 flags; +}; + +/* MPEG-class control IDs specific to the Hantro driver as defined by V4L2 */ +#define V4L2_CID_CODEC_HANTRO_BASE (V4L2_CTRL_CLASS_CODEC | 0x1200) +/* + * V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP - + * the number of data (in bits) to skip in the + * slice segment header. + * If non-IDR, the bits to be skipped go from syntax element "pic_output_flag" + * to before syntax element "slice_temporal_mvp_enabled_flag". + * If IDR, the skipped bits are just "pic_output_flag" + * (separate_colour_plane_flag is not supported). + */ +#define V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP (V4L2_CID_CODEC_HANTRO_BASE + 0) + #endif diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h index b35ea6062596..2ab54d426c64 100644 --- a/include/media/media-dev-allocator.h +++ b/include/media/media-dev-allocator.h @@ -19,7 +19,7 @@ struct usb_device; -#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB) +#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB) /** * media_device_usb_allocate() - Allocate and return struct &media device * diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h deleted file mode 100644 index 2a4ae6701166..000000000000 --- a/include/media/mpeg2-ctrls.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * These are the MPEG2 state controls for use with stateless MPEG-2 - * codec drivers. - * - * It turns out that these structs are not stable yet and will undergo - * more changes. So keep them private until they are stable and ready to - * become part of the official public API. 
- */ - -#ifndef _MPEG2_CTRLS_H_ -#define _MPEG2_CTRLS_H_ - -#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_CODEC_BASE+250) -#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_CODEC_BASE+251) - -/* enum v4l2_ctrl_type type values */ -#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 -#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 - -#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 - -struct v4l2_mpeg2_sequence { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ - __u16 horizontal_size; - __u16 vertical_size; - __u32 vbv_buffer_size; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ - __u16 profile_and_level_indication; - __u8 progressive_sequence; - __u8 chroma_format; -}; - -struct v4l2_mpeg2_picture { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ - __u8 picture_coding_type; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ - __u8 f_code[2][2]; - __u8 intra_dc_precision; - __u8 picture_structure; - __u8 top_field_first; - __u8 frame_pred_frame_dct; - __u8 concealment_motion_vectors; - __u8 q_scale_type; - __u8 intra_vlc_format; - __u8 alternate_scan; - __u8 repeat_first_field; - __u16 progressive_frame; -}; - -struct v4l2_ctrl_mpeg2_slice_params { - __u32 bit_size; - __u32 data_bit_offset; - __u64 backward_ref_ts; - __u64 forward_ref_ts; - - struct v4l2_mpeg2_sequence sequence; - struct v4l2_mpeg2_picture picture; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ - __u32 quantiser_scale_code; -}; - -struct v4l2_ctrl_mpeg2_quantization { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ - __u8 load_intra_quantiser_matrix; - __u8 load_non_intra_quantiser_matrix; - __u8 load_chroma_intra_quantiser_matrix; - __u8 load_chroma_non_intra_quantiser_matrix; - - __u8 intra_quantiser_matrix[64]; - __u8 non_intra_quantiser_matrix[64]; - __u8 chroma_intra_quantiser_matrix[64]; - __u8 chroma_non_intra_quantiser_matrix[64]; -}; - -#endif diff --git a/include/media/rc-core.h b/include/media/rc-core.h index a1019c4ab5e8..8c5b7978e1d9 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -151,7 +151,7 @@ struct lirc_fh { * @tx_ir: transmit IR * @s_idle: enable/disable hardware idle mode, upon which, * device doesn't interrupt host until it sees IR pulses - * @s_learning_mode: enable wide band receiver used for learning + * @s_wideband_receiver: enable wide band receiver used for learning * @s_carrier_report: enable carrier reports * @s_filter: set the scancode filter * @s_wakeup_filter: set the wakeup scancode filter. 
If the mask is zero @@ -218,7 +218,7 @@ struct rc_dev { int (*s_rx_carrier_range)(struct rc_dev *dev, u32 min, u32 max); int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n); void (*s_idle)(struct rc_dev *dev, bool enable); - int (*s_learning_mode)(struct rc_dev *dev, int enable); + int (*s_wideband_receiver)(struct rc_dev *dev, int enable); int (*s_carrier_report) (struct rc_dev *dev, int enable); int (*s_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter); @@ -313,6 +313,7 @@ struct ir_raw_event { #define MS_TO_US(msec) ((msec) * 1000) #define IR_MAX_DURATION MS_TO_US(500) #define IR_DEFAULT_TIMEOUT MS_TO_US(125) +#define IR_MAX_TIMEOUT LIRC_VALUE_MASK void ir_raw_event_handle(struct rc_dev *dev); int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index b5585d14fff4..793b54342dff 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -231,6 +231,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_CEC "rc-cec" #define RC_MAP_CINERGY "rc-cinergy" #define RC_MAP_CINERGY_1400 "rc-cinergy-1400" +#define RC_MAP_CT_90405 "rc-ct-90405" #define RC_MAP_D680_DMB "rc-d680-dmb" #define RC_MAP_DELOCK_61959 "rc-delock-61959" #define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" @@ -312,7 +313,6 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" #define RC_MAP_STREAMZAP "rc-streamzap" #define RC_MAP_SU3000 "rc-su3000" -#define RC_MAP_TANGO "rc-tango" #define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" #define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" #define RC_MAP_TBS_NEC "rc-tbs-nec" diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 5b275a845c20..fa4901162663 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -129,11 +129,11 @@ void v4l2_async_debug_init(struct dentry *debugfs_dir); * * This function initializes the notifier @asd_list. It must be called * before adding a subdevice to a notifier, using one of: - * @v4l2_async_notifier_add_fwnode_remote_subdev, - * @v4l2_async_notifier_add_fwnode_subdev, - * @v4l2_async_notifier_add_i2c_subdev, - * @__v4l2_async_notifier_add_subdev or - * @v4l2_async_notifier_parse_fwnode_endpoints. + * v4l2_async_notifier_add_fwnode_remote_subdev(), + * v4l2_async_notifier_add_fwnode_subdev(), + * v4l2_async_notifier_add_i2c_subdev(), + * __v4l2_async_notifier_add_subdev() or + * v4l2_async_notifier_parse_fwnode_endpoints(). */ void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier); @@ -145,9 +145,9 @@ void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier); * @asd: pointer to &struct v4l2_async_subdev * * \warning: Drivers should avoid using this function and instead use one of: - * @v4l2_async_notifier_add_fwnode_subdev, - * @v4l2_async_notifier_add_fwnode_remote_subdev or - * @v4l2_async_notifier_add_i2c_subdev. + * v4l2_async_notifier_add_fwnode_subdev(), + * v4l2_async_notifier_add_fwnode_remote_subdev() or + * v4l2_async_notifier_add_i2c_subdev(). * * Call this function before registering a notifier to link the provided @asd to * the notifiers master @asd_list. The @asd must be allocated with k*alloc() as @@ -200,7 +200,7 @@ __v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif * function also gets a reference of the fwnode which is released later at * notifier cleanup time. 
* - * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the + * This is just like v4l2_async_notifier_add_fwnode_subdev(), but with the * exception that the fwnode refers to a local endpoint, not the remote one. */ #define v4l2_async_notifier_add_fwnode_remote_subdev(notifier, ep, type) \ @@ -265,13 +265,13 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier); * sub-devices allocated for the purposes of the notifier but not the notifier * itself. The user is responsible for calling this function to clean up the * notifier after calling - * @v4l2_async_notifier_add_fwnode_remote_subdev, - * @v4l2_async_notifier_add_fwnode_subdev, - * @v4l2_async_notifier_add_i2c_subdev, - * @__v4l2_async_notifier_add_subdev or - * @v4l2_async_notifier_parse_fwnode_endpoints. + * v4l2_async_notifier_add_fwnode_remote_subdev(), + * v4l2_async_notifier_add_fwnode_subdev(), + * v4l2_async_notifier_add_i2c_subdev(), + * __v4l2_async_notifier_add_subdev() or + * v4l2_async_notifier_parse_fwnode_endpoints(). * - * There is no harm from calling v4l2_async_notifier_cleanup in other + * There is no harm from calling v4l2_async_notifier_cleanup() in other * cases as long as its memory has been zeroed after it has been * allocated. */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index a5953b812878..575b59fbac77 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -17,7 +17,6 @@ * Include the stateless codec compound control definitions. * This will move to the public headers once this API is fully stable. */ -#include <media/mpeg2-ctrls.h> #include <media/hevc-ctrls.h> /* forward references */ @@ -40,8 +39,9 @@ struct video_device; * @p_u16: Pointer to a 16-bit unsigned value. * @p_u32: Pointer to a 32-bit unsigned value. * @p_char: Pointer to a string. - * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure. - * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure. + * @p_mpeg2_sequence: Pointer to a MPEG2 sequence structure. + * @p_mpeg2_picture: Pointer to a MPEG2 picture structure. + * @p_mpeg2_quantisation: Pointer to a MPEG2 quantisation data structure. * @p_fwht_params: Pointer to a FWHT stateless parameters structure. * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. @@ -66,8 +66,9 @@ union v4l2_ctrl_ptr { u16 *p_u16; u32 *p_u32; char *p_char; - struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; - struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_sps *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d0e9a5bdb08b..95ec18c2f49c 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config { * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with * a direction argument if needed. * + * @command: called by in-kernel drivers in order to call functions internal + * to subdev drivers driver that have a separate callback. + * * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core. * used to provide support for private ioctls used on the driver. 
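The @command core op documented above gives in-kernel callers a typed side door into a subdev driver, distinct from the userspace-facing @ioctl path. A hedged sketch of the idea follows; the command ID, handler body, and gain semantics are invented for illustration, and only the .command hook and the v4l2_subdev_call() plumbing come from these headers:

#include <media/v4l2-subdev.h>

#define MY_SUBDEV_CMD_SET_GAIN	1	/* hypothetical, driver-private */

static long my_subdev_command(struct v4l2_subdev *sd, unsigned int cmd,
			      void *arg)
{
	switch (cmd) {
	case MY_SUBDEV_CMD_SET_GAIN:
		/* A real driver would program the hardware here. */
		dev_dbg(sd->dev, "gain set to %u\n", *(u32 *)arg);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct v4l2_subdev_core_ops my_core_ops = {
	.command = my_subdev_command,
};

/* An in-kernel consumer (e.g. the bridge driver) would then issue: */
static int my_bridge_set_gain(struct v4l2_subdev *sd, u32 gain)
{
	return v4l2_subdev_call(sd, core, command,
				MY_SUBDEV_CMD_SET_GAIN, &gain);
}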
* @@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops { int (*load_fw)(struct v4l2_subdev *sd); int (*reset)(struct v4l2_subdev *sd, u32 val); int (*s_gpio)(struct v4l2_subdev *sd, u32 val); + long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); #ifdef CONFIG_COMPAT long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd, @@ -351,6 +355,16 @@ struct v4l2_mbus_frame_desc { }; /** + * enum v4l2_subdev_pre_streamon_flags - Flags for pre_streamon subdev core op + * + * @V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP: Set the transmitter to either LP-11 + * or LP-111 mode before call to s_stream(). + */ +enum v4l2_subdev_pre_streamon_flags { + V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP = BIT(0), +}; + +/** * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened * in video mode. * @@ -405,6 +419,19 @@ struct v4l2_mbus_frame_desc { * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev * can adjust @size to a lower value and must not write more data to the * buffer starting at @data than the original value of @size. + * + * @pre_streamon: May be called before streaming is actually started, to help + * initialising the bus. Current usage is to set a CSI-2 transmitter to + * LP-11 or LP-111 mode before streaming. See &enum + * v4l2_subdev_pre_streamon_flags. + * + * pre_streamon shall return error if it cannot perform the operation as + * indicated by the flags argument. In particular, -EACCES indicates lack + * of support for the operation. The caller shall call post_streamoff for + * each successful call of pre_streamon. + * + * @post_streamoff: Called after streaming is stopped, but if and only if + * pre_streamon was called earlier. */ struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); @@ -431,6 +458,8 @@ struct v4l2_subdev_video_ops { struct v4l2_dv_timings *timings); int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, unsigned int *size); + int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); + int (*post_streamoff)(struct v4l2_subdev *sd); }; /** @@ -624,6 +653,19 @@ struct v4l2_subdev_pad_config { }; /** + * struct v4l2_subdev_state - Used for storing subdev state information. + * + * @pads: &struct v4l2_subdev_pad_config array + * + * This structure only needs to be passed to the pad op if the 'which' field + * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For + * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. 
+ */ +struct v4l2_subdev_state { + struct v4l2_subdev_pad_config *pads; +}; + +/** * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations * * @init_cfg: initialize the pad config to default values @@ -687,27 +729,27 @@ struct v4l2_subdev_pad_config { */ struct v4l2_subdev_pad_ops { int (*init_cfg)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg); + struct v4l2_subdev_state *state); int (*enum_mbus_code)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code); int (*enum_frame_size)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_frame_size_enum *fse); int (*enum_frame_interval)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval_enum *fie); int (*get_fmt)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); int (*set_fmt)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); int (*get_selection)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel); int (*set_selection)(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel); int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); @@ -854,7 +896,7 @@ struct v4l2_subdev_platform_data { * @asd: Pointer to respective &struct v4l2_async_subdev. * @notifier: Pointer to the managing notifier. * @subdev_notifier: A sub-device notifier implicitly registered for the sub- - * device using v4l2_device_register_sensor_subdev(). + * device using v4l2_async_register_subdev_sensor(). * @pdata: common part of subdevice platform data * * Each instance of a subdev driver should create this struct, either @@ -918,14 +960,14 @@ struct v4l2_subdev { * struct v4l2_subdev_fh - Used for storing subdev information per file handle * * @vfh: pointer to &struct v4l2_fh - * @pad: pointer to &struct v4l2_subdev_pad_config + * @state: pointer to &struct v4l2_subdev_state * @owner: module pointer to the owner of this file handle */ struct v4l2_subdev_fh { struct v4l2_fh vfh; struct module *owner; #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) - struct v4l2_subdev_pad_config *pad; + struct v4l2_subdev_state *state; #endif }; @@ -945,17 +987,17 @@ struct v4l2_subdev_fh { * &struct v4l2_subdev_pad_config->try_fmt * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. + * @state: pointer to &struct v4l2_subdev_state + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array */ static inline struct v4l2_mbus_framefmt * v4l2_subdev_get_try_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_fmt; + return &state->pads[pad].try_fmt; } /** @@ -963,17 +1005,17 @@ v4l2_subdev_get_try_format(struct v4l2_subdev *sd, * &struct v4l2_subdev_pad_config->try_crop * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. 
+ * @state: pointer to &struct v4l2_subdev_state. + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. */ static inline struct v4l2_rect * v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_crop; + return &state->pads[pad].try_crop; } /** @@ -981,17 +1023,17 @@ v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, * &struct v4l2_subdev_pad_config->try_compose * * @sd: pointer to &struct v4l2_subdev - * @cfg: pointer to &struct v4l2_subdev_pad_config array. - * @pad: index of the pad in the @cfg array. + * @state: pointer to &struct v4l2_subdev_state. + * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. */ static inline struct v4l2_rect * v4l2_subdev_get_try_compose(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_state *state, unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; - return &cfg[pad].try_compose; + return &state->pads[pad].try_compose; } #endif @@ -1093,20 +1135,21 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, int v4l2_subdev_link_validate(struct media_link *link); /** - * v4l2_subdev_alloc_pad_config - Allocates memory for pad config + * v4l2_subdev_alloc_state - allocate v4l2_subdev_state * - * @sd: pointer to struct v4l2_subdev + * @sd: pointer to &struct v4l2_subdev for which the state is being allocated. + * + * Must call v4l2_subdev_free_state() when state is no longer needed. */ -struct -v4l2_subdev_pad_config *v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd); +struct v4l2_subdev_state *v4l2_subdev_alloc_state(struct v4l2_subdev *sd); /** - * v4l2_subdev_free_pad_config - Frees memory allocated by - * v4l2_subdev_alloc_pad_config(). + * v4l2_subdev_free_state - free a v4l2_subdev_state * - * @cfg: pointer to &struct v4l2_subdev_pad_config + * @state: v4l2_subdev_state to be freed. */ -void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg); +void v4l2_subdev_free_state(struct v4l2_subdev_state *state); + #endif /* CONFIG_MEDIA_CONTROLLER */ /** diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index c203047eb834..b66585e304e2 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -262,6 +262,22 @@ int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); void vb2_queue_release(struct vb2_queue *q); /** + * vb2_queue_change_type() - change the type of an inactive vb2_queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: the type to change to (V4L2_BUF_TYPE_VIDEO_*) + * + * This function changes the type of the vb2_queue. This is only possible + * if the queue is not busy (i.e. no buffers have been allocated). + * + * vb2_queue_change_type() can be used to support multiple buffer types using + * the same queue. The driver can implement v4l2_ioctl_ops.vidioc_reqbufs and + * v4l2_ioctl_ops.vidioc_create_bufs functions and call vb2_queue_change_type() + * before calling vb2_ioctl_reqbufs() or vb2_ioctl_create_bufs(), and thus + * "lock" the buffer type until the buffers have been released. + */ +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type); + +/** * vb2_poll() - implements poll userspace operation * @q: pointer to &struct vb2_queue with videobuf2 queue. 
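Per the vb2_queue_change_type() documentation just added, a driver that wants a single vb2_queue to serve several buffer types can pin the type when buffers are first requested. A minimal sketch under that reading, with the driver callback name invented; vb2_queue_change_type(), vb2_ioctl_reqbufs(), and video_devdata() are the real helpers involved:

#include <media/v4l2-dev.h>
#include <media/videobuf2-v4l2.h>

static int my_vidioc_reqbufs(struct file *file, void *priv,
			     struct v4l2_requestbuffers *rb)
{
	struct video_device *vdev = video_devdata(file);
	int ret;

	/* Rejected (queue type left unchanged) if buffers are already allocated. */
	ret = vb2_queue_change_type(vdev->queue, rb->type);
	if (ret)
		return ret;

	return vb2_ioctl_reqbufs(file, priv, rb);
}

A vidioc_create_bufs handler can gate on the same call before vb2_ioctl_create_bufs(), which is the other entry point the kernel-doc mentions.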
* @file: file argument passed to the poll file operation handler diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index 14cfd036268a..e3e770f76f34 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -19,7 +19,7 @@ enum rpcif_data_dir { RPCIF_DATA_OUT, }; -struct rpcif_op { +struct rpcif_op { struct { u8 buswidth; u8 opcode; @@ -57,7 +57,7 @@ struct rpcif_op { } data; }; -struct rpcif { +struct rpcif { struct device *dev; void __iomem *dirmap; struct regmap *regmap; @@ -76,7 +76,7 @@ struct rpcif { u32 ddr; /* DRDRENR or SMDRENR */ }; -int rpcif_sw_init(struct rpcif *rpc, struct device *dev); +int rpcif_sw_init(struct rpcif *rpc, struct device *dev); void rpcif_hw_init(struct rpcif *rpc, bool hyperflash); void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, size_t *len); diff --git a/include/net/Space.h b/include/net/Space.h index 9cce0d80d37a..08ca9cef0213 100644 --- a/include/net/Space.h +++ b/include/net/Space.h @@ -8,23 +8,13 @@ struct net_device *ultra_probe(int unit); struct net_device *wd_probe(int unit); struct net_device *ne_probe(int unit); struct net_device *fmv18x_probe(int unit); -struct net_device *i82596_probe(int unit); struct net_device *ni65_probe(int unit); struct net_device *sonic_probe(int unit); struct net_device *smc_init(int unit); -struct net_device *atarilance_probe(int unit); -struct net_device *sun3lance_probe(int unit); -struct net_device *sun3_82586_probe(int unit); -struct net_device *apne_probe(int unit); struct net_device *cs89x0_probe(int unit); -struct net_device *mvme147lance_probe(int unit); struct net_device *tc515_probe(int unit); struct net_device *lance_probe(int unit); struct net_device *cops_probe(int unit); -struct net_device *ltpc_probe(void); /* Fibre Channel adapters */ int iph5526_probe(struct net_device *dev); - -/* SBNI adapters */ -int sbni_probe(int unit); diff --git a/include/net/act_api.h b/include/net/act_api.h index 086b291e9530..f19f7f4a463c 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -58,6 +58,14 @@ struct tc_action { #define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \ TCA_ACT_HW_STATS_DELAYED) +/* Reserve 16 bits for user-space. See TCA_ACT_FLAGS_NO_PERCPU_STATS. */ +#define TCA_ACT_FLAGS_USER_BITS 16 +#define TCA_ACT_FLAGS_USER_MASK 0xffff +#define TCA_ACT_FLAGS_POLICE (1U << TCA_ACT_FLAGS_USER_BITS) +#define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1)) +#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2)) +#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3)) + /* Update lastuse only if needed, to avoid dirtying a cache line. * We use a temp variable to avoid fetching jiffies twice. 
*/ @@ -99,8 +107,8 @@ struct tc_action_ops { void (*cleanup)(struct tc_action *); int (*lookup)(struct net *net, struct tc_action **a, u32 index); int (*init)(struct net *net, struct nlattr *nla, - struct nlattr *est, struct tc_action **act, int ovr, - int bind, bool rtnl_held, struct tcf_proto *tp, + struct nlattr *est, struct tc_action **act, + struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, @@ -179,18 +187,16 @@ int tcf_action_destroy(struct tc_action *actions[], int bind); int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, - struct nlattr *est, char *name, int ovr, int bind, + struct nlattr *est, struct tc_action *actions[], int init_res[], size_t *attr_size, - bool rtnl_held, struct netlink_ext_ack *extack); -struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla, + u32 flags, struct netlink_ext_ack *extack); +struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police, bool rtnl_held, struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, - char *name, int ovr, int bind, struct tc_action_ops *a_o, int *init_res, - bool rtnl_held, - struct netlink_ext_ack *extack); + u32 flags, struct netlink_ext_ack *extack); int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref, bool terse); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index f42fdddecd41..7d142e8a0550 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -70,6 +70,9 @@ struct unix_sock { struct socket_wq peer_wq; wait_queue_entry_t peer_wake; struct scm_stat scm_stat; +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + struct sk_buff *oob_skb; +#endif }; static inline struct unix_sock *unix_sk(const struct sock *sk) @@ -82,6 +85,10 @@ static inline struct unix_sock *unix_sk(const struct sock *sk) long unix_inq_len(struct sock *sk); long unix_outq_len(struct sock *sk); +int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, + int flags); +int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, + int flags); #ifdef CONFIG_SYSCTL int unix_sysctl_register(struct net *net); void unix_sysctl_unregister(struct net *net); @@ -89,4 +96,16 @@ void unix_sysctl_unregister(struct net *net); static inline int unix_sysctl_register(struct net *net) { return 0; } static inline void unix_sysctl_unregister(struct net *net) {} #endif + +#ifdef CONFIG_BPF_SYSCALL +extern struct proto unix_dgram_proto; +extern struct proto unix_stream_proto; + +int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); +int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); +void __init unix_bpf_build_proto(void); +#else +static inline void __init unix_bpf_build_proto(void) +{} +#endif #endif diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index b1c717286993..ab207677e0a8 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -135,6 +135,14 @@ struct vsock_transport { bool (*stream_is_active)(struct vsock_sock *); bool (*stream_allow)(u32 cid, u32 port); + /* SEQ_PACKET. 
*/ + ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg, + int flags); + int (*seqpacket_enqueue)(struct vsock_sock *vsk, struct msghdr *msg, + size_t len); + bool (*seqpacket_allow)(u32 remote_cid); + u32 (*seqpacket_has_data)(struct vsock_sock *vsk); + /* Notification. */ int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); diff --git a/include/net/ax88796.h b/include/net/ax88796.h index aa52b2e8ff7b..2ed23a368602 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -38,4 +38,7 @@ struct ax_plat_data { int (*check_irq)(struct platform_device *pdev); }; +/* exported from ax88796.c for xsurf100.c */ +extern void ax_NS8390_reinit(struct net_device *dev); + #endif /* __NET_AX88796_PLAT_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index ea4ae551c426..b80415011dcd 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -36,7 +36,7 @@ #define HCI_MAX_AMP_ASSOC_SIZE 672 -#define HCI_MAX_CSB_DATA_SIZE 252 +#define HCI_MAX_CPB_DATA_SIZE 252 /* HCI dev events */ #define HCI_DEV_REG 1 @@ -339,6 +339,7 @@ enum { #define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */ #define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */ #define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ +#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ #define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */ #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */ @@ -471,10 +472,10 @@ enum { #define LMP_EXTFEATURES 0x80 /* Extended LMP features */ -#define LMP_CSB_MASTER 0x01 -#define LMP_CSB_SLAVE 0x02 -#define LMP_SYNC_TRAIN 0x04 -#define LMP_SYNC_SCAN 0x08 +#define LMP_CPB_CENTRAL 0x01 +#define LMP_CPB_PERIPHERAL 0x02 +#define LMP_SYNC_TRAIN 0x04 +#define LMP_SYNC_SCAN 0x08 #define LMP_SC 0x01 #define LMP_PING 0x02 @@ -488,7 +489,7 @@ enum { /* LE features */ #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 -#define HCI_LE_SLAVE_FEATURES 0x08 +#define HCI_LE_PERIPHERAL_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_LL_PRIVACY 0x40 @@ -497,8 +498,8 @@ enum { #define HCI_LE_PHY_CODED 0x08 #define HCI_LE_EXT_ADV 0x10 #define HCI_LE_CHAN_SEL_ALG2 0x40 -#define HCI_LE_CIS_MASTER 0x10 -#define HCI_LE_CIS_SLAVE 0x20 +#define HCI_LE_CIS_CENTRAL 0x10 +#define HCI_LE_CIS_PERIPHERAL 0x20 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -876,17 +877,17 @@ struct hci_rp_logical_link_cancel { __u8 flow_spec_id; } __packed; -#define HCI_OP_SET_CSB 0x0441 -struct hci_cp_set_csb { +#define HCI_OP_SET_CPB 0x0441 +struct hci_cp_set_cpb { __u8 enable; __u8 lt_addr; __u8 lpo_allowed; __le16 packet_type; __le16 interval_min; __le16 interval_max; - __le16 csb_sv_tout; + __le16 cpb_sv_tout; } __packed; -struct hci_rp_set_csb { +struct hci_rp_set_cpb { __u8 status; __u8 lt_addr; __le16 interval; @@ -1183,14 +1184,14 @@ struct hci_rp_delete_reserved_lt_addr { __u8 lt_addr; } __packed; -#define HCI_OP_SET_CSB_DATA 0x0c76 -struct hci_cp_set_csb_data { +#define HCI_OP_SET_CPB_DATA 0x0c76 +struct hci_cp_set_cpb_data { __u8 lt_addr; __u8 fragment; __u8 data_length; - __u8 data[HCI_MAX_CSB_DATA_SIZE]; + __u8 data[HCI_MAX_CPB_DATA_SIZE]; } __packed; -struct hci_rp_set_csb_data { +struct hci_rp_set_cpb_data { __u8 status; __u8 lt_addr; } __packed; @@ -1504,7 +1505,7 @@ struct hci_cp_le_set_scan_enable { 
} __packed; #define HCI_LE_USE_PEER_ADDR 0x00 -#define HCI_LE_USE_WHITELIST 0x01 +#define HCI_LE_USE_ACCEPT_LIST 0x01 #define HCI_OP_LE_CREATE_CONN 0x200d struct hci_cp_le_create_conn { @@ -1524,22 +1525,22 @@ struct hci_cp_le_create_conn { #define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e -#define HCI_OP_LE_READ_WHITE_LIST_SIZE 0x200f -struct hci_rp_le_read_white_list_size { +#define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f +struct hci_rp_le_read_accept_list_size { __u8 status; __u8 size; } __packed; -#define HCI_OP_LE_CLEAR_WHITE_LIST 0x2010 +#define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010 -#define HCI_OP_LE_ADD_TO_WHITE_LIST 0x2011 -struct hci_cp_le_add_to_white_list { +#define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011 +struct hci_cp_le_add_to_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; -#define HCI_OP_LE_DEL_FROM_WHITE_LIST 0x2012 -struct hci_cp_le_del_from_white_list { +#define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012 +struct hci_cp_le_del_from_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; @@ -1774,13 +1775,15 @@ struct hci_cp_ext_adv_set { __u8 max_events; } __packed; +#define HCI_MAX_EXT_AD_LENGTH 251 + #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037 struct hci_cp_le_set_ext_adv_data { __u8 handle; __u8 operation; __u8 frag_pref; __u8 length; - __u8 data[HCI_MAX_AD_LENGTH]; + __u8 data[]; } __packed; #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038 @@ -1789,7 +1792,7 @@ struct hci_cp_le_set_ext_scan_rsp_data { __u8 operation; __u8 frag_pref; __u8 length; - __u8 data[HCI_MAX_AD_LENGTH]; + __u8 data[]; } __packed; #define LE_SET_ADV_DATA_OP_COMPLETE 0x03 @@ -1838,23 +1841,23 @@ struct hci_rp_le_read_iso_tx_sync { #define HCI_OP_LE_SET_CIG_PARAMS 0x2062 struct hci_cis_params { __u8 cis_id; - __le16 m_sdu; - __le16 s_sdu; - __u8 m_phy; - __u8 s_phy; - __u8 m_rtn; - __u8 s_rtn; + __le16 c_sdu; + __le16 p_pdu; + __u8 c_phy; + __u8 p_phy; + __u8 c_rtn; + __u8 p_rtn; } __packed; struct hci_cp_le_set_cig_params { __u8 cig_id; - __u8 m_interval[3]; - __u8 s_interval[3]; - __u8 sca; + __u8 c_interval[3]; + __u8 p_interval[3]; + __u8 wc_sca; __u8 packing; __u8 framing; - __le16 m_latency; - __le16 s_latency; + __le16 c_latency; + __le16 p_latency; __u8 num_cis; struct hci_cis_params cis[]; } __packed; @@ -2259,7 +2262,7 @@ struct hci_ev_sync_train_complete { __u8 status; } __packed; -#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54 +#define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54 #define HCI_EV_LE_CONN_COMPLETE 0x01 struct hci_ev_le_conn_complete { @@ -2417,17 +2420,17 @@ struct hci_evt_le_cis_established { __le16 handle; __u8 cig_sync_delay[3]; __u8 cis_sync_delay[3]; - __u8 m_latency[3]; - __u8 s_latency[3]; - __u8 m_phy; - __u8 s_phy; + __u8 c_latency[3]; + __u8 p_latency[3]; + __u8 c_phy; + __u8 p_phy; __u8 nse; - __u8 m_bn; - __u8 s_bn; - __u8 m_ft; - __u8 s_ft; - __le16 m_mtu; - __le16 s_mtu; + __u8 c_bn; + __u8 p_bn; + __u8 c_ft; + __u8 p_ft; + __le16 c_mtu; + __le16 p_mtu; __le16 interval; } __packed; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c73ac52af186..a7360c8c72f8 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -122,7 +122,7 @@ struct hci_conn_hash { unsigned int amp_num; unsigned int sco_num; unsigned int le_num; - unsigned int le_num_slave; + unsigned int le_num_peripheral; }; struct bdaddr_list { @@ -221,6 +221,7 @@ struct oob_data { struct adv_info { struct list_head list; + bool enabled; bool pending; __u8 instance; __u32 flags; @@ -228,9 +229,9 @@ struct adv_info { __u16 remaining_time; __u16 
duration; __u16 adv_data_len; - __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u16 scan_rsp_len; - __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; @@ -327,7 +328,7 @@ struct hci_dev { __u8 max_page; __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; - __u8 le_white_list_size; + __u8 le_accept_list_size; __u8 le_resolv_list_size; __u8 le_num_of_adv_sets; __u8 le_states[8]; @@ -470,6 +471,7 @@ struct hci_dev { struct delayed_work service_cache; struct delayed_work cmd_timer; + struct delayed_work ncmd_timer; struct work_struct rx_work; struct work_struct cmd_work; @@ -521,14 +523,14 @@ struct hci_dev { struct hci_conn_hash conn_hash; struct list_head mgmt_pending; - struct list_head blacklist; - struct list_head whitelist; + struct list_head reject_list; + struct list_head accept_list; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; struct list_head identity_resolving_keys; struct list_head remote_oob_data; - struct list_head le_white_list; + struct list_head le_accept_list; struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; @@ -550,9 +552,9 @@ struct hci_dev { DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); __s8 adv_tx_power; - __u8 adv_data[HCI_MAX_AD_LENGTH]; + __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data_len; - __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; struct list_head adv_instances; @@ -627,6 +629,7 @@ struct hci_conn { __u8 init_addr_type; bdaddr_t resp_addr; __u8 resp_addr_type; + __u8 adv_instance; __u16 handle; __u16 state; __u8 mode; @@ -893,7 +896,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) case LE_LINK: h->le_num++; if (c->role == HCI_ROLE_SLAVE) - h->le_num_slave++; + h->le_num_peripheral++; break; case SCO_LINK: case ESCO_LINK: @@ -919,7 +922,7 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) case LE_LINK: h->le_num--; if (c->role == HCI_ROLE_SLAVE) - h->le_num_slave--; + h->le_num_peripheral--; break; case SCO_LINK: case ESCO_LINK: @@ -1222,13 +1225,25 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) dev_set_drvdata(&hdev->dev, data); } +static inline void *hci_get_priv(struct hci_dev *hdev) +{ + return (char *)hdev + sizeof(*hdev); +} + struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); -struct hci_dev *hci_alloc_dev(void); +struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); + +static inline struct hci_dev *hci_alloc_dev(void) +{ + return hci_alloc_dev_priv(0); +} + void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); +void hci_release_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); @@ -1393,8 +1408,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* ----- Extended LMP capabilities ----- */ -#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) -#define lmp_csb_slave_capable(dev) ((dev)->features[2][0] & LMP_CSB_SLAVE) +#define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL) +#define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & 
LMP_CPB_PERIPHERAL) #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) @@ -1410,6 +1425,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); !hci_dev_test_flag(dev, HCI_AUTO_OFF)) #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) +#define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \ + !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) +#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ + !adv->rpa_expired) #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) @@ -1768,7 +1787,7 @@ void __mgmt_power_off(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, - u32 flags, u8 *name, u8 name_len); + u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index a7cffb069565..23a0524061b7 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -202,7 +202,7 @@ struct mgmt_cp_load_link_keys { struct mgmt_ltk_info { struct mgmt_addr_info addr; __u8 type; - __u8 master; + __u8 initiator; __u8 enc_size; __le16 ediv; __le64 rand; @@ -939,6 +939,7 @@ struct mgmt_ev_auth_failed { #define MGMT_DEV_FOUND_CONFIRM_NAME 0x01 #define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02 #define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04 +#define MGMT_DEV_FOUND_INITIATED_CONN 0x08 #define MGMT_EV_DEVICE_FOUND 0x0012 struct mgmt_ev_device_found { diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c8696a230b7d..38785d48baff 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -303,6 +303,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); +void bond_3ad_update_lacp_active(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); void bond_3ad_update_ad_actor_settings(struct bonding *bond); int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 9d382f2f0bc5..e64833a674eb 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -64,6 +64,7 @@ enum { BOND_OPT_AD_USER_PORT_KEY, BOND_OPT_NUM_PEER_NOTIF_ALIAS, BOND_OPT_PEER_NOTIF_DELAY, + BOND_OPT_LACP_ACTIVE, BOND_OPT_LAST }; diff --git a/include/net/bonding.h b/include/net/bonding.h index 019e998d944a..15e083e18f75 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -129,6 +129,7 @@ struct bond_params { int updelay; int downdelay; int peer_notif_delay; + int lacp_active; int lacp_fast; unsigned int min_links; int ad_select; @@ -149,11 +150,6 @@ struct bond_params { u8 ad_actor_system[ETH_ALEN + 2]; }; -struct bond_parm_tbl { - char *modename; - int mode; -}; - struct slave { struct net_device *dev; /* first - useful for panic debug */ struct bonding *bond; /* our master */ @@ -201,6 +197,11 @@ struct bond_up_slave { */ #define BOND_LINK_NOCHANGE -1 +struct bond_ipsec { + struct list_head list; + struct xfrm_state *xs; +}; + /* * Here are the locking policies for the two 
bonding locks: * Get rcu_read_lock when reading or RTNL when writing slave list. @@ -232,7 +233,7 @@ struct bonding { char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - u32 rr_tx_counter; + u32 __percpu *rr_tx_counter; struct ad_bond_info ad_info; struct alb_bond_info alb_info; struct bond_params params; @@ -249,8 +250,11 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; #ifdef CONFIG_XFRM_OFFLOAD - struct xfrm_state *xs; + struct list_head ipsec_list; + /* protecting ipsec_list */ + spinlock_t ipsec_lock; #endif /* CONFIG_XFRM_OFFLOAD */ + struct bpf_prog *xdp_prog; }; #define bond_slave_get_rcu(dev) \ @@ -746,13 +750,6 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) /* exported from bond_main.c */ extern unsigned int bond_net_id; -extern const struct bond_parm_tbl bond_lacp_tbl[]; -extern const struct bond_parm_tbl xmit_hashtype_tbl[]; -extern const struct bond_parm_tbl arp_validate_tbl[]; -extern const struct bond_parm_tbl arp_all_targets_tbl[]; -extern const struct bond_parm_tbl fail_over_mac_tbl[]; -extern const struct bond_parm_tbl pri_reselect_tbl[]; -extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 73af4a64a599..40296ed976a9 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void) static inline bool sk_can_busy_loop(const struct sock *sk) { - return sk->sk_ll_usec && !signal_pending(current); + return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); } bool sk_busy_loop_end(void *p, unsigned long start_time); diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h deleted file mode 100644 index 552cf68d28d2..000000000000 --- a/include/net/caif/caif_hsi.h +++ /dev/null @@ -1,200 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson / daniel.martensson@stericsson.com - * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com - */ - -#ifndef CAIF_HSI_H_ -#define CAIF_HSI_H_ - -#include <net/caif/caif_layer.h> -#include <net/caif/caif_device.h> -#include <linux/atomic.h> - -/* - * Maximum number of CAIF frames that can reside in the same HSI frame. - */ -#define CFHSI_MAX_PKTS 15 - -/* - * Maximum number of bytes used for the frame that can be embedded in the - * HSI descriptor. - */ -#define CFHSI_MAX_EMB_FRM_SZ 96 - -/* - * Decides if HSI buffers should be prefilled with 0xFF pattern for easier - * debugging. Both TX and RX buffers will be filled before the transfer. - */ -#define CFHSI_DBG_PREFILL 0 - -/* Structure describing a HSI packet descriptor. */ -#pragma pack(1) /* Byte alignment. */ -struct cfhsi_desc { - u8 header; - u8 offset; - u16 cffrm_len[CFHSI_MAX_PKTS]; - u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; -}; -#pragma pack() /* Default alignment. */ - -/* Size of the complete HSI packet descriptor. */ -#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) - -/* - * Size of the complete HSI packet descriptor excluding the optional embedded - * CAIF frame. - */ -#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) - -/* - * Maximum bytes transferred in one transfer. - */ -#define CFHSI_MAX_CAIF_FRAME_SZ 4096 - -#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ) - -/* Size of the complete HSI TX buffer. 
*/ -#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) - -/* Size of the complete HSI RX buffer. */ -#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) - -/* Bitmasks for the HSI descriptor. */ -#define CFHSI_PIGGY_DESC (0x01 << 7) - -#define CFHSI_TX_STATE_IDLE 0 -#define CFHSI_TX_STATE_XFER 1 - -#define CFHSI_RX_STATE_DESC 0 -#define CFHSI_RX_STATE_PAYLOAD 1 - -/* Bitmasks for power management. */ -#define CFHSI_WAKE_UP 0 -#define CFHSI_WAKE_UP_ACK 1 -#define CFHSI_WAKE_DOWN_ACK 2 -#define CFHSI_AWAKE 3 -#define CFHSI_WAKELOCK_HELD 4 -#define CFHSI_SHUTDOWN 5 -#define CFHSI_FLUSH_FIFO 6 - -#ifndef CFHSI_INACTIVITY_TOUT -#define CFHSI_INACTIVITY_TOUT (1 * HZ) -#endif /* CFHSI_INACTIVITY_TOUT */ - -#ifndef CFHSI_WAKE_TOUT -#define CFHSI_WAKE_TOUT (3 * HZ) -#endif /* CFHSI_WAKE_TOUT */ - -#ifndef CFHSI_MAX_RX_RETRIES -#define CFHSI_MAX_RX_RETRIES (10 * HZ) -#endif - -/* Structure implemented by the CAIF HSI driver. */ -struct cfhsi_cb_ops { - void (*tx_done_cb) (struct cfhsi_cb_ops *drv); - void (*rx_done_cb) (struct cfhsi_cb_ops *drv); - void (*wake_up_cb) (struct cfhsi_cb_ops *drv); - void (*wake_down_cb) (struct cfhsi_cb_ops *drv); -}; - -/* Structure implemented by HSI device. */ -struct cfhsi_ops { - int (*cfhsi_up) (struct cfhsi_ops *dev); - int (*cfhsi_down) (struct cfhsi_ops *dev); - int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_wake_up) (struct cfhsi_ops *dev); - int (*cfhsi_wake_down) (struct cfhsi_ops *dev); - int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status); - int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy); - int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev); - struct cfhsi_cb_ops *cb_ops; -}; - -/* Structure holds status of received CAIF frames processing */ -struct cfhsi_rx_state { - int state; - int nfrms; - int pld_len; - int retries; - bool piggy_desc; -}; - -/* Priority mapping */ -enum { - CFHSI_PRIO_CTL = 0, - CFHSI_PRIO_VI, - CFHSI_PRIO_VO, - CFHSI_PRIO_BEBK, - CFHSI_PRIO_LAST, -}; - -struct cfhsi_config { - u32 inactivity_timeout; - u32 aggregation_timeout; - u32 head_align; - u32 tail_align; - u32 q_high_mark; - u32 q_low_mark; -}; - -/* Structure implemented by CAIF HSI drivers. */ -struct cfhsi { - struct caif_dev_common cfdev; - struct net_device *ndev; - struct platform_device *pdev; - struct sk_buff_head qhead[CFHSI_PRIO_LAST]; - struct cfhsi_cb_ops cb_ops; - struct cfhsi_ops *ops; - int tx_state; - struct cfhsi_rx_state rx_state; - struct cfhsi_config cfg; - int rx_len; - u8 *rx_ptr; - u8 *tx_buf; - u8 *rx_buf; - u8 *rx_flip_buf; - spinlock_t lock; - int flow_off_sent; - struct list_head list; - struct work_struct wake_up_work; - struct work_struct wake_down_work; - struct work_struct out_of_sync_work; - struct workqueue_struct *wq; - wait_queue_head_t wake_up_wait; - wait_queue_head_t wake_down_wait; - wait_queue_head_t flush_fifo_wait; - struct timer_list inactivity_timer; - struct timer_list rx_slowpath_timer; - - /* TX aggregation */ - int aggregation_len; - struct timer_list aggregation_timer; - - unsigned long bits; -}; -extern struct platform_driver cfhsi_driver; - -/** - * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters. - * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before - * taking the HSI wakeline down, in milliseconds. - * When using RT Netlink to create, destroy or configure a CAIF HSI interface, - * enum ifla_caif_hsi is used to specify the configuration attributes. 
- */ -enum ifla_caif_hsi { - __IFLA_CAIF_HSI_UNSPEC, - __IFLA_CAIF_HSI_INACTIVITY_TOUT, - __IFLA_CAIF_HSI_AGGREGATION_TOUT, - __IFLA_CAIF_HSI_HEAD_ALIGN, - __IFLA_CAIF_HSI_TAIL_ALIGN, - __IFLA_CAIF_HSI_QHIGH_WATERMARK, - __IFLA_CAIF_HSI_QLOW_WATERMARK, - __IFLA_CAIF_HSI_MAX -}; - -struct cfhsi_ops *cfhsi_get_ops(void); - -#endif /* CAIF_HSI_H_ */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 58c2cd417e89..62dd8422e0dc 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7,7 +7,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <linux/ethtool.h> @@ -22,6 +22,7 @@ #include <linux/if_ether.h> #include <linux/ieee80211.h> #include <linux/net.h> +#include <linux/rfkill.h> #include <net/regulatory.h> /** @@ -370,11 +371,18 @@ struct ieee80211_sta_he_cap { * @he_cap: holds the HE capabilities * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a * 6 GHz band channel (and 0 may be valid value). + * @vendor_elems: vendor element(s) to advertise + * @vendor_elems.data: vendor element(s) data + * @vendor_elems.len: vendor element(s) length */ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct { + const u8 *data; + unsigned int len; + } vendor_elems; }; /** @@ -534,18 +542,6 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband, } /** - * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA - * @sband: the sband to search for the STA on - * - * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found - */ -static inline const struct ieee80211_sta_he_cap * -ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) -{ - return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION); -} - -/** * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities * @sband: the sband to search for the STA on * @iftype: the iftype to search for @@ -906,6 +902,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) } /** + * cfg80211_any_usable_channels - check for usable channels + * @wiphy: the wiphy to check for + * @band_mask: which bands to check on + * @prohibited_flags: which channels to not consider usable, + * %IEEE80211_CHAN_DISABLED is always taken into account + */ +bool cfg80211_any_usable_channels(struct wiphy *wiphy, + unsigned long band_mask, + u32 prohibited_flags); + +/** * enum survey_info_flags - survey information flags * * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in @@ -1245,7 +1252,26 @@ struct cfg80211_csa_settings { u8 count; }; -#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 +/** + * struct cfg80211_color_change_settings - color change settings + * + * Used for bss color change + * + * @beacon_color_change: beacon data while performing the color countdown + * @counter_offsets_beacon: offsets of the counters within the beacon (tail) + * @counter_offsets_presp: offsets of the counters within the probe response + * @beacon_next: beacon data to be used after the color change + * @count: number of beacons until the color change + * @color: the color used after the change + */ +struct cfg80211_color_change_settings { + struct cfg80211_beacon_data beacon_color_change; + u16 counter_offset_beacon; + u16 counter_offset_presp; + struct cfg80211_beacon_data 
beacon_next; + u8 count; + u8 color; +}; /** * struct iface_combination_params - input parameters for interface combinations @@ -3522,7 +3548,10 @@ struct cfg80211_pmsr_result { * If neither @trigger_based nor @non_trigger_based is set, * EDCA based ranging will be used. * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either - * @trigger_based or @non_trigger_based is set. + * @trigger_based or @non_trigger_based is set. + * @bss_color: the bss color of the responder. Optional. Set to zero to + * indicate the driver should set the BSS color. Only valid if + * @non_trigger_based or @trigger_based is set. * * See also nl80211 for the respective attribute documentation. */ @@ -3540,6 +3569,7 @@ struct cfg80211_pmsr_ftm_request_peer { u8 burst_duration; u8 ftms_per_burst; u8 ftmr_retries; + u8 bss_color; }; /** @@ -3986,6 +4016,8 @@ struct mgmt_frame_regs { * given TIDs. This callback may sleep. * * @set_sar_specs: Update the SAR (TX power) settings. + * + * @color_change: Initiate a color change. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4313,6 +4345,9 @@ struct cfg80211_ops { const u8 *peer, u8 tids); int (*set_sar_specs)(struct wiphy *wiphy, struct cfg80211_sar_specs *sar); + int (*color_change)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_color_change_settings *params); }; /* @@ -4945,6 +4980,7 @@ struct wiphy_iftype_akm_suites { * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities + * @rfkill: a pointer to the rfkill structure */ struct wiphy { struct mutex mtx; @@ -5087,6 +5123,8 @@ struct wiphy { const struct cfg80211_sar_capa *sar_capa; + struct rfkill *rfkill; + char priv[] __aligned(NETDEV_ALIGN); }; @@ -6661,7 +6699,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy); * wiphy_rfkill_stop_polling - stop polling rfkill * @wiphy: the wiphy */ -void wiphy_rfkill_stop_polling(struct wiphy *wiphy); +static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy) +{ + rfkill_pause_polling(wiphy->rfkill); +} /** * DOC: Vendor commands @@ -8154,6 +8195,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, dev_notice(&(wiphy)->dev, format, ##args) #define wiphy_info(wiphy, format, args...) \ dev_info(&(wiphy)->dev, format, ##args) +#define wiphy_info_once(wiphy, format, args...) \ + dev_info_once(&(wiphy)->dev, format, ##args) #define wiphy_err_ratelimited(wiphy, format, args...) 
\ dev_err_ratelimited(&(wiphy)->dev, format, ##args) @@ -8201,4 +8244,70 @@ void cfg80211_update_owe_info_event(struct net_device *netdev, */ void cfg80211_bss_flush(struct wiphy *wiphy); +/** + * cfg80211_bss_color_notify - notify about bss color event + * @dev: network device + * @gfp: allocation flags + * @cmd: the actual event we want to notify + * @count: the number of TBTTs until the color change happens + * @color_bitmap: representations of the colors that the local BSS is aware of + */ +int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp, + enum nl80211_commands cmd, u8 count, + u64 color_bitmap); + +/** + * cfg80211_obss_color_collision_notify - notify about bss color collision + * @dev: network device + * @color_bitmap: representations of the colors that the local BSS is aware of + */ +static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, + u64 color_bitmap) +{ + return cfg80211_bss_color_notify(dev, GFP_KERNEL, + NL80211_CMD_OBSS_COLOR_COLLISION, + 0, color_bitmap); +} + +/** + * cfg80211_color_change_started_notify - notify color change start + * @dev: the device on which the color is switched + * @count: the number of TBTTs until the color change happens + * + * Inform the userspace about the color change that has started. + */ +static inline int cfg80211_color_change_started_notify(struct net_device *dev, + u8 count) +{ + return cfg80211_bss_color_notify(dev, GFP_KERNEL, + NL80211_CMD_COLOR_CHANGE_STARTED, + count, 0); +} + +/** + * cfg80211_color_change_aborted_notify - notify color change abort + * @dev: the device on which the color is switched + * + * Inform the userspace about the color change that has aborted. + */ +static inline int cfg80211_color_change_aborted_notify(struct net_device *dev) +{ + return cfg80211_bss_color_notify(dev, GFP_KERNEL, + NL80211_CMD_COLOR_CHANGE_ABORTED, + 0, 0); +} + +/** + * cfg80211_color_change_notify - notify color change completion + * @dev: the device on which the color was switched + * + * Inform the userspace about the color change that has completed. 
+ */ +static inline int cfg80211_color_change_notify(struct net_device *dev) +{ + return cfg80211_bss_color_notify(dev, GFP_KERNEL, + NL80211_CMD_COLOR_CHANGE_COMPLETED, + 0, 0); +} + #endif /* __NET_CFG80211_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 0d05b9e8690b..5b96d5bd6e54 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -80,16 +80,18 @@ static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) return csum16_add(csum, ~addend); } -static inline __wsum -csum_block_add(__wsum csum, __wsum csum2, int offset) +static inline __wsum csum_shift(__wsum sum, int offset) { - u32 sum = (__force u32)csum2; - /* rotate sum to align it with a 16b boundary */ if (offset & 1) - sum = ror32(sum, 8); + return (__force __wsum)ror32((__force u32)sum, 8); + return sum; +} - return csum_add(csum, (__force __wsum)sum); +static inline __wsum +csum_block_add(__wsum csum, __wsum csum2, int offset) +{ + return csum_add(csum, csum_shift(csum2, offset)); } static inline __wsum diff --git a/include/net/compat.h b/include/net/compat.h index 84805bdc4435..595fee069b82 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -71,13 +71,26 @@ struct compat_group_source_req { } __packed; struct compat_group_filter { - __u32 gf_interface; - struct __kernel_sockaddr_storage gf_group - __aligned(4); - __u32 gf_fmode; - __u32 gf_numsrc; - struct __kernel_sockaddr_storage gf_slist[1] - __aligned(4); + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux + __aligned(4); + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1] + __aligned(4); + } __packed; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group + __aligned(4); + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[] + __aligned(4); + } __packed; + }; } __packed; #endif /* NET_COMPAT_H */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 7c984cadfec4..154cf0dbca37 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -32,8 +32,9 @@ struct devlink_dev_stats { struct devlink_ops; struct devlink { - struct list_head list; + u32 index; struct list_head port_list; + struct list_head rate_list; struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; @@ -54,8 +55,9 @@ struct devlink { * port, sb, dpipe, resource, params, region, traps and more. */ u8 reload_failed:1, - reload_enabled:1, - registered:1; + reload_enabled:1; + refcount_t refcount; + struct completion comp; char priv[0] __aligned(NETDEV_ALIGN); }; @@ -133,13 +135,30 @@ struct devlink_port_attrs { }; }; +struct devlink_rate { + struct list_head list; + enum devlink_rate_type type; + struct devlink *devlink; + void *priv; + u64 tx_share; + u64 tx_max; + + struct devlink_rate *parent; + union { + struct devlink_port *devlink_port; + struct { + char *name; + refcount_t refcnt; + }; + }; +}; + struct devlink_port { struct list_head list; struct list_head param_list; struct list_head region_list; struct devlink *devlink; unsigned int index; - bool registered; spinlock_t type_lock; /* Protects type and type_dev * pointer consistency. 
*/ @@ -152,6 +171,8 @@ struct devlink_port { struct delayed_work type_warn_dw; struct list_head reporter_list; struct mutex reporters_lock; /* Protects reporter_list */ + + struct devlink_rate *devlink_rate; }; struct devlink_port_new_attrs { @@ -500,6 +521,9 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -540,6 +564,15 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset" #define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL +#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME "enable_eth" +#define DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME "enable_rdma" +#define DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME "enable_vnet" +#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE DEVLINK_PARAM_TYPE_BOOL + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ @@ -1327,6 +1360,16 @@ struct devlink_ops { enum devlink_trap_action action, struct netlink_ext_ack *extack); /** + * @trap_drop_counter_get: Trap drop counter get function. + * + * Should be used by device drivers to report number of packets + * that have been dropped, and cannot be passed to the devlink + * subsystem by the underlying device. + */ + int (*trap_drop_counter_get)(struct devlink *devlink, + const struct devlink_trap *trap, + u64 *p_drops); + /** * @trap_policer_init: Trap policer initialization function. * * Should be used by device drivers to initialize the trap policer in @@ -1367,8 +1410,8 @@ struct devlink_ops { * * Note: @extack can be NULL when port notifier queries the port function. */ - int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, - u8 *hw_addr, int *hw_addr_len, + int (*port_function_hw_addr_get)(struct devlink_port *port, u8 *hw_addr, + int *hw_addr_len, struct netlink_ext_ack *extack); /** * @port_function_hw_addr_set: Port function's hardware address set function. @@ -1377,7 +1420,7 @@ struct devlink_ops { * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port * function handling for a particular port. */ - int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port, + int (*port_function_hw_addr_set)(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack); /** @@ -1433,8 +1476,7 @@ struct devlink_ops { * * Return: 0 on success, negative value otherwise. */ - int (*port_fn_state_get)(struct devlink *devlink, - struct devlink_port *port, + int (*port_fn_state_get)(struct devlink_port *port, enum devlink_port_fn_state *state, enum devlink_port_fn_opstate *opstate, struct netlink_ext_ack *extack); @@ -1449,10 +1491,33 @@ struct devlink_ops { * * Return: 0 on success, negative value otherwise. */ - int (*port_fn_state_set)(struct devlink *devlink, - struct devlink_port *port, + int (*port_fn_state_set)(struct devlink_port *port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack); + + /** + * Rate control callbacks. 
+ */ + int (*rate_leaf_tx_share_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); + int (*rate_leaf_tx_max_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); + int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); + int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); + int (*rate_node_new)(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack); + int (*rate_node_del)(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack); + int (*rate_leaf_parent_set)(struct devlink_rate *child, + struct devlink_rate *parent, + void *priv_child, void *priv_parent, + struct netlink_ext_ack *extack); + int (*rate_node_parent_set)(struct devlink_rate *child, + struct devlink_rate *parent, + void *priv_child, void *priv_parent, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) @@ -1487,9 +1552,21 @@ static inline struct devlink *netdev_to_devlink(struct net_device *dev) struct ib_device; struct net *devlink_net(const struct devlink *devlink); -void devlink_net_set(struct devlink *devlink, struct net *net); -struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size); -int devlink_register(struct devlink *devlink, struct device *dev); +/* This call is intended for software devices that can create + * devlink instances in other namespaces than init_net. + * + * Drivers that operate on real HW must use devlink_alloc() instead. + */ +struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, + size_t priv_size, struct net *net, + struct device *dev); +static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, + size_t priv_size, + struct device *dev) +{ + return devlink_alloc_ns(ops, priv_size, &init_net, dev); +} +int devlink_register(struct devlink *devlink); void devlink_unregister(struct devlink *devlink); void devlink_reload_enable(struct devlink *devlink); void devlink_reload_disable(struct devlink *devlink); @@ -1512,6 +1589,9 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u32 sf, bool external); +int devlink_rate_leaf_create(struct devlink_port *port, void *priv); +void devlink_rate_leaf_destroy(struct devlink_port *devlink_port); +void devlink_rate_nodes_destroy(struct devlink *devlink); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, @@ -1567,8 +1647,16 @@ int devlink_params_register(struct devlink *devlink, void devlink_params_unregister(struct devlink *devlink, const struct devlink_param *params, size_t params_count); +int devlink_param_register(struct devlink *devlink, + const struct devlink_param *param); +void devlink_param_unregister(struct devlink *devlink, + const struct devlink_param *param); void devlink_params_publish(struct devlink *devlink); void devlink_params_unpublish(struct devlink *devlink); +void devlink_param_publish(struct devlink *devlink, + const struct devlink_param *param); +void devlink_param_unpublish(struct devlink *devlink, + const struct devlink_param *param); int devlink_port_params_register(struct devlink_port *devlink_port, const struct devlink_param *params, size_t 
params_count); diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index ccc6e9df178b..ddd6565957b3 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -29,7 +29,7 @@ struct dn_fib_nh { struct dn_fib_info { struct dn_fib_info *fib_next; struct dn_fib_info *fib_prev; - int fib_treeref; + refcount_t fib_treeref; refcount_t fib_clntref; int fib_dead; unsigned int fib_flags; diff --git a/include/net/dsa.h b/include/net/dsa.h index e1a2610a0e06..f9a17145255a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -50,6 +50,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20 #define DSA_TAG_PROTO_SEVILLE_VALUE 21 #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 +#define DSA_TAG_PROTO_SJA1110_VALUE 23 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -75,23 +76,18 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE, DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE, DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, + DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, }; -struct packet_type; struct dsa_switch; struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); - struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, int *offset); - /* Used to determine which traffic should match the DSA filter in - * eth_type_trans, and which, if any, should bypass it and be processed - * as regular on the master net device. - */ - bool (*filter)(const struct sk_buff *skb, struct net_device *dev); - unsigned int overhead; + unsigned int needed_headroom; + unsigned int needed_tailroom; const char *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC @@ -100,7 +96,6 @@ struct dsa_device_ops { * its RX filter. */ bool promisc_on_master; - bool tail_tag; }; /* This structure defines the control interfaces that are overlayed by the @@ -109,8 +104,8 @@ struct dsa_device_ops { * function pointers. */ struct dsa_netdevice_ops { - int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, - int cmd); + int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, + int cmd); }; #define DSA_TAG_DRIVER_ALIAS "dsa_tag-" @@ -157,6 +152,9 @@ struct dsa_switch_tree { */ struct net_device **lags; unsigned int lags_len; + + /* Track the largest switch index within a tree */ + unsigned int last_switch; }; #define dsa_lags_foreach_id(_id, _dst) \ @@ -236,9 +234,7 @@ struct dsa_port { /* Copies for faster access in master receive hot path */ struct dsa_switch_tree *dst; - struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt); - bool (*filter)(const struct sk_buff *skb, struct net_device *dev); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); enum { DSA_PORT_TYPE_UNUSED = 0, @@ -255,8 +251,11 @@ struct dsa_port { struct device_node *dn; unsigned int ageing_time; bool vlan_filtering; + /* Managed by DSA on user ports and by drivers on CPU and DSA ports */ + bool learning; u8 stp_state; struct net_device *bridge_dev; + int bridge_num; struct devlink_port devlink_port; bool devlink_port_setup; struct phylink *pl; @@ -283,6 +282,12 @@ struct dsa_port { */ const struct dsa_netdevice_ops *netdev_ops; + /* List of MAC addresses that must be forwarded on this port. 
+ * These are only valid on CPU ports and DSA links. + */ + struct list_head fdbs; + struct list_head mdbs; + bool setup; }; @@ -297,6 +302,13 @@ struct dsa_link { struct list_head list; }; +struct dsa_mac_addr { + unsigned char addr[ETH_ALEN]; + u16 vid; + refcount_t refcount; + struct list_head list; +}; + struct dsa_switch { bool setup; @@ -337,6 +349,9 @@ struct dsa_switch { unsigned int ageing_time_min; unsigned int ageing_time_max; + /* Storage for drivers using tag_8021q */ + struct dsa_8021q_context *tag_8021q_ctx; + /* devlink used to represent this switch device */ struct devlink *devlink; @@ -348,6 +363,9 @@ struct dsa_switch { */ bool vlan_filtering_is_global; + /* Keep VLAN filtering enabled on ports not offloading any upper. */ + bool needs_standalone_vlan_filtering; + /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges * that have vlan_filtering=0. All drivers should ideally set this (and * then the option would get removed), but it is unknown whether this @@ -392,6 +410,13 @@ struct dsa_switch { */ unsigned int num_lag_ids; + /* Drivers that support bridge forwarding offload should set this to + * the maximum number of bridges spanning the same switch tree (or all + * trees, in the case of cross-tree bridging support) that can be + * offloaded. + */ + unsigned int num_fwd_offloading_bridges; + size_t num_ports; }; @@ -407,6 +432,21 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p) return NULL; } +static inline bool dsa_port_is_dsa(struct dsa_port *port) +{ + return port->type == DSA_PORT_TYPE_DSA; +} + +static inline bool dsa_port_is_cpu(struct dsa_port *port) +{ + return port->type == DSA_PORT_TYPE_CPU; +} + +static inline bool dsa_port_is_user(struct dsa_port *dp) +{ + return dp->type == DSA_PORT_TYPE_USER; +} + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; @@ -474,6 +514,32 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port) return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index); } +/* Return true if this is the local port used to reach the CPU port */ +static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port) +{ + if (dsa_is_unused_port(ds, port)) + return false; + + return port == dsa_upstream_port(ds, port); +} + +/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning + * that the routing port from @downstream_ds to @upstream_ds is also the port + * which @downstream_ds uses to reach its dedicated CPU. 
+ */ +static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds, + struct dsa_switch *downstream_ds) +{ + int routing_port; + + if (upstream_ds == downstream_ds) + return true; + + routing_port = dsa_routing_port(downstream_ds, upstream_ds->index); + + return dsa_is_upstream_port(downstream_ds, routing_port); +} + static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) { const struct dsa_switch *ds = dp->ds; @@ -634,6 +700,14 @@ struct dsa_switch_ops { struct net_device *bridge); void (*port_bridge_leave)(struct dsa_switch *ds, int port, struct net_device *bridge); + /* Called right after .port_bridge_join() */ + int (*port_bridge_tx_fwd_offload)(struct dsa_switch *ds, int port, + struct net_device *bridge, + int bridge_num); + /* Called right before .port_bridge_leave() */ + void (*port_bridge_tx_fwd_unoffload)(struct dsa_switch *ds, int port, + struct net_device *bridge, + int bridge_num); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); void (*port_fast_age)(struct dsa_switch *ds, int port); @@ -643,8 +717,6 @@ struct dsa_switch_ops { int (*port_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); - int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter, - struct netlink_ext_ack *extack); /* * VLAN support @@ -813,6 +885,13 @@ struct dsa_switch_ops { const struct switchdev_obj_ring_role_mrp *mrp); int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port, const struct switchdev_obj_ring_role_mrp *mrp); + + /* + * tag_8021q operations + */ + int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid, + u16 flags); + int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ @@ -898,15 +977,6 @@ static inline bool netdev_uses_dsa(const struct net_device *dev) return false; } -static inline bool dsa_can_decode(const struct sk_buff *skb, - struct net_device *dev) -{ -#if IS_ENABLED(CONFIG_NET_DSA) - return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev); -#endif - return false; -} - /* All DSA tags that push the EtherType to the right (basically all except tail * tags, which don't break dissection) can be treated the same from the * perspective of the flow dissector. 
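The two helpers above reduce a topology question (is this port facing the CPU? does traffic from that switch pass through this one on its way to the CPU?) to simple port-index comparisons built on dsa_upstream_port(). A minimal usage sketch, assuming only the helpers visible in this header; my_set_port_forwarding() is a hypothetical driver hook, not part of the patch:

static void my_setup_forwarding(struct dsa_switch *ds)
{
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* dsa_is_upstream_port() is true only for the one port
		 * this switch uses to reach its dedicated CPU port.
		 */
		my_set_port_forwarding(ds, port,
				       dsa_is_upstream_port(ds, port));
	}
}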
@@ -926,7 +996,7 @@ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb, { #if IS_ENABLED(CONFIG_NET_DSA) const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops; - int tag_len = ops->overhead; + int tag_len = ops->needed_headroom; *offset = tag_len; *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1]; @@ -947,8 +1017,8 @@ static inline int __dsa_netdevice_ops_check(struct net_device *dev) return 0; } -static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) +static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) { const struct dsa_netdevice_ops *ops; int err; @@ -959,11 +1029,11 @@ static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, ops = dev->dsa_ptr->netdev_ops; - return ops->ndo_do_ioctl(dev, ifr, cmd); + return ops->ndo_eth_ioctl(dev, ifr, cmd); } #else -static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) +static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) { return -EOPNOTSUPP; } diff --git a/include/net/dst.h b/include/net/dst.h index 75b1e734e9c2..a057319aabef 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -277,6 +277,7 @@ static inline void skb_dst_drop(struct sk_buff *skb) static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst) { + nskb->slow_gro |= !!refdst; nskb->_skb_refdst = refdst; if (!(nskb->_skb_refdst & SKB_DST_NOREF)) dst_clone(skb_dst(nskb)); @@ -316,6 +317,7 @@ static inline bool skb_dst_force(struct sk_buff *skb) dst = NULL; skb->_skb_refdst = (unsigned long)dst; + skb->slow_gro |= !!dst; } return skb->_skb_refdst != 0UL; diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 56cb3c38569a..14efa0ded75d 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb) return &md_dst->u.tun_info; dst = skb_dst(skb); - if (dst && dst->lwtstate) + if (dst && dst->lwtstate && + (dst->lwtstate->type == LWTUNNEL_ENCAP_IP || + dst->lwtstate->type == LWTUNNEL_ENCAP_IP6)) return lwt_tun_info(dst->lwtstate); return NULL; diff --git a/include/net/flow.h b/include/net/flow.h index 6f5e70240071..58beb16a49b8 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -194,7 +194,7 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4) { - return &(flowi4_to_flowi(fl4)->u.__fl_common); + return &(fl4->__fl_common); } static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) @@ -204,7 +204,7 @@ static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6) { - return &(flowi6_to_flowi(fl6)->u.__fl_common); + return &(fl6->__fl_common); } static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index dc5c1e69cd9f..3961461d9c8b 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -293,7 +293,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action) } /** - * flow_action_has_one_action() - check if exactly one action is present + * flow_offload_has_one_action() - check if exactly one action is present * @action: tc filter flow offload action * * Returns true if exactly one action is present. 
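The kernel-doc fix above makes the comment match the real helper name, flow_offload_has_one_action(), which simply tests that the flow action list contains exactly one entry. A hedged caller sketch, not taken from the patch (my_validate_flow() and the error message are invented for illustration):

static int my_validate_flow(const struct flow_action *action,
			    struct netlink_ext_ack *extack)
{
	/* Many drivers can offload only a single action per rule. */
	if (!flow_offload_has_one_action(action)) {
		NL_SET_ERR_MSG(extack, "Only a single action is supported");
		return -EOPNOTSUPP;
	}

	return 0;
}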
@@ -451,6 +451,7 @@ struct flow_block_offload { struct list_head *driver_block_list; struct netlink_ext_ack *extack; struct Qdisc *sch; + struct list_head *cb_list_head; }; enum tc_setup_type; diff --git a/include/net/icmp.h b/include/net/icmp.h index fd84adc47963..caddf4a59ad1 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -57,5 +57,6 @@ int icmp_rcv(struct sk_buff *skb); int icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); void icmp_out_count(struct net *net, unsigned char type); +bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr); #endif /* _ICMP_H */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c0854933e24f..11630351c978 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -43,6 +43,11 @@ struct ieee80211_radiotap_header { * @it_present: (first) present word */ __le32 it_present; + + /** + * @it_optional: all remaining presence bitmaps + */ + __le32 it_optional[]; } __packed; /* version is always 0 */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 71bb4cc4d05d..653e7d0f65cb 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -82,9 +82,6 @@ struct ip6_sf_socklist { struct in6_addr sl_addr[]; }; -#define IP6_SFLSIZE(count) (sizeof(struct ip6_sf_socklist) + \ - (count) * sizeof(struct in6_addr)) - #define IP6_SFBLOCK 10 /* allocate this many at once */ struct ipv6_mc_socklist { @@ -213,6 +210,8 @@ struct inet6_dev { unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; + + unsigned int ra_mtu; }; static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 3c8c59471bc1..b06c2d02ec84 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -135,7 +135,7 @@ struct inet_connection_sock { u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; -#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64)) +#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ca6a3ea9057e..f72ec113ae56 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -160,6 +160,12 @@ struct inet_hashinfo { ____cacheline_aligned_in_smp; }; +#define inet_lhash2_for_each_icsk_continue(__icsk) \ + hlist_for_each_entry_continue(__icsk, icsk_listen_portaddr_node) + +#define inet_lhash2_for_each_icsk(__icsk, list) \ + hlist_for_each_entry(__icsk, list, icsk_listen_portaddr_node) + #define inet_lhash2_for_each_icsk_rcu(__icsk, list) \ hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node) diff --git a/include/net/ioam6.h b/include/net/ioam6.h new file mode 100644 index 000000000000..3c2993bc48c8 --- /dev/null +++ b/include/net/ioam6.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM implementation + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ + +#ifndef _NET_IOAM6_H +#define _NET_IOAM6_H + +#include <linux/net.h> +#include <linux/ipv6.h> +#include <linux/ioam6.h> +#include <linux/rhashtable-types.h> + +struct ioam6_namespace { + struct rhash_head head; + struct rcu_head rcu; + + struct ioam6_schema __rcu *schema; + + __be16 id; + __be32 data; + __be64 data_wide; +}; + +struct ioam6_schema { + struct rhash_head head; + struct rcu_head rcu; + + struct ioam6_namespace __rcu *ns; 
+ + u32 id; + int len; + __be32 hdr; + + u8 data[0]; +}; + +struct ioam6_pernet_data { + struct mutex lock; + struct rhashtable namespaces; + struct rhashtable schemas; +}; + +static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net) +{ +#if IS_ENABLED(CONFIG_IPV6) + return net->ipv6.ioam6_data; +#else + return NULL; +#endif +} + +struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id); +void ioam6_fill_trace_data(struct sk_buff *skb, + struct ioam6_namespace *ns, + struct ioam6_trace_hdr *trace); + +int ioam6_init(void); +void ioam6_exit(void); + +int ioam6_iptunnel_init(void); +void ioam6_iptunnel_exit(void); + +#endif /* _NET_IOAM6_H */ diff --git a/include/net/ip.h b/include/net/ip.h index e20874059f82..9192444f2964 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -31,6 +31,7 @@ #include <net/flow.h> #include <net/flow_dissector.h> #include <net/netns/hash.h> +#include <net/lwtunnel.h> #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ #define IPV4_MIN_MTU 68 /* RFC 791 */ @@ -435,32 +436,49 @@ static inline bool ip_sk_ignore_df(const struct sock *sk) static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { + const struct rtable *rt = container_of(dst, struct rtable, dst); struct net *net = dev_net(dst->dev); unsigned int mtu; if (net->ipv4.sysctl_ip_fwd_use_pmtu || ip_mtu_locked(dst) || - !forwarding) - return dst_mtu(dst); + !forwarding) { + mtu = rt->rt_pmtu; + if (mtu && time_before(jiffies, rt->dst.expires)) + goto out; + } /* 'forwarding = true' case should always honour route mtu */ mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) - return mtu; + goto out; + + mtu = READ_ONCE(dst->dev->mtu); + + if (unlikely(ip_mtu_locked(dst))) { + if (rt->rt_uses_gateway && mtu > 576) + mtu = 576; + } - return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); +out: + mtu = min_t(unsigned int, mtu, IP_MAX_MTU); + + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } static inline unsigned int ip_skb_dst_mtu(struct sock *sk, const struct sk_buff *skb) { + unsigned int mtu; + if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } - return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); } struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 15b7fbe6b15c..c412dde4d67d 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i) return false; } -/* Function to safely get fn->sernum for passed in rt +/* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. 
* Return true if we can get cookie safely * Return false if not @@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, if (fn) { *cookie = fn->fn_sernum; - /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */ + /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */ smp_rmb(); status = true; } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f51a118bfce8..5efd0b71dc67 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -263,13 +263,20 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); -static inline int ip6_skb_dst_mtu(struct sk_buff *skb) +static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) { + unsigned int mtu; + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? inet6_sk(skb->sk) : NULL; - return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? - skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); + if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) { + mtu = READ_ONCE(skb_dst(skb)->dev->mtu); + mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); + } else + mtu = dst_mtu(skb_dst(skb)); + + return mtu; } static inline bool ip6_sk_accept_pmtu(const struct sock *sk) @@ -309,15 +316,16 @@ static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info * !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws); } -static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) +static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst, + bool forwarding) { struct inet6_dev *idev; unsigned int mtu; - if (dst_metric_locked(dst, RTAX_MTU)) { + if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) { mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) - return mtu; + goto out; } mtu = IPV6_MIN_MTU; @@ -327,7 +335,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) mtu = idev->cnf.mtu6; rcu_read_unlock(); - return mtu; +out: + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } u32 ip6_mtu_from_fib6(const struct fib6_result *res, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a914f33f3ed5..21c5386d4a6d 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -133,7 +133,7 @@ struct fib_info { struct hlist_node fib_lhash; struct list_head nh_list; struct net *fib_net; - int fib_treeref; + refcount_t fib_treeref; refcount_t fib_clntref; unsigned int fib_flags; unsigned char fib_dead; @@ -466,6 +466,49 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags); void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig); +/* Fields used for sysctl_fib_multipath_hash_fields. + * Common to IPv4 and IPv6. + * + * Add new fields at the end. This is user API. 
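+ *
+ * Illustrative use of the mask (values follow from the BIT() positions
+ * below): selecting the outer 3-tuple plus the L4 ports is
+ * BIT(0)|BIT(1)|BIT(2)|BIT(4)|BIT(5) == 0x37, e.g.
+ *
+ *	sysctl -w net.ipv4.fib_multipath_hash_fields=0x37
+ *
+ * with the multipath hash policy set to the custom-fields mode.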
+ */ +#define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0) +#define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1) +#define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2) +#define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3) +#define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4) +#define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5) +#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6) +#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7) +#define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8) +#define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9) +#define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10) +#define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11) + +#define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \ + (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \ + FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \ + FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \ + FIB_MULTIPATH_HASH_FIELD_DST_PORT) + +#define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \ + (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \ + FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \ + FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \ + FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) + +#define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \ + (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \ + FIB_MULTIPATH_HASH_FIELD_INNER_MASK) + +#define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \ + (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ + FIB_MULTIPATH_HASH_FIELD_DST_IP | \ + FIB_MULTIPATH_HASH_FIELD_IP_PROTO) + #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 548b65bd3973..bc3b13ec93c9 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -270,7 +270,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const u8 proto, int tunnel_hlen); int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); -int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 448bf2b34759..f2d0ecc257bb 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -926,11 +926,19 @@ static inline int ip6_multipath_hash_policy(const struct net *net) { return net->ipv6.sysctl.multipath_hash_policy; } +static inline u32 ip6_multipath_hash_fields(const struct net *net) +{ + return net->ipv6.sysctl.multipath_hash_fields; +} #else static inline int ip6_multipath_hash_policy(const struct net *net) { return 0; } +static inline u32 ip6_multipath_hash_fields(const struct net *net) +{ + return 0; +} #endif /* diff --git a/include/net/ipx.h b/include/net/ipx.h deleted file mode 100644 index 9d1342807b59..000000000000 --- a/include/net/ipx.h +++ /dev/null @@ -1,171 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _NET_INET_IPX_H_ -#define _NET_INET_IPX_H_ -/* - * The following information is in its entirety obtained from: - * - * Novell 'IPX Router Specification' Version 1.10 - * Part No. 
107-000029-001 - * - * Which is available from ftp.novell.com - */ - -#include <linux/netdevice.h> -#include <net/datalink.h> -#include <linux/ipx.h> -#include <linux/list.h> -#include <linux/slab.h> -#include <linux/refcount.h> - -struct ipx_address { - __be32 net; - __u8 node[IPX_NODE_LEN]; - __be16 sock; -}; - -#define ipx_broadcast_node "\377\377\377\377\377\377" -#define ipx_this_node "\0\0\0\0\0\0" - -#define IPX_MAX_PPROP_HOPS 8 - -struct ipxhdr { - __be16 ipx_checksum __packed; -#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF) - __be16 ipx_pktsize __packed; - __u8 ipx_tctrl; - __u8 ipx_type; -#define IPX_TYPE_UNKNOWN 0x00 -#define IPX_TYPE_RIP 0x01 /* may also be 0 */ -#define IPX_TYPE_SAP 0x04 /* may also be 0 */ -#define IPX_TYPE_SPX 0x05 /* SPX protocol */ -#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */ -#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */ - struct ipx_address ipx_dest __packed; - struct ipx_address ipx_source __packed; -}; - -/* From af_ipx.c */ -extern int sysctl_ipx_pprop_broadcasting; - -struct ipx_interface { - /* IPX address */ - __be32 if_netnum; - unsigned char if_node[IPX_NODE_LEN]; - refcount_t refcnt; - - /* physical device info */ - struct net_device *if_dev; - struct datalink_proto *if_dlink; - __be16 if_dlink_type; - - /* socket support */ - unsigned short if_sknum; - struct hlist_head if_sklist; - spinlock_t if_sklist_lock; - - /* administrative overhead */ - int if_ipx_offset; - unsigned char if_internal; - unsigned char if_primary; - - struct list_head node; /* node in ipx_interfaces list */ -}; - -struct ipx_route { - __be32 ir_net; - struct ipx_interface *ir_intrfc; - unsigned char ir_routed; - unsigned char ir_router_node[IPX_NODE_LEN]; - struct list_head node; /* node in ipx_routes list */ - refcount_t refcnt; -}; - -struct ipx_cb { - u8 ipx_tctrl; - __be32 ipx_dest_net; - __be32 ipx_source_net; - struct { - __be32 netnum; - int index; - } last_hop; -}; - -#include <net/sock.h> - -struct ipx_sock { - /* struct sock has to be the first member of ipx_sock */ - struct sock sk; - struct ipx_address dest_addr; - struct ipx_interface *intrfc; - __be16 port; -#ifdef CONFIG_IPX_INTERN - unsigned char node[IPX_NODE_LEN]; -#endif - unsigned short type; - /* - * To handle special ncp connection-handling sockets for mars_nwe, - * the connection number must be stored in the socket. 
- */ - unsigned short ipx_ncp_conn; -}; - -static inline struct ipx_sock *ipx_sk(struct sock *sk) -{ - return (struct ipx_sock *)sk; -} - -#define IPX_SKB_CB(__skb) ((struct ipx_cb *)&((__skb)->cb[0])) - -#define IPX_MIN_EPHEMERAL_SOCKET 0x4000 -#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff - -extern struct list_head ipx_routes; -extern rwlock_t ipx_routes_lock; - -extern struct list_head ipx_interfaces; -struct ipx_interface *ipx_interfaces_head(void); -extern spinlock_t ipx_interfaces_lock; - -extern struct ipx_interface *ipx_primary_net; - -int ipx_proc_init(void); -void ipx_proc_exit(void); - -const char *ipx_frame_name(__be16); -const char *ipx_device_name(struct ipx_interface *intrfc); - -static __inline__ void ipxitf_hold(struct ipx_interface *intrfc) -{ - refcount_inc(&intrfc->refcnt); -} - -void ipxitf_down(struct ipx_interface *intrfc); -struct ipx_interface *ipxitf_find_using_net(__be32 net); -int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node); -__be16 ipx_cksum(struct ipxhdr *packet, int length); -int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, - unsigned char *node); -void ipxrtr_del_routes(struct ipx_interface *intrfc); -int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct msghdr *msg, size_t len, int noblock); -int ipxrtr_route_skb(struct sk_buff *skb); -struct ipx_route *ipxrtr_lookup(__be32 net); -int ipxrtr_ioctl(unsigned int cmd, void __user *arg); - -static __inline__ void ipxitf_put(struct ipx_interface *intrfc) -{ - if (refcount_dec_and_test(&intrfc->refcnt)) - ipxitf_down(intrfc); -} - -static __inline__ void ipxrtr_hold(struct ipx_route *rt) -{ - refcount_inc(&rt->refcnt); -} - -static __inline__ void ipxrtr_put(struct ipx_route *rt) -{ - if (refcount_dec_and_test(&rt->refcnt)) - kfree(rt); -} -#endif /* _NET_INET_IPX_H_ */ diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index c0f0a13ed818..49aa79c7b278 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -15,9 +15,11 @@ #include <linux/if_ether.h> /* Lengths of frame formats */ -#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ -#define LLC_PDU_LEN_S 4 -#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ +#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ +#define LLC_PDU_LEN_S 4 +#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ +/* header and 1 control byte and XID info */ +#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info)) /* Known SAP addresses */ #define LLC_GLOBAL_SAP 0xFF #define LLC_NULL_SAP 0x00 /* not network-layer visible */ @@ -50,9 +52,10 @@ #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ #define LLC_PDU_TYPE_MASK 0x03 -#define LLC_PDU_TYPE_I 0 /* first bit */ -#define LLC_PDU_TYPE_S 1 /* first two bits */ -#define LLC_PDU_TYPE_U 3 /* first two bits */ +#define LLC_PDU_TYPE_I 0 /* first bit */ +#define LLC_PDU_TYPE_S 1 /* first two bits */ +#define LLC_PDU_TYPE_U 3 /* first two bits */ +#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */ #define LLC_PDU_TYPE_IS_I(pdu) \ ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0) @@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, u8 ssap, u8 dsap, u8 cr) { - const int hlen = type == LLC_PDU_TYPE_U ? 
3 : 4; + int hlen = 4; /* default value for I and S types */ struct llc_pdu_un *pdu; + switch (type) { + case LLC_PDU_TYPE_U: + hlen = 3; + break; + case LLC_PDU_TYPE_U_XID: + hlen = 6; + break; + } + skb_push(skb, hlen); skb_reset_network_header(skb); pdu = llc_pdu_un_hdr(skb); @@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */ xid_info->type = svcs_supported; xid_info->rw = rx_window << 1; /* size of receive window */ - skb_put(skb, sizeof(struct llc_xid_info)); + + /* no need to push/put since llc_pdu_header_init() has already + * pushed 3 + 3 bytes + */ } /** diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 05cfd6ff6528..6f15e6fa154e 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -51,6 +51,9 @@ struct lwtunnel_encap_ops { }; #ifdef CONFIG_LWTUNNEL + +DECLARE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled); + void lwtstate_free(struct lwtunnel_state *lws); static inline struct lwtunnel_state * diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 445b66c6eb7e..af0fc13cea34 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation + * Copyright (C) 2018 - 2021 Intel Corporation */ #ifndef MAC80211_H @@ -526,6 +526,7 @@ struct ieee80211_fils_discovery { * @twt_responder: does this BSS support TWT requester (relevant for managed * mode only, set if the AP advertises TWT responder role) * @twt_protected: does this BSS support protected TWT frames + * @twt_broadcast: does this BSS support broadcast TWT * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -642,6 +643,7 @@ struct ieee80211_bss_conf { bool twt_requester; bool twt_responder; bool twt_protected; + bool twt_broadcast; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -1709,6 +1711,10 @@ enum ieee80211_offload_flags { * protected by fq->lock. * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see * &enum ieee80211_offload_flags. + * @color_change_active: marks whether a color change is ongoing. Internally it is + * write-protected by sdata_lock and local->mtx so holding either is fine + * for read access. + * @color_change_color: the bss color that will be used after the change. */ struct ieee80211_vif { enum nl80211_iftype type; @@ -1737,6 +1743,9 @@ struct ieee80211_vif { bool txqs_stopped[IEEE80211_NUM_ACS]; + bool color_change_active; + u8 color_change_color; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; @@ -3345,6 +3354,21 @@ enum ieee80211_reconfig_type { }; /** + * struct ieee80211_prep_tx_info - prepare TX information + * @duration: if non-zero, hint about the required duration, + * only used with the mgd_prepare_tx() method. + * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc) + * @success: whether the frame exchange was successful, only + * used with the mgd_complete_tx() method, and then only + * valid for auth and (re)assoc. 
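+ *
+ * Illustrative driver-side sketch (not from this patch): arm protection
+ * for the hinted duration, falling back to a driver default when no
+ * hint (zero) is given; drv_schedule_session_protection() is a
+ * hypothetical helper:
+ *
+ *	static void drv_mgd_prepare_tx(struct ieee80211_hw *hw,
+ *				       struct ieee80211_vif *vif,
+ *				       struct ieee80211_prep_tx_info *info)
+ *	{
+ *		u16 dur = info->duration ?: 100;
+ *
+ *		drv_schedule_session_protection(hw, vif, dur, info->subtype);
+ *	}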
+ */ +struct ieee80211_prep_tx_info { + u16 duration; + u16 subtype; + u8 success:1; +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -3756,9 +3780,13 @@ enum ieee80211_reconfig_type { * frame in case that no beacon was heard from the AP/P2P GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. - * If duration is greater than zero, mac80211 hints to the driver the - * duration for which the operation is requested. + * Additional information is passed in the &struct ieee80211_prep_tx_info + * data. If duration there is greater than zero, mac80211 hints to the + * driver the duration for which the operation is requested. * The callback is optional and can (should!) sleep. + * @mgd_complete_tx: Notify the driver that the response frame for a previously + * transmitted frame announced with @mgd_prepare_tx was received, the data + * is filled similarly to @mgd_prepare_tx though the duration is not used. * * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending * a TDLS discovery-request, we expect a reply to arrive on the AP's @@ -3898,6 +3926,13 @@ enum ieee80211_reconfig_type { * @set_sar_specs: Update the SAR (TX power) settings. * @sta_set_decap_offload: Called to notify the driver when a station is allowed * to use rx decapsulation offload + * @add_twt_setup: Update hw with TWT agreement parameters received from the peer. + * This callback allows the hw to check if requested parameters + * are supported and if there is enough room for a new agreement. + * The hw is expected to set agreement result in the req_type field of + * twt structure. + * @twt_teardown_request: Update the hw with TWT teardown request received + * from the peer. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4109,7 +4144,10 @@ struct ieee80211_ops { void (*mgd_prepare_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 duration); + struct ieee80211_prep_tx_info *info); + void (*mgd_complete_tx)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -4218,6 +4256,11 @@ struct ieee80211_ops { void (*sta_set_decap_offload)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled); + void (*add_twt_setup)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt); + void (*twt_teardown_request)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 flowid); }; /** @@ -4984,6 +5027,16 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif); bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif); /** + * ieee80211_color_change_finish - notify mac80211 about color change + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * After a color change announcement was scheduled and the counter in this + * announcement hits 1, this function must be called by the driver to + * notify mac80211 that the color can be changed + */ +void ieee80211_color_change_finish(struct ieee80211_vif *vif); + +/** * ieee80211_proberesp_get - retrieve a Probe Response template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
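/* Illustrative driver-side sketch (not part of this patch) of the
 * ieee80211_color_change_finish() contract documented above, mirroring
 * the existing CSA countdown flow; drv_beacon_tx_status() is a
 * hypothetical TX-status hook:
 */
static void drv_beacon_tx_status(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	/* the countdown in the last transmitted beacon hit 1 */
	if (vif->color_change_active &&
	    ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_color_change_finish(vif);
}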
@@ -5537,7 +5590,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, * * This function iterates over the interfaces associated with a given * hardware that are currently active and calls the callback for them. - * This version can only be used while holding the RTNL. + * This version can only be used while holding the wiphy mutex. * * @hw: the hardware struct of which the interfaces should be iterated over * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags @@ -6184,6 +6237,11 @@ enum rate_control_capabilities { * otherwise the NSS difference doesn't bother us. */ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0), + /** + * @RATE_CTRL_CAPA_AMPDU_TRIGGER: + * mac80211 should start A-MPDU sessions on tx + */ + RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1), }; struct rate_control_ops { @@ -6392,7 +6450,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, /** * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header - * of injected frames + * of injected frames. + * + * To accurately parse and take into account rate and retransmission fields, + * you must initialize the chandef field in the ieee80211_tx_info structure + * of the skb before calling this function. + * * @skb: packet injected by userspace * @dev: the &struct device of this 802.11 device */ @@ -6571,9 +6634,6 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) { } -void __ieee80211_schedule_txq(struct ieee80211_hw *hw, - struct ieee80211_txq *txq, bool force); - /** * ieee80211_schedule_txq - schedule a TXQ for transmission * @@ -6586,11 +6646,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw, * The driver may call this function if it has buffered packets for * this TXQ internally. */ -static inline void -ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) -{ - __ieee80211_schedule_txq(hw, txq, true); -} +void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); /** * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() @@ -6602,12 +6658,8 @@ ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) * The driver may set force=true if it has buffered packets for this TXQ * internally. */ -static inline void -ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, - bool force) -{ - __ieee80211_schedule_txq(hw, txq, force); -} +void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, + bool force); /** * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit @@ -6747,4 +6799,34 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw, struct sk_buff * ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + +/** + * ieeee80211_obss_color_collision_notify - notify userland about a BSS color + * collision. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is + * aware of. + */ +void +ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif, + u64 color_bitmap); + +/** + * ieee80211_is_tx_data - check if frame is a data frame + * + * The function is used to check if a frame is a data frame. Frames with + * hardware encapsulation enabled are data frames. + * + * @skb: the frame to be transmitted. 
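+ *
+ * (With IEEE80211_TX_CTL_HW_80211_ENCAP set, the skb carries an
+ * ethernet-framed payload rather than an 802.11 header, so the flag is
+ * checked before frame_control is inspected.)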
+ */ +static inline bool ieee80211_is_tx_data(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *) skb->data; + + return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP || + ieee80211_is_data(hdr->frame_control); +} + #endif /* MAC80211_H */ diff --git a/include/net/macsec.h b/include/net/macsec.h index 52874cdfe226..d6fa6b97f6ef 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -241,7 +241,7 @@ struct macsec_context { struct macsec_rx_sc *rx_sc; struct { unsigned char assoc_num; - u8 key[MACSEC_KEYID_LEN]; + u8 key[MACSEC_MAX_KEY_LEN]; union { struct macsec_rx_sa *rx_sa; struct macsec_tx_sa *tx_sa; diff --git a/include/net/mctp.h b/include/net/mctp.h new file mode 100644 index 000000000000..a824d47c3c6d --- /dev/null +++ b/include/net/mctp.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Management Component Transport Protocol (MCTP) + * + * Copyright (c) 2021 Code Construct + * Copyright (c) 2021 Google + */ + +#ifndef __NET_MCTP_H +#define __NET_MCTP_H + +#include <linux/bits.h> +#include <linux/mctp.h> +#include <net/net_namespace.h> +#include <net/sock.h> + +/* MCTP packet definitions */ +struct mctp_hdr { + u8 ver; + u8 dest; + u8 src; + u8 flags_seq_tag; +}; + +#define MCTP_VER_MIN 1 +#define MCTP_VER_MAX 1 + +/* Definitions for flags_seq_tag field */ +#define MCTP_HDR_FLAG_SOM BIT(7) +#define MCTP_HDR_FLAG_EOM BIT(6) +#define MCTP_HDR_FLAG_TO BIT(3) +#define MCTP_HDR_FLAGS GENMASK(5, 3) +#define MCTP_HDR_SEQ_SHIFT 4 +#define MCTP_HDR_SEQ_MASK GENMASK(1, 0) +#define MCTP_HDR_TAG_SHIFT 0 +#define MCTP_HDR_TAG_MASK GENMASK(2, 0) + +#define MCTP_HEADER_MAXLEN 4 + +#define MCTP_INITIAL_DEFAULT_NET 1 + +static inline bool mctp_address_ok(mctp_eid_t eid) +{ + return eid >= 8 && eid < 255; +} + +static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb) +{ + return (struct mctp_hdr *)skb_network_header(skb); +} + +/* socket implementation */ +struct mctp_sock { + struct sock sk; + + /* bind() params */ + int bind_net; + mctp_eid_t bind_addr; + __u8 bind_type; + + /* list of mctp_sk_key, for incoming tag lookup. updates protected + * by sk->net->keys_lock + */ + struct hlist_head keys; +}; + +/* Key for matching incoming packets to sockets or reassembly contexts. + * Packets are matched on (src,dest,tag). + * + * Lifetime requirements: + * + * - keys are free()ed via RCU + * + * - a mctp_sk_key contains a reference to a struct sock; this is valid + * for the life of the key. On sock destruction (through unhash), the key is + * removed from lists (see below), and will not be observable after a RCU + * grace period. + * + * any RX occurring within that grace period may still queue to the socket, + * but will hit the SOCK_DEAD case before the socket is freed. + * + * - these mctp_sk_keys appear on two lists: + * 1) the struct mctp_sock->keys list + * 2) the struct netns_mctp->keys list + * + * updates to either list are performed under the netns_mctp->keys + * lock. + * + * - a key may have a sk_buff attached as part of an in-progress message + * reassembly (->reasm_head). The reassembly context is protected by + * reasm_lock, which may be acquired with the keys lock (above) held, if + * necessary. Consequently, keys lock *cannot* be acquired with the + * reasm_lock held. + * + * - there are two destruction paths for a mctp_sk_key: + * + * - through socket unhash (see mctp_sk_unhash). This performs the list + * removal under keys_lock. 
+ * + * - where a key is established to receive a reply message: after receiving + * the (complete) reply, or during reassembly errors. Here, we clean up + * the reassembly context (marking reasm_dead, to prevent another from + * starting), and remove the socket from the netns & socket lists. + */ +struct mctp_sk_key { + mctp_eid_t peer_addr; + mctp_eid_t local_addr; + __u8 tag; /* incoming tag match; invert TO for local */ + + /* we hold a ref to sk when set */ + struct sock *sk; + + /* routing lookup list */ + struct hlist_node hlist; + + /* per-socket list */ + struct hlist_node sklist; + + /* incoming fragment reassembly context */ + spinlock_t reasm_lock; + struct sk_buff *reasm_head; + struct sk_buff **reasm_tailp; + bool reasm_dead; + u8 last_seq; + + struct rcu_head rcu; +}; + +struct mctp_skb_cb { + unsigned int magic; + unsigned int net; + mctp_eid_t src; +}; + +/* skb control-block accessors with a little extra debugging for initial + * development. + * + * TODO: remove checks & mctp_skb_cb->magic; replace callers of __mctp_cb + * with mctp_cb(). + * + * __mctp_cb() is only for the initial ingress code; we should see ->magic set + * at all times after this. + */ +static inline struct mctp_skb_cb *__mctp_cb(struct sk_buff *skb) +{ + struct mctp_skb_cb *cb = (void *)skb->cb; + + cb->magic = 0x4d435450; + return cb; +} + +static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb) +{ + struct mctp_skb_cb *cb = (void *)skb->cb; + + WARN_ON(cb->magic != 0x4d435450); + return (void *)(skb->cb); +} + +/* Route definition. + * + * These are held in the pernet->mctp.routes list, with RCU protection for + * removed routes. We hold a reference to the netdev; routes need to be + * dropped on NETDEV_UNREGISTER events. + * + * Updates to the route table are performed under rtnl; all reads under RCU, + * so routes cannot be referenced over a RCU grace period. Specifically: A + * caller cannot block between mctp_route_lookup and passing the route to + * mctp_do_route. 
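+ *
+ * An illustrative lookup/transmit sequence honouring that constraint
+ * (sketch only; error handling elided, caller stays non-blocking
+ * throughout):
+ *
+ *	struct mctp_route *rt;
+ *
+ *	rcu_read_lock();
+ *	rt = mctp_route_lookup(net, dnet, daddr);
+ *	if (rt)
+ *		rc = mctp_do_route(rt, skb);
+ *	rcu_read_unlock();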
+ */ +struct mctp_route { + mctp_eid_t min, max; + + struct mctp_dev *dev; + unsigned int mtu; + unsigned char type; + int (*output)(struct mctp_route *route, + struct sk_buff *skb); + + struct list_head list; + refcount_t refs; + struct rcu_head rcu; +}; + +/* route interfaces */ +struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet, + mctp_eid_t daddr); + +int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb); + +int mctp_local_output(struct sock *sk, struct mctp_route *rt, + struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag); + +/* routing <--> device interface */ +unsigned int mctp_default_net(struct net *net); +int mctp_default_net_set(struct net *net, unsigned int index); +int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr); +int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr); +void mctp_route_remove_dev(struct mctp_dev *mdev); + +/* neighbour definitions */ +enum mctp_neigh_source { + MCTP_NEIGH_STATIC, + MCTP_NEIGH_DISCOVER, +}; + +struct mctp_neigh { + struct mctp_dev *dev; + mctp_eid_t eid; + enum mctp_neigh_source source; + + unsigned char ha[MAX_ADDR_LEN]; + + struct list_head list; + struct rcu_head rcu; +}; + +int mctp_neigh_init(void); +void mctp_neigh_exit(void); + +// ret_hwaddr may be NULL, otherwise must have space for MAX_ADDR_LEN +int mctp_neigh_lookup(struct mctp_dev *dev, mctp_eid_t eid, + void *ret_hwaddr); +void mctp_neigh_remove_dev(struct mctp_dev *mdev); + +int mctp_routes_init(void); +void mctp_routes_exit(void); + +void mctp_device_init(void); +void mctp_device_exit(void); + +#endif /* __NET_MCTP_H */ diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h new file mode 100644 index 000000000000..71a11012fac7 --- /dev/null +++ b/include/net/mctpdevice.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Management Component Transport Protocol (MCTP) - device + * definitions. + * + * Copyright (c) 2021 Code Construct + * Copyright (c) 2021 Google + */ + +#ifndef __NET_MCTPDEVICE_H +#define __NET_MCTPDEVICE_H + +#include <linux/list.h> +#include <linux/types.h> +#include <linux/refcount.h> + +struct mctp_dev { + struct net_device *dev; + + unsigned int net; + + /* Only modified under RTNL. 
Reads have addrs_lock held */ + u8 *addrs; + size_t num_addrs; + spinlock_t addrs_lock; + + struct rcu_head rcu; +}; + +#define MCTP_INITIAL_DEFAULT_NET 1 + +struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev); +struct mctp_dev *__mctp_dev_get(const struct net_device *dev); + +#endif /* __NET_MCTPDEVICE_H */ diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 83f23774b908..6026bbefbffd 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -23,6 +23,7 @@ struct mptcp_ext { u64 data_seq; u32 subflow_seq; u16 data_len; + __sum16 csum; u8 use_map:1, dsn64:1, data_fin:1, @@ -31,7 +32,8 @@ struct mptcp_ext { mpc_map:1, frozen:1, reset_transient:1; - u8 reset_reason:4; + u8 reset_reason:4, + csum_reqd:1; }; #define MPTCP_RM_IDS_MAX 8 @@ -56,20 +58,33 @@ struct mptcp_addr_info { struct mptcp_out_options { #if IS_ENABLED(CONFIG_MPTCP) u16 suboptions; - u64 sndr_key; - u64 rcvr_key; - u64 ahmac; - struct mptcp_addr_info addr; struct mptcp_rm_list rm_list; u8 join_id; u8 backup; - u8 reset_reason:4; - u8 reset_transient:1; - u32 nonce; - u64 thmac; - u32 token; - u8 hmac[20]; - struct mptcp_ext ext_copy; + u8 reset_reason:4, + reset_transient:1, + csum_reqd:1, + allow_join_id0:1; + union { + struct { + u64 sndr_key; + u64 rcvr_key; + }; + struct { + struct mptcp_addr_info addr; + u64 ahmac; + }; + struct { + struct mptcp_ext ext_copy; + u64 fail_seq; + }; + struct { + u32 nonce; + u32 token; + u64 thmac; + u8 hmac[20]; + }; + }; #endif }; @@ -101,7 +116,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts); -void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts); @@ -223,9 +238,10 @@ static inline bool mptcp_established_options(struct sock *sk, return false; } -static inline void mptcp_incoming_options(struct sock *sk, +static inline bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { + return true; } static inline void mptcp_skb_ext_move(struct sk_buff *to, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index fa5887143f0d..bb5fa5914032 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -23,7 +23,6 @@ #include <net/netns/ieee802154_6lowpan.h> #include <net/netns/sctp.h> #include <net/netns/netfilter.h> -#include <net/netns/x_tables.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netns/conntrack.h> #endif @@ -32,7 +31,9 @@ #include <net/netns/mpls.h> #include <net/netns/can.h> #include <net/netns/xdp.h> +#include <net/netns/smc.h> #include <net/netns/bpf.h> +#include <net/netns/mctp.h> #include <linux/ns_common.h> #include <linux/idr.h> #include <linux/skbuff.h> @@ -131,7 +132,6 @@ struct net { #endif #ifdef CONFIG_NETFILTER struct netns_nf nf; - struct netns_xt xt; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct netns_ct ct; #endif @@ -166,10 +166,16 @@ struct net { #ifdef CONFIG_XDP_SOCKETS struct netns_xdp xdp; #endif +#if IS_ENABLED(CONFIG_MCTP) + struct netns_mctp mctp; +#endif #if IS_ENABLED(CONFIG_CRYPTO_USER) struct sock *crypto_nlsk; #endif struct sock *diag_nlsk; +#if IS_ENABLED(CONFIG_SMC) + struct netns_smc smc; +#endif } __randomize_layout; #include <linux/seq_file_net.h> @@ 
-184,6 +190,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid); void net_ns_barrier(void); + +struct ns_common *get_net_ns(struct ns_common *ns); +struct net *get_net_ns_by_fd(int fd); #else /* CONFIG_NET_NS */ #include <linux/sched.h> #include <linux/nsproxy.h> @@ -203,13 +212,22 @@ static inline void net_ns_get_ownership(const struct net *net, } static inline void net_ns_barrier(void) {} + +static inline struct ns_common *get_net_ns(struct ns_common *ns) +{ + return ERR_PTR(-EINVAL); +} + +static inline struct net *get_net_ns_by_fd(int fd) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_NET_NS */ extern struct list_head net_namespace_list; struct net *get_net_ns_by_pid(pid_t pid); -struct net *get_net_ns_by_fd(int fd); #ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 06dc6db70d18..cc663c68ddc4 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -346,6 +346,13 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) skb_set_nfct(skb, (unsigned long)ct | info); } +extern unsigned int nf_conntrack_net_id; + +static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net) +{ + return net_generic(net, nf_conntrack_net_id); +} + #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v)) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 09f2efea0b97..13807ea94cd2 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net); void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list); void nf_conntrack_proto_pernet_init(struct net *net); -void nf_conntrack_proto_pernet_fini(struct net *net); int nf_conntrack_proto_init(void); void nf_conntrack_proto_fini(void); diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index d00ba6048e44..d932e22edcb4 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -72,14 +72,20 @@ struct nf_ct_event { int report; }; +struct nf_exp_event { + struct nf_conntrack_expect *exp; + u32 portid; + int report; +}; + struct nf_ct_event_notifier { - int (*fcn)(unsigned int events, struct nf_ct_event *item); + int (*ct_event)(unsigned int events, const struct nf_ct_event *item); + int (*exp_event)(unsigned int events, const struct nf_exp_event *item); }; -int nf_conntrack_register_notifier(struct net *net, - struct nf_ct_event_notifier *nb); -void nf_conntrack_unregister_notifier(struct net *net, - struct nf_ct_event_notifier *nb); +void nf_conntrack_register_notifier(struct net *net, + const struct nf_ct_event_notifier *nb); +void nf_conntrack_unregister_notifier(struct net *net); void nf_ct_deliver_cached_events(struct nf_conn *ct); int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, @@ -151,22 +157,6 @@ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) } #ifdef CONFIG_NF_CONNTRACK_EVENTS - -struct nf_exp_event { - struct nf_conntrack_expect *exp; - u32 portid; - int report; -}; - -struct 
nf_exp_event_notifier { - int (*fcn)(unsigned int events, struct nf_exp_event *item); -}; - -int nf_ct_expect_register_notifier(struct net *net, - struct nf_exp_event_notifier *nb); -void nf_ct_expect_unregister_notifier(struct net *net, - struct nf_exp_event_notifier *nb); - void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp, u32 portid, int report); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 96f9cf81f46b..1f47bef51722 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; #ifdef CONFIG_SYSCTL -__printf(3, 4) __cold +__printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, + const struct nf_hook_state *state, const char *fmt, ...); -__printf(5, 6) __cold +__printf(4, 5) __cold void nf_l4proto_log_invalid(const struct sk_buff *skb, - struct net *net, - u16 pf, u8 protonum, + const struct nf_hook_state *state, + u8 protonum, const char *fmt, ...); #else -static inline __printf(5, 6) __cold -void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net, - u16 pf, u8 protonum, const char *fmt, ...) {} -static inline __printf(3, 4) __cold +static inline __printf(4, 5) __cold +void nf_l4proto_log_invalid(const struct sk_buff *skb, + const struct nf_hook_state *state, + u8 protonum, + const char *fmt, ...) {} +static inline __printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, + const struct nf_hook_state *state, const char *fmt, ...) { } #endif /* CONFIG_SYSCTL */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 48ef7460ff30..a3647fadf1cc 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -177,6 +177,8 @@ struct flow_offload { #define NF_FLOW_TIMEOUT (30 * HZ) #define nf_flowtable_time_stamp (u32)jiffies +unsigned long flow_offload_get_timeout(struct flow_offload *flow); + static inline __s32 nf_flow_timeout_delta(unsigned int timeout) { return (__s32)(timeout - nf_flowtable_time_stamp); diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h new file mode 100644 index 000000000000..52e27920f829 --- /dev/null +++ b/include/net/netfilter/nf_hooks_lwtunnel.h @@ -0,0 +1,7 @@ +#include <linux/sysctl.h> +#include <linux/types.h> + +#ifdef CONFIG_SYSCTL +int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +#endif diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index e770bba00066..9eed51e920e8 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -33,8 +33,8 @@ struct nf_queue_handler { void (*nf_hook_drop)(struct net *net); }; -void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); -void nf_unregister_queue_handler(struct net *net); +void nf_register_queue_handler(const struct nf_queue_handler *qh); +void nf_unregister_queue_handler(void); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); void nf_queue_entry_get_refs(struct nf_queue_entry *entry); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0a5655e300b5..148f5d8ee5ab 100644 --- 
a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -23,35 +23,46 @@ struct module; struct nft_pktinfo { struct sk_buff *skb; + const struct nf_hook_state *state; bool tprot_set; u8 tprot; - /* for x_tables compatibility */ - struct xt_action_param xt; + u16 fragoff; + unsigned int thoff; }; +static inline struct sock *nft_sk(const struct nft_pktinfo *pkt) +{ + return pkt->state->sk; +} + +static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt) +{ + return pkt->thoff; +} + static inline struct net *nft_net(const struct nft_pktinfo *pkt) { - return pkt->xt.state->net; + return pkt->state->net; } static inline unsigned int nft_hook(const struct nft_pktinfo *pkt) { - return pkt->xt.state->hook; + return pkt->state->hook; } static inline u8 nft_pf(const struct nft_pktinfo *pkt) { - return pkt->xt.state->pf; + return pkt->state->pf; } static inline const struct net_device *nft_in(const struct nft_pktinfo *pkt) { - return pkt->xt.state->in; + return pkt->state->in; } static inline const struct net_device *nft_out(const struct nft_pktinfo *pkt) { - return pkt->xt.state->out; + return pkt->state->out; } static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, @@ -59,16 +70,15 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_state *state) { pkt->skb = skb; - pkt->xt.state = state; + pkt->state = state; } -static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt) { pkt->tprot_set = false; pkt->tprot = 0; - pkt->xt.thoff = 0; - pkt->xt.fragoff = 0; + pkt->thoff = 0; + pkt->fragoff = 0; } /** diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index fd10a7862fdc..0fa5a6d98a00 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -3,6 +3,7 @@ #define _NET_NF_TABLES_CORE_H #include <net/netfilter/nf_tables.h> +#include <linux/indirect_call_wrapper.h> extern struct nft_expr_type nft_imm_type; extern struct nft_expr_type nft_cmp_type; @@ -15,6 +16,7 @@ extern struct nft_expr_type nft_range_type; extern struct nft_expr_type nft_meta_type; extern struct nft_expr_type nft_rt_type; extern struct nft_expr_type nft_exthdr_type; +extern struct nft_expr_type nft_last_type; #ifdef CONFIG_NETWORK_SECMARK extern struct nft_object_type nft_secmark_obj_type; @@ -88,6 +90,36 @@ extern const struct nft_set_type nft_set_bitmap_type; extern const struct nft_set_type nft_set_pipapo_type; extern const struct nft_set_type nft_set_pipapo_avx2_type; +#ifdef CONFIG_RETPOLINE +bool nft_rhash_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_hash_lookup_fast(const struct net *net, + const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_hash_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +bool nft_set_do_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +#else +static inline bool +nft_set_do_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext) +{ + return 
set->ops->lookup(net, set, key, ext); +} +#endif + +/* called from nft_pipapo_avx2.c */ +bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); +/* called from nft_set_pipapo.c */ +bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext); + struct nft_expr; struct nft_regs; struct nft_pktinfo; diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index 1f7bea39ad1b..eb4c094cd54d 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -5,26 +5,24 @@ #include <net/netfilter/nf_tables.h> #include <net/ip.h> -static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt) { struct iphdr *ip; ip = ip_hdr(pkt->skb); pkt->tprot_set = true; pkt->tprot = ip->protocol; - pkt->xt.thoff = ip_hdrlen(pkt->skb); - pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; + pkt->thoff = ip_hdrlen(pkt->skb); + pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET; } -static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) { struct iphdr *iph, _iph; u32 len, thoff; - iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph), - &_iph); + iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*iph), &_iph); if (!iph) return -1; @@ -33,42 +31,40 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, len = ntohs(iph->tot_len); thoff = iph->ihl * 4; - if (skb->len < len) + if (pkt->skb->len < len) return -1; else if (len < thoff) return -1; pkt->tprot_set = true; pkt->tprot = iph->protocol; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; + pkt->thoff = thoff; + pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; return 0; } -static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) { - if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0) - nft_set_pktinfo_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv4_validate(pkt) < 0) + nft_set_pktinfo_unspec(pkt); } -static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt) { struct iphdr *iph; u32 len, thoff; - if (!pskb_may_pull(skb, sizeof(*iph))) + if (!pskb_may_pull(pkt->skb, sizeof(*iph))) return -1; - iph = ip_hdr(skb); + iph = ip_hdr(pkt->skb); if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; len = ntohs(iph->tot_len); thoff = iph->ihl * 4; - if (skb->len < len) { + if (pkt->skb->len < len) { __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS); return -1; } else if (len < thoff) { @@ -77,8 +73,8 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = iph->protocol; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; + pkt->thoff = thoff; + pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET; return 0; diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index 867de29f3f7a..7595e02b00ba 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -6,8 +6,7 @@ #include <net/ipv6.h> #include 
<net/netfilter/nf_tables.h> -static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt) { unsigned int flags = IP6_FH_F_AUTH; int protohdr, thoff = 0; @@ -15,18 +14,17 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) { - nft_set_pktinfo_unspec(pkt, skb); + nft_set_pktinfo_unspec(pkt); return; } pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; } -static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -36,8 +34,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, int protohdr; u32 pkt_len; - ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h), - &_ip6h); + ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb), + sizeof(*ip6h), &_ip6h); if (!ip6h) return -1; @@ -45,7 +43,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, return -1; pkt_len = ntohs(ip6h->payload_len); - if (pkt_len + sizeof(*ip6h) > skb->len) + if (pkt_len + sizeof(*ip6h) > pkt->skb->len) return -1; protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); @@ -54,8 +52,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; return 0; #else @@ -63,15 +61,13 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, #endif } -static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt) { - if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0) - nft_set_pktinfo_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv6_validate(pkt) < 0) + nft_set_pktinfo_unspec(pkt); } -static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -82,15 +78,15 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, int protohdr; u32 pkt_len; - if (!pskb_may_pull(skb, sizeof(*ip6h))) + if (!pskb_may_pull(pkt->skb, sizeof(*ip6h))) return -1; - ip6h = ipv6_hdr(skb); + ip6h = ipv6_hdr(pkt->skb); if (ip6h->version != 6) goto inhdr_error; pkt_len = ntohs(ip6h->payload_len); - if (pkt_len + sizeof(*ip6h) > skb->len) { + if (pkt_len + sizeof(*ip6h) > pkt->skb->len) { idev = __in6_dev_get(nft_in(pkt)); __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS); return -1; @@ -102,8 +98,8 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, pkt->tprot_set = true; pkt->tprot = protohdr; - pkt->xt.thoff = thoff; - pkt->xt.fragoff = frag_off; + pkt->thoff = thoff; + pkt->fragoff = frag_off; return 0; diff --git a/include/net/netlink.h b/include/net/netlink.h index 1ceec518ab49..7a2a9d3144ba 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -885,7 +885,7 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh, */ static inline int nlmsg_report(const 
struct nlmsghdr *nlh) { - return !!(nlh->nlmsg_flags & NLM_F_ECHO); + return nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0; } /** diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index ad0a95c2335e..0294f3d473af 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -27,6 +27,10 @@ struct nf_tcp_net { u8 tcp_loose; u8 tcp_be_liberal; u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + unsigned int offload_timeout; +#endif }; enum udp_conntrack { @@ -37,6 +41,9 @@ enum udp_conntrack { struct nf_udp_net { unsigned int timeouts[UDP_CT_MAX]; +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + unsigned int offload_timeout; +#endif }; struct nf_icmp_net { @@ -106,7 +113,6 @@ struct netns_ct { struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; - struct nf_exp_event_notifier __rcu *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; #if defined(CONFIG_NF_CONNTRACK_LABELS) unsigned int labels_used; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index f6af8d96d3c6..2f65701a43c9 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -126,6 +126,7 @@ struct netns_ipv4 { u8 sysctl_tcp_syn_retries; u8 sysctl_tcp_synack_retries; u8 sysctl_tcp_syncookies; + u8 sysctl_tcp_migrate_req; int sysctl_tcp_reordering; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; @@ -173,7 +174,6 @@ struct netns_ipv4 { int sysctl_tcp_fastopen; const struct tcp_congestion_ops __rcu *tcp_congestion_control; struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; - spinlock_t tcp_fastopen_ctx_lock; unsigned int sysctl_tcp_fastopen_blackhole_timeout; atomic_t tfo_active_disable_times; unsigned long tfo_active_disable_stamp; @@ -210,6 +210,7 @@ struct netns_ipv4 { #endif #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH + u32 sysctl_fib_multipath_hash_fields; u8 sysctl_fib_multipath_use_neigh; u8 sysctl_fib_multipath_hash_policy; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 6153c8067009..a4b550380316 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -28,8 +28,9 @@ struct netns_sysctl_ipv6 { int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; - u8 bindv6only; + u32 multipath_hash_fields; u8 multipath_hash_policy; + u8 bindv6only; u8 flowlabel_consistency; u8 auto_flowlabels; int icmpv6_time; @@ -50,6 +51,8 @@ struct netns_sysctl_ipv6 { int max_dst_opts_len; int max_hbh_opts_len; int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; bool skip_notify_on_dev_down; u8 fib_notify_on_flag_change; }; @@ -109,6 +112,7 @@ struct netns_ipv6 { spinlock_t lock; u32 seq; } ip6addrlbl_table; + struct ioam6_pernet_data *ioam6_data; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h new file mode 100644 index 000000000000..acedef12a35e --- /dev/null +++ b/include/net/netns/mctp.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MCTP per-net structures + */ + +#ifndef __NETNS_MCTP_H__ +#define __NETNS_MCTP_H__ + +#include <linux/types.h> + +struct netns_mctp { + /* Only updated under RTNL, entries freed via RCU */ + struct list_head routes; + + /* Bound sockets: list of sockets bound by type. + * This list is updated from non-atomic contexts (under bind_lock), + * and read (under rcu) in packet rx + */ + struct mutex bind_lock; + struct hlist_head binds; + + /* tag allocations. 
This list is read and updated from atomic contexts, + * but elements are free()ed after a RCU grace-period + */ + spinlock_t keys_lock; + struct hlist_head keys; + + /* MCTP network */ + unsigned int default_net; + + /* neighbour table */ + struct mutex neigh_lock; + struct list_head neighbours; +}; + +#endif /* __NETNS_MCTP_H__ */ diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index 15e2b13fb0c0..986a2a9cfdfa 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -12,7 +12,6 @@ struct netns_nf { #if defined CONFIG_PROC_FS struct proc_dir_entry *proc_netfilter; #endif - const struct nf_queue_handler __rcu *queue_handler; const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; #ifdef CONFIG_SYSCTL struct ctl_table_header *nf_log_dir_header; diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index a0f315effa94..40240722cdca 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -84,6 +84,9 @@ struct netns_sctp { /* HB.interval - 30 seconds */ unsigned int hb_interval; + /* The interval for PLPMTUD probe timer */ + unsigned int probe_interval; + /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) * Max.Init.Retransmits - 8 attempts diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h new file mode 100644 index 000000000000..ea8a9cf2619b --- /dev/null +++ b/include/net/netns/smc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETNS_SMC_H__ +#define __NETNS_SMC_H__ +#include <linux/mutex.h> +#include <linux/percpu.h> + +struct smc_stats_rsn; +struct smc_stats; +struct netns_smc { + /* per cpu counters for SMC */ + struct smc_stats __percpu *smc_stats; + /* protect fback_rsn */ + struct mutex mutex_fback_rsn; + struct smc_stats_rsn *fback_rsn; +}; +#endif diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h deleted file mode 100644 index d02316ec2906..000000000000 --- a/include/net/netns/x_tables.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __NETNS_X_TABLES_H -#define __NETNS_X_TABLES_H - -#include <linux/list.h> -#include <linux/netfilter_defs.h> - -struct netns_xt { - bool notrack_deprecated_warning; - bool clusterip_deprecated_warning; -}; -#endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index e816b6a3ef2b..947733a639a6 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -42,6 +42,7 @@ struct netns_xfrm { struct hlist_head __rcu *state_bydst; struct hlist_head __rcu *state_bysrc; struct hlist_head __rcu *state_byspi; + struct hlist_head __rcu *state_byseq; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; @@ -64,6 +65,13 @@ struct netns_xfrm { u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; + + u8 policy_default; +#define XFRM_POL_DEFAULT_IN 1 +#define XFRM_POL_DEFAULT_OUT 2 +#define XFRM_POL_DEFAULT_FWD 4 +#define XFRM_POL_DEFAULT_MASK 7 + #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_hdr; #endif @@ -74,6 +82,7 @@ struct netns_xfrm { #endif spinlock_t xfrm_state_lock; seqcount_spinlock_t xfrm_state_hash_generation; + seqcount_spinlock_t xfrm_policy_hash_generation; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h index 963db96bcbbb..bb3e8fdc0692 100644 --- a/include/net/nfc/digital.h +++ b/include/net/nfc/digital.h @@ -191,7 +191,7 @@ struct digital_poll_tech { struct nfc_digital_dev 
{ struct nfc_dev *nfc_dev; - struct nfc_digital_ops *ops; + const struct nfc_digital_ops *ops; u32 protocols; @@ -236,7 +236,7 @@ struct nfc_digital_dev { void (*skb_add_crc)(struct sk_buff *skb); }; -struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops, +struct nfc_digital_dev *nfc_digital_allocate_device(const struct nfc_digital_ops *ops, __u32 supported_protocols, __u32 driver_capabilities, int tx_headroom, diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index b35f37a57686..756c11084f65 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -118,7 +118,7 @@ struct nfc_hci_dev { struct sk_buff_head msg_rx_queue; - struct nfc_hci_ops *ops; + const struct nfc_hci_ops *ops; struct nfc_llc *llc; @@ -151,7 +151,7 @@ struct nfc_hci_dev { }; /* hci device allocation */ -struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, +struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops, struct nfc_hci_init_data *init_data, unsigned long quirks, u32 protocols, @@ -168,7 +168,7 @@ void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata); void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev); static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev, - struct nfc_vendor_cmd *cmds, + const struct nfc_vendor_cmd *cmds, int n_cmds) { return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds); diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 1df0f8074c9d..a964daedc17b 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -82,10 +82,10 @@ struct nci_ops { void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb); - struct nci_driver_ops *prop_ops; + const struct nci_driver_ops *prop_ops; size_t n_prop_ops; - struct nci_driver_ops *core_ops; + const struct nci_driver_ops *core_ops; size_t n_core_ops; }; @@ -194,7 +194,7 @@ struct nci_hci_dev { /* NCI Core structures */ struct nci_dev { struct nfc_dev *nfc_dev; - struct nci_ops *ops; + const struct nci_ops *ops; struct nci_hci_dev *hci_dev; int tx_headroom; @@ -267,7 +267,7 @@ struct nci_dev { }; /* ----- NCI Devices ----- */ -struct nci_dev *nci_allocate_device(struct nci_ops *ops, +struct nci_dev *nci_allocate_device(const struct nci_ops *ops, __u32 supported_protocols, int tx_headroom, int tx_tailroom); @@ -276,25 +276,27 @@ int nci_register_device(struct nci_dev *ndev); void nci_unregister_device(struct nci_dev *ndev); int nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, - unsigned long opt), - unsigned long opt, __u32 timeout); -int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload); -int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload); + const void *opt), + const void *opt, __u32 timeout); +int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, + const __u8 *payload); +int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, + const __u8 *payload); int nci_core_reset(struct nci_dev *ndev); int nci_core_init(struct nci_dev *ndev); int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb); -int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); +int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val); int nci_nfcee_discover(struct nci_dev *ndev, u8 action); int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode); int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, 
u8 number_destination_params, size_t params_len, - struct core_conn_create_dest_spec_params *params); + const struct core_conn_create_dest_spec_params *params); int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id); -int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len, +int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len, struct sk_buff **resp); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); @@ -343,7 +345,7 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev) } static inline int nci_set_vendor_cmds(struct nci_dev *ndev, - struct nfc_vendor_cmd *cmds, + const struct nfc_vendor_cmd *cmds, int n_cmds) { return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds); @@ -360,7 +362,7 @@ int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode, int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb); void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); -int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); +int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload); int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id); void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, @@ -378,7 +380,7 @@ void nci_req_complete(struct nci_dev *ndev, int result); struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, int conn_id); int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type, - struct dest_spec_params *params); + const struct dest_spec_params *params); /* ----- NCI status code ----- */ int nci_to_errno(__u8 code); diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 2cd3a261bcbc..5dee575fbe86 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -188,17 +188,17 @@ struct nfc_dev { struct rfkill *rfkill; - struct nfc_vendor_cmd *vendor_cmds; + const struct nfc_vendor_cmd *vendor_cmds; int n_vendor_cmds; - struct nfc_ops *ops; + const struct nfc_ops *ops; struct genl_info *cur_cmd_info; }; #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) extern struct class nfc_class; -struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, +struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops, u32 supported_protocols, int tx_headroom, int tx_tailroom); @@ -245,7 +245,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data) * * @dev: The nfc device */ -static inline void *nfc_get_drvdata(struct nfc_dev *dev) +static inline void *nfc_get_drvdata(const struct nfc_dev *dev) { return dev_get_drvdata(&dev->dev); } @@ -255,7 +255,7 @@ static inline void *nfc_get_drvdata(struct nfc_dev *dev) * * @dev: The nfc device whose name to return */ -static inline const char *nfc_device_name(struct nfc_dev *dev) +static inline const char *nfc_device_name(const struct nfc_dev *dev) { return dev_name(&dev->dev); } @@ -266,7 +266,7 @@ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp); int nfc_set_remote_general_bytes(struct nfc_dev *dev, - u8 *gt, u8 gt_len); + const u8 *gt, u8 gt_len); u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len); int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, @@ -280,7 +280,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); int nfc_tm_activated(struct nfc_dev *dev, 
u32 protocol, u8 comm_mode, - u8 *gb, size_t gb_len); + const u8 *gb, size_t gb_len); int nfc_tm_deactivated(struct nfc_dev *dev); int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb); @@ -297,7 +297,7 @@ void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, u8 payload_type, u8 direction); static inline int nfc_set_vendor_cmds(struct nfc_dev *dev, - struct nfc_vendor_cmd *cmds, + const struct nfc_vendor_cmd *cmds, int n_cmds) { if (dev->vendor_cmds || dev->n_vendor_cmds) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b4b6de909c93..a4082406a003 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -45,7 +45,10 @@ * Please note DMA-sync-for-CPU is still * device driver responsibility */ -#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV) +#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */ +#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\ + PP_FLAG_DMA_SYNC_DEV |\ + PP_FLAG_PAGE_FRAG) /* * Fast allocation side cache array/stack @@ -88,6 +91,9 @@ struct page_pool { unsigned long defer_warn; u32 pages_state_hold_cnt; + unsigned int frag_offset; + struct page *frag_page; + long frag_users; /* * Data structure for allocation side @@ -137,6 +143,18 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) return page_pool_alloc_pages(pool, gfp); } +struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, + unsigned int size, gfp_t gfp); + +static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, + unsigned int *offset, + unsigned int size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_frag(pool, offset, size, gfp); +} + /* get the stored dma direction. A driver might decide to treat this locally and * avoid the extra cache line from page_pool to determine the direction */ @@ -146,6 +164,8 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) return pool->p.dma_dir; } +bool page_pool_return_skb_page(struct page *page); + struct page_pool *page_pool_create(const struct page_pool_params *params); #ifdef CONFIG_PAGE_POOL @@ -196,19 +216,48 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, page_pool_put_full_page(pool, page, true); } +#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ + (sizeof(dma_addr_t) > sizeof(unsigned long)) + static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { - dma_addr_t ret = page->dma_addr[0]; - if (sizeof(dma_addr_t) > sizeof(unsigned long)) - ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16; + dma_addr_t ret = page->dma_addr; + + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; + return ret; } static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) { - page->dma_addr[0] = addr; - if (sizeof(dma_addr_t) > sizeof(unsigned long)) - page->dma_addr[1] = upper_32_bits(addr); + page->dma_addr = addr; + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + page->dma_addr_upper = upper_32_bits(addr); +} + +static inline void page_pool_set_frag_count(struct page *page, long nr) +{ + atomic_long_set(&page->pp_frag_count, nr); +} + +static inline long page_pool_atomic_sub_frag_count_return(struct page *page, + long nr) +{ + long ret; + + /* As suggested by Alexander, atomic_long_read() may cover up the + * reference count errors, so avoid calling atomic_long_read() in + * the cases of freeing or draining the page_frags, where we would + * not expect it to match or that are slowpath anyway. 
+ */ + if (__builtin_constant_p(nr) && + atomic_long_read(&page->pp_frag_count) == nr) + return 0; + + ret = atomic_long_sub_return(nr, &page->pp_frag_count); + WARN_ON(ret < 0); + return ret; } static inline bool is_page_pool_compiled_in(void) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ec7823921bd2..83a6d0792180 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -76,12 +76,10 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return block->q; } -int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res, bool compat_mode); -int tcf_classify_ingress(struct sk_buff *skb, - const struct tcf_block *ingress_block, - const struct tcf_proto *tp, struct tcf_result *res, - bool compat_mode); +int tcf_classify(struct sk_buff *skb, + const struct tcf_block *block, + const struct tcf_proto *tp, struct tcf_result *res, + bool compat_mode); #else static inline bool tcf_block_shared(struct tcf_block *block) @@ -138,20 +136,14 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, { } -static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, +static inline int tcf_classify(struct sk_buff *skb, + const struct tcf_block *block, + const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { return TC_ACT_UNSPEC; } -static inline int tcf_classify_ingress(struct sk_buff *skb, - const struct tcf_block *ingress_block, - const struct tcf_proto *tp, - struct tcf_result *res, bool compat_mode) -{ - return TC_ACT_UNSPEC; -} - #endif static inline unsigned long @@ -327,7 +319,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, - struct tcf_exts *exts, bool ovr, bool rtnl_held, + struct tcf_exts *exts, u32 flags, struct netlink_ext_ack *extack); void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); @@ -337,6 +329,9 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); /** * struct tcf_pkt_info - packet information + * + * @ptr: start of the pkt data + * @nexthdr: offset of the next header */ struct tcf_pkt_info { unsigned char * ptr; @@ -355,6 +350,7 @@ struct tcf_ematch_ops; * @ops: the operations lookup table of the corresponding ematch module * @datalen: length of the ematch specific configuration data * @data: ematch specific data + * @net: the network namespace */ struct tcf_ematch { struct tcf_ematch_ops * ops; @@ -820,10 +816,9 @@ enum tc_htb_command { struct tc_htb_qopt_offload { struct netlink_ext_ack *extack; enum tc_htb_command command; - u16 classid; u32 parent_classid; + u16 classid; u16 qid; - u16 moved_qid; u64 rate; u64 ceil; }; diff --git a/include/net/protocol.h b/include/net/protocol.h index 2b778e1d2d8f..f51c06ae365f 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -43,7 +43,6 @@ struct net_protocol { int (*err_handler)(struct sk_buff *skb, u32 info); unsigned int no_policy:1, - netns_ok:1, /* does the protocol do more stringent * icmp tag validation than simple * socket lookup? 
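As a usage note for the page_pool fragment allocator introduced above (PP_FLAG_PAGE_FRAG, page_pool_alloc_frag() and the pp_frag_count accounting): below is a minimal sketch of how a driver might carve fixed-size RX buffers out of pooled pages. It assumes a pool created with PP_FLAG_PAGE_FRAG (plus PP_FLAG_DMA_MAP, so the DMA address lookup works); my_rx_desc, my_rx_refill and MY_RX_BUF_SIZE are illustrative names, not part of the API.

#include <net/page_pool.h>

#define MY_RX_BUF_SIZE	2048			/* illustrative buffer size */

struct my_rx_desc {				/* illustrative RX descriptor */
	struct page	*page;
	unsigned int	offset;
	dma_addr_t	dma;
};

static int my_rx_refill(struct page_pool *pool, struct my_rx_desc *desc)
{
	unsigned int offset;
	struct page *page;

	/* Returns a page and the offset of a MY_RX_BUF_SIZE fragment
	 * within it; the pool tracks outstanding fragments through
	 * pp_frag_count, so the page is only recycled once every
	 * fragment has been returned.
	 */
	page = page_pool_dev_alloc_frag(pool, &offset, MY_RX_BUF_SIZE);
	if (!page)
		return -ENOMEM;

	desc->page = page;
	desc->offset = offset;
	desc->dma = page_pool_get_dma_addr(page) + offset;
	return 0;
}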
diff --git a/include/net/psample.h b/include/net/psample.h index e328c5127757..0509d2d6be67 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num); void psample_group_take(struct psample_group *group); void psample_group_put(struct psample_group *group); +struct sk_buff; + #if IS_ENABLED(CONFIG_PSAMPLE) void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 479f60ef54c0..9f48733bfd21 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -37,6 +37,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * @maxtype: Highest device specific netlink attribute number * @policy: Netlink policy for device specific attribute validation * @validate: Optional validation function for netlink/changelink parameters + * @alloc: netdev allocation function, can be %NULL and is then used + * in place of alloc_netdev_mqs(), in this case @priv_size + * and @setup are unused. Returns a netdev or ERR_PTR(). * @priv_size: sizeof net_device private space * @setup: net_device setup function * @newlink: Function for configuring and registering a new device @@ -63,6 +66,11 @@ struct rtnl_link_ops { const char *kind; size_t priv_size; + struct net_device *(*alloc)(struct nlattr *tb[], + const char *ifname, + unsigned char name_assign_type, + unsigned int num_tx_queues, + unsigned int num_rx_queues); void (*setup)(struct net_device *dev); bool netns_refund; @@ -145,7 +153,8 @@ struct rtnl_af_ops { u32 ext_filter_mask); int (*validate_link_af)(const struct net_device *dev, - const struct nlattr *attr); + const struct nlattr *attr, + struct netlink_ext_ack *extack); int (*set_link_af)(struct net_device *dev, const struct nlattr *attr, struct netlink_ext_ack *extack); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 1e625519ae96..c0069ac00e62 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -37,8 +37,15 @@ enum qdisc_state_t { __QDISC_STATE_SCHED, __QDISC_STATE_DEACTIVATED, __QDISC_STATE_MISSED, + __QDISC_STATE_DRAINING, }; +#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED) +#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING) + +#define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \ + QDISC_STATE_DRAINING) + struct qdisc_size_table { struct rcu_head rcu; struct list_head list; @@ -110,8 +117,6 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; spinlock_t seqlock; - /* for NOLOCK qdisc, true if there are no enqueued skbs */ - bool empty; struct rcu_head rcu; /* private data */ @@ -145,6 +150,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc) return (raw_read_seqcount(&qdisc->running) & 1) ? 
true : false; } +static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc) +{ + return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY); +} + static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) { return q->flags & TCQ_F_CPUSTATS; @@ -153,7 +163,7 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) static inline bool qdisc_is_empty(const struct Qdisc *qdisc) { if (qdisc_is_percpu_stats(qdisc)) - return READ_ONCE(qdisc->empty); + return nolock_qdisc_is_empty(qdisc); return !READ_ONCE(qdisc->q.qlen); } @@ -161,7 +171,13 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) { if (spin_trylock(&qdisc->seqlock)) - goto nolock_empty; + return true; + + /* Paired with smp_mb__after_atomic() to make sure + * STATE_MISSED checking is synchronized with clearing + * in pfifo_fast_dequeue(). + */ + smp_mb__before_atomic(); /* If the MISSED flag is set, it means other thread has * set the MISSED flag before second spin_trylock(), so @@ -180,14 +196,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) */ set_bit(__QDISC_STATE_MISSED, &qdisc->state); + /* spin_trylock() only has load-acquire semantic, so use + * smp_mb__after_atomic() to ensure STATE_MISSED is set + * before doing the second spin_trylock(). + */ + smp_mb__after_atomic(); + /* Retry again in case other CPU may not see the new flag * after it releases the lock at the end of qdisc_run_end(). */ - if (!spin_trylock(&qdisc->seqlock)) - return false; - -nolock_empty: - WRITE_ONCE(qdisc->empty, false); + return spin_trylock(&qdisc->seqlock); } else if (qdisc_is_running(qdisc)) { return false; } @@ -201,15 +219,14 @@ nolock_empty: static inline void qdisc_run_end(struct Qdisc *qdisc) { - write_seqcount_end(&qdisc->running); if (qdisc->flags & TCQ_F_NOLOCK) { spin_unlock(&qdisc->seqlock); if (unlikely(test_bit(__QDISC_STATE_MISSED, - &qdisc->state))) { - clear_bit(__QDISC_STATE_MISSED, &qdisc->state); + &qdisc->state))) __netif_schedule(qdisc); - } + } else { + write_seqcount_end(&qdisc->running); } } @@ -340,7 +357,7 @@ struct tcf_proto_ops { int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, - void **, bool, bool, + void **, u32, struct netlink_ext_ack *); int (*delete)(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 5e848884ff61..2058fabffbf6 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -59,6 +59,7 @@ enum sctp_verb { SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */ SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timers. */ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */ + SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer. */ SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */ SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. 
*/ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 14a0d22c9113..5859e0a16a58 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -77,6 +77,7 @@ enum sctp_event_timeout { SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, SCTP_EVENT_TIMEOUT_HEARTBEAT, SCTP_EVENT_TIMEOUT_RECONF, + SCTP_EVENT_TIMEOUT_PROBE, SCTP_EVENT_TIMEOUT_SACK, SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; @@ -200,6 +201,23 @@ enum sctp_sock_state { SCTP_SS_CLOSING = TCP_CLOSE_WAIT, }; +enum sctp_plpmtud_state { + SCTP_PL_DISABLED, + SCTP_PL_BASE, + SCTP_PL_SEARCH, + SCTP_PL_COMPLETE, + SCTP_PL_ERROR, +}; + +#define SCTP_BASE_PLPMTU 1200 +#define SCTP_MAX_PLPMTU 9000 +#define SCTP_MIN_PLPMTU 512 + +#define SCTP_MAX_PROBES 3 + +#define SCTP_PL_BIG_STEP 32 +#define SCTP_PL_MIN_STEP 4 + /* These functions map various type to printable names. */ const char *sctp_cname(const union sctp_subtype id); /* chunk types */ const char *sctp_oname(const union sctp_subtype id); /* other events */ @@ -342,8 +360,7 @@ enum { #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>, - * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, - * 192.88.99.0/24. + * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ @@ -351,7 +368,6 @@ enum { ((htonl(INADDR_BROADCAST) == a) || \ ipv4_is_multicast(a) || \ ipv4_is_zeronet(a) || \ - ipv4_is_test_198(a) || \ ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ @@ -424,4 +440,6 @@ enum { */ #define SCTP_AUTH_RANDOM_LENGTH 32 +#define SCTP_PROBE_TIMER_MIN 5000 + #endif /* __sctp_constants_h__ */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 86f74f2fe6de..69bab88ad66b 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -145,6 +145,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, struct sctp_transport **); void sctp_err_finish(struct sock *, struct sctp_transport *); +int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb); +int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb); void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu); void sctp_icmp_redirect(struct sock *, struct sctp_transport *, @@ -573,14 +575,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport * /* Calculate max payload size given a MTU, or the total overhead if * given MTU is zero */ -static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, - __u32 mtu, __u32 extra) +static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp, + const struct sctp_transport *t, + __u32 mtu, __u32 extra) { __u32 overhead = sizeof(struct sctphdr) + extra; if (sp) { overhead += sp->pf->af->net_header_len; - if (sp->udp_port) + if (sp->udp_port && (!t || t->encap_port)) overhead += sizeof(struct udphdr); } else { overhead += sizeof(struct ipv6hdr); @@ -592,6 +595,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, return mtu ? 
mtu - overhead : overhead; } +static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, + __u32 mtu, __u32 extra) +{ + return __sctp_mtu_payload(sp, NULL, mtu, extra); +} + static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) { return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst), @@ -615,6 +624,48 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); } +static inline int sctp_transport_pl_hlen(struct sctp_transport *t) +{ + return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0); +} + +static inline void sctp_transport_pl_reset(struct sctp_transport *t) +{ + if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) && + (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) { + if (t->pl.state == SCTP_PL_DISABLED) { + t->pl.state = SCTP_PL_BASE; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pl.probe_size = SCTP_BASE_PLPMTU; + sctp_transport_reset_probe_timer(t); + } + } else { + if (t->pl.state != SCTP_PL_DISABLED) { + if (del_timer(&t->probe_timer)) + sctp_transport_put(t); + t->pl.state = SCTP_PL_DISABLED; + } + } +} + +static inline void sctp_transport_pl_update(struct sctp_transport *t) +{ + if (t->pl.state == SCTP_PL_DISABLED) + return; + + if (del_timer(&t->probe_timer)) + sctp_transport_put(t); + + t->pl.state = SCTP_PL_BASE; + t->pl.pmtu = SCTP_BASE_PLPMTU; + t->pl.probe_size = SCTP_BASE_PLPMTU; +} + +static inline bool sctp_transport_pl_enabled(struct sctp_transport *t) +{ + return t->pl.state != SCTP_PL_DISABLED; +} + static inline bool sctp_newsk_ready(const struct sock *sk) { return sock_flag(sk, SOCK_DEAD) || sk->sk_socket; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index fd223c94589a..2eb6d7c2c931 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -151,6 +151,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort; /* Prototypes for timeout event state functions. */ sctp_state_fn_t sctp_sf_do_6_3_3_rtx; sctp_state_fn_t sctp_sf_send_reconf; +sctp_state_fn_t sctp_sf_send_probe; sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; @@ -225,11 +226,13 @@ struct sctp_chunk *sctp_make_new_encap_port( const struct sctp_association *asoc, const struct sctp_chunk *chunk); struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, - const struct sctp_transport *transport); + const struct sctp_transport *transport, + __u32 probe_size); struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const void *payload, const size_t paylen); +struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len); struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, @@ -310,6 +313,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type, void sctp_generate_t3_rtx_event(struct timer_list *t); void sctp_generate_heartbeat_event(struct timer_list *t); void sctp_generate_reconf_event(struct timer_list *t); +void sctp_generate_probe_event(struct timer_list *t); void sctp_generate_proto_unreach_event(struct timer_list *t); void sctp_ootb_pkt_free(struct sctp_packet *packet); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 1aa585216f34..651bba654d77 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -177,6 +177,7 @@ struct sctp_sock { * will be inherited by all new associations. 
*/ __u32 hbinterval; + __u32 probe_interval; __be16 udp_port; __be16 encap_port; @@ -385,6 +386,7 @@ struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; + __u32 probe_size; }; int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, @@ -461,7 +463,7 @@ struct sctp_af { int saddr); void (*from_sk) (union sctp_addr *, struct sock *sk); - void (*from_addr_param) (union sctp_addr *, + bool (*from_addr_param) (union sctp_addr *, union sctp_addr_param *, __be16 port, int iif); int (*to_addr_param) (const union sctp_addr *, @@ -656,6 +658,7 @@ struct sctp_chunk { data_accepted:1, /* At least 1 chunk accepted */ auth:1, /* IN: was auth'ed | OUT: needs auth */ has_asconf:1, /* IN: have seen an asconf before */ + pmtu_probe:1, /* Used by PLPMTUD, can be set in s HB chunk */ tsn_missing_report:2, /* Data chunk missing counter. */ fast_retransmit:2; /* Is this chunk fast retransmitted? */ }; @@ -858,6 +861,7 @@ struct sctp_transport { * the destination address every heartbeat interval. */ unsigned long hbinterval; + unsigned long probe_interval; /* SACK delay timeout */ unsigned long sackdelay; @@ -934,6 +938,9 @@ struct sctp_transport { /* Timer to handler reconf chunk rtx */ struct timer_list reconf_timer; + /* Timer to send a probe HB packet for PLPMTUD */ + struct timer_list probe_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues. This probably ought to be a private struct @@ -976,6 +983,16 @@ struct sctp_transport { char cacc_saw_newack; } cacc; + struct { + __u32 last_rtx_chunks; + __u16 pmtu; + __u16 probe_size; + __u16 probe_high; + __u8 probe_count:3; + __u8 raise_count:5; + __u8 state; + } pl; /* plpmtud related */ + /* 64-bit random number sent with heartbeat. */ __u64 hb_nonce; @@ -993,6 +1010,7 @@ void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_t3_rtx(struct sctp_transport *); void sctp_transport_reset_hb_timer(struct sctp_transport *); void sctp_transport_reset_reconf_timer(struct sctp_transport *transport); +void sctp_transport_reset_probe_timer(struct sctp_transport *transport); int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); @@ -1007,6 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); +bool sctp_transport_pl_send(struct sctp_transport *t); +bool sctp_transport_pl_recv(struct sctp_transport *t); /* This is the structure we use to queue packets as they come into @@ -1795,6 +1815,7 @@ struct sctp_association { * will be inherited by all new transports. */ unsigned long hbinterval; + unsigned long probe_interval; __be16 encap_port; diff --git a/include/net/sock.h b/include/net/sock.h index 0e962d8bc73b..66a9a90f9558 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -68,6 +68,7 @@ #include <net/tcp_states.h> #include <linux/net_tstamp.h> #include <net/l3mdev.h> +#include <uapi/linux/socket.h> /* * This structure really needs to be cleaned up. 
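An illustrative aside on the SCTP PLPMTUD fields just added (the pl sub-struct of sctp_transport, the probe timer, and the SCTP_PL_*/SCTP_*_PLPMTU constants): the fragment below is a toy model of the probe state machine those fields support, not the kernel implementation; the real transitions live in sctp_transport_pl_send()/sctp_transport_pl_recv() and also handle probe_high, the error path and timer rearming.

/* Toy model only: on an acked probe, confirm the current size and
 * search upward in SCTP_PL_BIG_STEP increments up to SCTP_MAX_PLPMTU.
 */
static void toy_pl_probe_acked(struct sctp_transport *t)
{
	switch (t->pl.state) {
	case SCTP_PL_BASE:
		/* Base-sized probe (SCTP_BASE_PLPMTU) got through:
		 * start searching for a larger PMTU.
		 */
		t->pl.state = SCTP_PL_SEARCH;
		t->pl.pmtu = t->pl.probe_size;
		t->pl.probe_size += SCTP_PL_BIG_STEP;
		break;
	case SCTP_PL_SEARCH:
		t->pl.pmtu = t->pl.probe_size;
		if (t->pl.probe_size + SCTP_PL_BIG_STEP > SCTP_MAX_PLPMTU)
			t->pl.state = SCTP_PL_COMPLETE;
		else
			t->pl.probe_size += SCTP_PL_BIG_STEP;
		break;
	default:
		break;	/* DISABLED/COMPLETE/ERROR: nothing to do here */
	}
}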
@@ -316,7 +317,9 @@ struct bpf_local_storage; * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only - * @sk_tsflags: SO_TIMESTAMPING socket options + * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock + * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals @@ -493,6 +496,7 @@ struct sock { seqlock_t sk_stamp_seq; #endif u16 sk_tsflags; + int sk_bind_phc; u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; @@ -1435,8 +1439,6 @@ static inline int __sk_prot_rehash(struct sock *sk) #define RCV_SHUTDOWN 1 #define SEND_SHUTDOWN 2 -#define SOCK_SNDBUF_LOCK 1 -#define SOCK_RCVBUF_LOCK 2 #define SOCK_BINDADDR_LOCK 4 #define SOCK_BINDPORT_LOCK 8 @@ -1934,7 +1936,8 @@ static inline u32 net_tx_rndhash(void) static inline void sk_set_txhash(struct sock *sk) { - sk->sk_txhash = net_tx_rndhash(); + /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */ + WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); } static inline bool sk_rethink_txhash(struct sock *sk) @@ -2206,9 +2209,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock, static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) { - if (sk->sk_txhash) { + /* This pairs with WRITE_ONCE() in sk_set_txhash() */ + u32 txhash = READ_ONCE(sk->sk_txhash); + + if (txhash) { skb->l4_hash = 1; - skb->hash = sk->sk_txhash; + skb->hash = txhash; } } @@ -2242,6 +2248,15 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc return false; } +static inline void skb_prepare_for_gro(struct sk_buff *skb) +{ + if (skb->destructor != sock_wfree) { + skb_orphan(skb); + return; + } + skb->slow_gro = 1; +} + void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); @@ -2266,12 +2281,19 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk); static inline int sock_error(struct sock *sk) { int err; - if (likely(!sk->sk_err)) + + /* Avoid an atomic operation for the common case. + * This is racy since another cpu/thread can change sk_err under us. + */ + if (likely(data_race(!sk->sk_err))) return 0; + err = xchg(&sk->sk_err, 0); return -err; } +void sk_error_report(struct sock *sk); + static inline unsigned long sock_wspace(struct sock *sk) { int amt = 0; @@ -2378,6 +2400,11 @@ static inline gfp_t gfp_any(void) return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } +static inline gfp_t gfp_memcg_charge(void) +{ + return in_softirq() ? GFP_NOWAIT : GFP_KERNEL; +} + static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) { return noblock ? 
0 : sk->sk_rcvtimeo; @@ -2690,6 +2717,7 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; +#define SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) @@ -2743,6 +2771,10 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) void sock_def_readable(struct sock *sk); int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); +void sock_set_timestamp(struct sock *sk, int optname, bool valbool); +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping); + void sock_enable_timestamps(struct sock *sk); void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 505f1e18e9bf..473b0b0fa4ab 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -13,8 +13,9 @@ extern spinlock_t reuseport_lock; struct sock_reuseport { struct rcu_head rcu; - u16 max_socks; /* length of socks */ - u16 num_socks; /* elements in socks */ + u16 max_socks; /* length of socks */ + u16 num_socks; /* elements in socks */ + u16 num_closed_socks; /* closed elements in socks */ /* The last synq overflow event timestamp of this * reuse->socks[] group. */ @@ -31,10 +32,14 @@ extern int reuseport_alloc(struct sock *sk, bool bind_inany); extern int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany); extern void reuseport_detach_sock(struct sock *sk); +void reuseport_stop_listen_sock(struct sock *sk); extern struct sock *reuseport_select_sock(struct sock *sk, u32 hash, struct sk_buff *skb, int hdr_len); +struct sock *reuseport_migrate_sock(struct sock *sk, + struct sock *migrating_sk, + struct sk_buff *skb); extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index f1a5a9a3634d..60d806b6a5ae 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -180,6 +180,14 @@ struct switchdev_obj_in_state_mrp { typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); +struct switchdev_brport { + struct net_device *dev; + const void *ctx; + struct notifier_block *atomic_nb; + struct notifier_block *blocking_nb; + bool tx_fwd_offload; +}; + enum switchdev_notifier_type { SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, SWITCHDEV_FDB_DEL_TO_BRIDGE, @@ -197,11 +205,15 @@ enum switchdev_notifier_type { SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE, SWITCHDEV_VXLAN_FDB_OFFLOADED, + + SWITCHDEV_BRPORT_OFFLOADED, + SWITCHDEV_BRPORT_UNOFFLOADED, }; struct switchdev_notifier_info { struct net_device *dev; struct netlink_ext_ack *extack; + const void *ctx; }; struct switchdev_notifier_fdb_info { @@ -225,6 +237,11 @@ struct switchdev_notifier_port_attr_info { bool handled; }; +struct switchdev_notifier_brport_info { + struct switchdev_notifier_info info; /* must be first */ + const struct switchdev_brport brport; +}; + static inline struct net_device * switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info) { @@ -237,8 +254,25 @@ switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info) return info->extack; } +static inline bool +switchdev_fdb_is_dynamically_learned(const struct switchdev_notifier_fdb_info *fdb_info) +{ + return !fdb_info->added_by_user && !fdb_info->is_local; +} + 
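Before the CONFIG_NET_SWITCHDEV block that follows, a hedged sketch of how a driver's atomic notifier might use the new switchdev_fdb_is_dynamically_learned() helper so that only dynamically learned entries are offloaded; my_switchdev_event and my_port_fdb_add are illustrative names, not existing symbols.

/* Illustrative stub; a real driver would program its hardware FDB here */
static void my_port_fdb_add(struct net_device *dev,
			    const unsigned char *addr, u16 vid);

static int my_switchdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: {
		const struct switchdev_notifier_fdb_info *fdb_info = ptr;

		/* Static and locally terminated entries stay under
		 * bridge/user control; offload learned entries only.
		 */
		if (!switchdev_fdb_is_dynamically_learned(fdb_info))
			break;
		my_port_fdb_add(dev, fdb_info->addr, fdb_info->vid);
		break;
	}
	}
	return NOTIFY_DONE;
}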
#ifdef CONFIG_NET_SWITCHDEV +int switchdev_bridge_port_offload(struct net_device *brport_dev, + struct net_device *dev, const void *ctx, + struct notifier_block *atomic_nb, + struct notifier_block *blocking_nb, + bool tx_fwd_offload, + struct netlink_ext_ack *extack); +void switchdev_bridge_port_unoffload(struct net_device *brport_dev, + const void *ctx, + struct notifier_block *atomic_nb, + struct notifier_block *blocking_nb); + void switchdev_deferred_process(void); int switchdev_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, @@ -265,26 +299,69 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, struct net_device *group_dev, bool joining); +int switchdev_handle_fdb_add_to_device(struct net_device *dev, + const struct switchdev_notifier_fdb_info *fdb_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*add_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info), + int (*lag_add_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info)); + +int switchdev_handle_fdb_del_to_device(struct net_device *dev, + const struct switchdev_notifier_fdb_info *fdb_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*del_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info), + int (*lag_del_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info)); + int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*add_cb)(struct net_device *dev, + int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)); int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*del_cb)(struct net_device *dev, + int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)); int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), - int (*set_cb)(struct net_device *dev, + int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)); #else +static inline int +switchdev_bridge_port_offload(struct net_device *brport_dev, + struct net_device *dev, const void *ctx, + struct notifier_block *atomic_nb, + struct notifier_block *blocking_nb, + bool tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static inline void +switchdev_bridge_port_unoffload(struct net_device *brport_dev, + const void *ctx, + struct notifier_block *atomic_nb, + struct notifier_block *blocking_nb) +{ +} + static inline void switchdev_deferred_process(void) { } @@ -349,10 +426,42 @@ call_switchdev_blocking_notifiers(unsigned long val, } static inline int +switchdev_handle_fdb_add_to_device(struct net_device *dev, + const struct switchdev_notifier_fdb_info *fdb_info, + bool (*check_cb)(const 
struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*add_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info), + int (*lag_add_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info)) +{ + return 0; +} + +static inline int +switchdev_handle_fdb_del_to_device(struct net_device *dev, + const struct switchdev_notifier_fdb_info *fdb_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*del_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info), + int (*lag_del_cb)(struct net_device *dev, + const struct net_device *orig_dev, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info)) +{ + return 0; +} + +static inline int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*add_cb)(struct net_device *dev, + int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)) { @@ -363,7 +472,7 @@ static inline int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), - int (*del_cb)(struct net_device *dev, + int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)) { return 0; @@ -373,7 +482,7 @@ static inline int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), - int (*set_cb)(struct net_device *dev, + int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)) { diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index f051046ba034..f94b8bc26f9e 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -16,6 +16,7 @@ struct tcf_vlan_params { u16 tcfv_push_vid; __be16 tcfv_push_proto; u8 tcfv_push_prio; + bool tcfv_push_prio_exists; struct rcu_head rcu; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index d05193cb0d99..3166dc15d7d6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -412,6 +412,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); int tcp_set_rcvlowat(struct sock *sk, int val); int tcp_set_window_clamp(struct sock *sk, int val); +void tcp_update_recv_tstamps(struct sk_buff *skb, + struct scm_timestamping_internal *tss); +void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, + struct scm_timestamping_internal *tss); void tcp_data_ready(struct sock *sk); #ifdef CONFIG_MMU int tcp_mmap(struct file *file, struct socket *sock, @@ -682,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp) static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) { + /* mptcp hooks are only on the slow path */ + if (sk_is_mptcp((struct sock *)tp)) + return; + tp->pred_flags = htonl((tp->tcp_header_len << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd); @@ -1701,7 +1709,6 @@ struct tcp_fastopen_context { struct rcu_head rcu; }; -extern unsigned int 
sysctl_tcp_fastopen_blackhole_timeout; void tcp_fastopen_active_disable(struct sock *sk); bool tcp_fastopen_active_should_disable(struct sock *sk); void tcp_fastopen_active_disable_ofo_check(struct sock *sk); @@ -1951,7 +1958,6 @@ struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; - struct tcp_seq_afinfo *bpf_seq_afinfo; int bucket, offset, sbucket, num; loff_t last_pos; }; diff --git a/include/net/tls.h b/include/net/tls.h index 8341a8d1e807..be4b3e1cac46 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -79,8 +79,6 @@ __SNMP_INC_STATS((net)->mib.tls_statistics, field) #define TLS_INC_STATS(net, field) \ SNMP_INC_STATS((net)->mib.tls_statistics, field) -#define __TLS_DEC_STATS(net, field) \ - __SNMP_DEC_STATS((net)->mib.tls_statistics, field) #define TLS_DEC_STATS(net, field) \ SNMP_DEC_STATS((net)->mib.tls_statistics, field) @@ -471,7 +469,7 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) static inline void tls_err_abort(struct sock *sk, int err) { sk->sk_err = err; - sk->sk_error_report(sk); + sk_error_report(sk); } static inline bool tls_bigint_increment(unsigned char *seq, int len) diff --git a/include/net/xdp.h b/include/net/xdp.h index a5bc214a49d9..ad5b02dcb6f4 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -170,6 +170,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct net_device *dev); int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp); +struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf); static inline void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) @@ -275,6 +276,11 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp) return unlikely(xdp->data_meta > xdp->data); } +static inline bool xdp_metalen_invalid(unsigned long metalen) +{ + return (metalen & (sizeof(__u32) - 1)) || (metalen > 32); +} + struct xdp_attachment_info { struct bpf_prog *prog; u32 flags; diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 9c0722c6d7ac..fff069d2ed1b 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -37,7 +37,7 @@ struct xdp_umem { struct xsk_map { struct bpf_map map; spinlock_t lock; /* Synchronize map updates */ - struct xdp_sock *xsk_map[]; + struct xdp_sock __rcu *xsk_map[]; }; struct xdp_sock { diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c58a6d4eb610..2308210793a0 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -145,6 +145,12 @@ enum { XFRM_MODE_FLAG_TUNNEL = 1, }; +enum xfrm_replay_mode { + XFRM_REPLAY_MODE_LEGACY, + XFRM_REPLAY_MODE_BMP, + XFRM_REPLAY_MODE_ESN, +}; + /* Full description of state of transformer. */ struct xfrm_state { possible_net_t xs_net; @@ -154,6 +160,7 @@ struct xfrm_state { }; struct hlist_node bysrc; struct hlist_node byspi; + struct hlist_node byseq; refcount_t refcnt; spinlock_t lock; @@ -214,9 +221,8 @@ struct xfrm_state { struct xfrm_replay_state preplay; struct xfrm_replay_state_esn *preplay_esn; - /* The functions for replay detection. 
*/ - const struct xfrm_replay *repl; - + /* replay detection mode */ + enum xfrm_replay_mode repl_mode; /* internal flag that only holds state for delayed aevent at the * moment */ @@ -296,18 +302,6 @@ struct km_event { struct net *net; }; -struct xfrm_replay { - void (*advance)(struct xfrm_state *x, __be32 net_seq); - int (*check)(struct xfrm_state *x, - struct sk_buff *skb, - __be32 net_seq); - int (*recheck)(struct xfrm_state *x, - struct sk_buff *skb, - __be32 net_seq); - void (*notify)(struct xfrm_state *x, int event); - int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); -}; - struct xfrm_if_cb { struct xfrm_if *(*decode_session)(struct sk_buff *skb, unsigned short family); @@ -387,7 +381,6 @@ void xfrm_flush_gc(void); void xfrm_state_delete_tunnel(struct xfrm_state *x); struct xfrm_type { - char *description; struct module *owner; u8 proto; u8 flags; @@ -402,14 +395,12 @@ struct xfrm_type { int (*output)(struct xfrm_state *, struct sk_buff *pskb); int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); - int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); }; int xfrm_register_type(const struct xfrm_type *type, unsigned short family); void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); struct xfrm_type_offload { - char *description; struct module *owner; u8 proto; void (*encap)(struct xfrm_state *, struct sk_buff *pskb); @@ -1024,6 +1015,7 @@ struct xfrm_offload { #define CRYPTO_INVALID_PROTOCOL 128 __u8 proto; + __u8 inner_ipproto; }; struct sec_path { @@ -1083,6 +1075,22 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un } #ifdef CONFIG_XFRM +static inline bool +xfrm_default_allow(struct net *net, int dir) +{ + u8 def = net->xfrm.policy_default; + + switch (dir) { + case XFRM_POLICY_IN: + return def & XFRM_POL_DEFAULT_IN ? false : true; + case XFRM_POLICY_OUT: + return def & XFRM_POL_DEFAULT_OUT ? false : true; + case XFRM_POLICY_FWD: + return def & XFRM_POL_DEFAULT_FWD ? 
false : true; + } + return false; +} + int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family); @@ -1096,9 +1104,13 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, if (sk && sk->sk_policy[XFRM_POLICY_IN]) return __xfrm_policy_check(sk, ndir, skb, family); - return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || - (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || - __xfrm_policy_check(sk, ndir, skb, family); + if (xfrm_default_allow(net, dir)) + return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || + __xfrm_policy_check(sk, ndir, skb, family); + else + return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || + __xfrm_policy_check(sk, ndir, skb, family); } static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) @@ -1150,9 +1162,13 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) { struct net *net = dev_net(skb->dev); - return !net->xfrm.policy_count[XFRM_POLICY_OUT] || - (skb_dst(skb)->flags & DST_NOXFRM) || - __xfrm_route_forward(skb, family); + if (xfrm_default_allow(net, XFRM_POLICY_FWD)) + return !net->xfrm.policy_count[XFRM_POLICY_OUT] || + (skb_dst(skb)->flags & DST_NOXFRM) || + __xfrm_route_forward(skb, family); + else + return (skb_dst(skb)->flags & DST_NOXFRM) || + __xfrm_route_forward(skb, family); } static inline int xfrm4_route_forward(struct sk_buff *skb) @@ -1546,6 +1562,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); +u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); @@ -1570,7 +1587,6 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); int xfrm4_rcv(struct sk_buff *skb); -int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { @@ -1581,7 +1597,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) } int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); -int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); @@ -1605,9 +1620,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); -int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); -int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, - u8 **prevhdr); #ifdef CONFIG_XFRM void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); @@ -1721,6 +1733,12 @@ static inline int xfrm_policy_id2dir(u32 index) } #ifdef CONFIG_XFRM +void xfrm_replay_advance(struct xfrm_state *x, 
__be32 net_seq); +int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); +void xfrm_replay_notify(struct xfrm_state *x, int event); +int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb); +int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); + static inline int xfrm_aevent_is_on(struct net *net) { struct sock *nlsk; diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index eaa8386dbc63..7a9a23e7a604 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, { bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE; - if (pool->dma_pages_cnt && cross_pg) { + if (likely(!cross_pg)) + return false; + + if (pool->dma_pages_cnt) { return !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK); } - return false; + + /* skb path */ + return addr + len > pool->addrs_cnt; } static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr) diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 57c1ac881d08..7e542205861c 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -206,11 +206,6 @@ static inline u8 ib_get_lver(struct ib_header *hdr) IB_LVER_MASK); } -static inline u16 ib_get_len(struct ib_header *hdr) -{ - return (u16)(be16_to_cpu(hdr->lrh[2])); -} - static inline u32 ib_get_qkey(struct ib_other_headers *ohdr) { return be32_to_cpu(ohdr->u.ud.deth[0]); diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index f1d34f06a68b..465b0d0bdaf8 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -718,27 +718,26 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc); /** - * ib_cancel_mad - Cancels an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. - * @send_buf: Indicates the MAD to cancel. - * - * MADs will be returned to the user through the corresponding - * ib_mad_send_handler. - */ -void ib_cancel_mad(struct ib_mad_agent *mad_agent, - struct ib_mad_send_buf *send_buf); - -/** * ib_modify_mad - Modifies an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. * @send_buf: Indicates the MAD to modify. * @timeout_ms: New timeout value for sent MAD. * * This call will reset the timeout value for a sent MAD to the specified * value. */ -int ib_modify_mad(struct ib_mad_agent *mad_agent, - struct ib_mad_send_buf *send_buf, u32 timeout_ms); +int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms); + +/** + * ib_cancel_mad - Cancels an outstanding send MAD operation. + * @send_buf: Indicates the MAD to cancel. + * + * MADs will be returned to the user through the corresponding + * ib_mad_send_handler. 
+ */ +static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf) +{ + ib_modify_mad(send_buf, 0); +} /** * ib_create_send_mad - Allocate and initialize a data buffer and work request diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index ba3c808a3789..3634d4cc7a56 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -366,20 +366,6 @@ struct ib_sa_mcmember_rec { #define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF -struct ib_sa_service_rec { - u64 id; - union ib_gid gid; - __be16 pkey; - /* reserved */ - u32 lease; - u8 key[16]; - u8 name[64]; - u8 data8[16]; - u16 data16[8]; - u32 data32[4]; - u64 data64[2]; -}; - #define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0) #define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1) #define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2) @@ -430,16 +416,6 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, void *context), void *context, struct ib_sa_query **query); -int ib_sa_service_rec_query(struct ib_sa_client *client, - struct ib_device *device, u32 port_num, u8 method, - struct ib_sa_service_rec *rec, - ib_sa_comp_mask comp_mask, unsigned long timeout_ms, - gfp_t gfp_mask, - void (*callback)(int status, - struct ib_sa_service_rec *resp, - void *context), - void *context, struct ib_sa_query **sa_query); - struct ib_sa_multicast { struct ib_sa_mcmember_rec rec; ib_sa_comp_mask comp_mask; diff --git a/include/rdma/ib_sysfs.h b/include/rdma/ib_sysfs.h new file mode 100644 index 000000000000..3b77cfd74d9a --- /dev/null +++ b/include/rdma/ib_sysfs.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2021 Mellanox Technologies Ltd. All rights reserved. + */ +#ifndef DEF_RDMA_IB_SYSFS_H +#define DEF_RDMA_IB_SYSFS_H + +#include <linux/sysfs.h> + +struct ib_device; + +struct ib_port_attribute { + struct attribute attr; + ssize_t (*show)(struct ib_device *ibdev, u32 port_num, + struct ib_port_attribute *attr, char *buf); + ssize_t (*store)(struct ib_device *ibdev, u32 port_num, + struct ib_port_attribute *attr, const char *buf, + size_t count); +}; + +#define IB_PORT_ATTR_RW(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_RW(_name) + +#define IB_PORT_ATTR_ADMIN_RW(_name) \ + struct ib_port_attribute ib_port_attr_##_name = \ + __ATTR_RW_MODE(_name, 0600) + +#define IB_PORT_ATTR_RO(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_RO(_name) + +#define IB_PORT_ATTR_WO(_name) \ + struct ib_port_attribute ib_port_attr_##_name = __ATTR_WO(_name) + +struct ib_device *ib_port_sysfs_get_ibdev_kobj(struct kobject *kobj, + u32 *port_num); + +#endif diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 676c57f5ca80..5ae9dff74dac 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -26,9 +26,7 @@ struct ib_umem { u32 is_odp : 1; u32 is_dmabuf : 1; struct work_struct work; - struct sg_table sg_head; - int nmap; - unsigned int sg_nents; + struct sg_append_table sgt_append; }; struct ib_umem_dmabuf { @@ -56,7 +54,7 @@ static inline int ib_umem_offset(struct ib_umem *umem) static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem, unsigned long pgsz) { - return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) & + return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & (pgsz - 1); } @@ -77,7 +75,8 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, struct ib_umem *umem, unsigned long pgsz) { - __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz); + 
__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl, + umem->sgt_append.sgt.nents, pgsz); } /** @@ -128,7 +127,7 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem, unsigned long pgsz_bitmap, u64 pgoff_bitmask) { - struct scatterlist *sg = umem->sg_head.sgl; + struct scatterlist *sg = umem->sgt_append.sgt.sgl; dma_addr_t dma_addr; dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7e2f3699b898..4b50d9a3018a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -50,6 +50,8 @@ struct ib_uqp_object; struct ib_usrq_object; struct ib_uwq_object; struct rdma_cm_id; +struct ib_port; +struct hw_stats_device_data; extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -2139,7 +2141,6 @@ struct ib_flow_action { }; struct ib_mad; -struct ib_grh; enum ib_process_mad_flags { IB_MAD_IGNORE_MKEY = 1, @@ -2175,15 +2176,17 @@ struct ib_port_data { struct ib_port_immutable immutable; spinlock_t pkey_list_lock; + + spinlock_t netdev_lock; + struct list_head pkey_list; struct ib_port_cache cache; - spinlock_t netdev_lock; struct net_device __rcu *netdev; struct hlist_node ndev_hash_link; struct rdma_port_counter port_counter; - struct rdma_hw_stats *hw_stats; + struct ib_port *sysfs; }; /* rdma netdev type - specifies protocol type */ @@ -2265,8 +2268,13 @@ struct iw_cm_conn_param; !__same_type(((struct drv_struct *)NULL)->member, \ struct ib_struct))) -#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ - ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) +#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ + ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ + gfp, false)) + +#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \ + ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ + GFP_KERNEL, true)) #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) @@ -2299,6 +2307,14 @@ struct ib_device_ops { u32 uverbs_abi_ver; unsigned int uverbs_no_driver_id_binding:1; + /* + * NOTE: New drivers should not make use of device_group; instead new + * device parameter should be exposed via netlink command. This + * mechanism exists only for existing drivers. + */ + const struct attribute_group *device_group; + const struct attribute_group **port_groups; + int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, const struct ib_send_wr **bad_send_wr); int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, @@ -2424,9 +2440,8 @@ struct ib_device_ops { struct ib_udata *udata); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); - struct ib_qp *(*create_qp)(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata); + int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata); int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_udata *udata); int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, @@ -2459,6 +2474,14 @@ struct ib_device_ops { enum ib_uverbs_advise_mr_advice advice, u32 flags, struct ib_sge *sg_list, u32 num_sge, struct uverbs_attr_bundle *attrs); + + /* + * Kernel users should universally support relaxed ordering (RO), as + * they are designed to read data only after observing the CQE and use + * the DMA API correctly. 
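The ib_umem conversion above folds the old sg_head/nmap/sg_nents trio into a single struct sg_append_table. Driver code that touched the scatterlist directly follows the same access path as the updated helpers; a hedged sketch (umem assumed valid and DMA-mapped), mirroring the new ib_umem_dma_offset():

	static dma_addr_t umem_first_dma_addr(struct ib_umem *umem)
	{
		/* sgl now lives under sgt_append.sgt instead of sg_head */
		return sg_dma_address(umem->sgt_append.sgt.sgl) +
		       ib_umem_offset(umem);
	}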
+ * + * Some drivers implicitly enable RO if platform supports it. + */ int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, @@ -2523,13 +2546,14 @@ struct ib_device_ops { unsigned int *meta_sg_offset); /** - * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the - * driver initialized data. The struct is kfree()'ed by the sysfs - * core when the device is removed. A lifespan of -1 in the return - * struct tells the core to set a default lifespan. + * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and + * fill in the driver initialized data. The struct is kfree()'ed by + * the sysfs core when the device is removed. A lifespan of -1 in the + * return struct tells the core to set a default lifespan. */ - struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, - u32 port_num); + struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device); + struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device, + u32 port_num); /** * get_hw_stats - Fill in the counter value(s) in the stats struct. * @index - The index in the value array we wish to have updated, or @@ -2544,12 +2568,7 @@ struct ib_device_ops { */ int (*get_hw_stats)(struct ib_device *device, struct rdma_hw_stats *stats, u32 port, int index); - /* - * This function is called once for each port when a ib device is - * registered. - */ - int (*init_port)(struct ib_device *device, u32 port_num, - struct kobject *port_sysfs); + /** * Allows rdma drivers to add their own restrack attributes. */ @@ -2620,11 +2639,18 @@ struct ib_device_ops { int (*query_ucontext)(struct ib_ucontext *context, struct uverbs_attr_bundle *attrs); + /* + * Provide NUMA node. This API exists for rdmavt/hfi1 only. + * Everyone else relies on Linux memory management model. + */ + int (*get_numa_node)(struct ib_device *dev); + DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); DECLARE_RDMA_OBJ_SIZE(ib_mw); DECLARE_RDMA_OBJ_SIZE(ib_pd); + DECLARE_RDMA_OBJ_SIZE(ib_qp); DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_ucontext); @@ -2675,11 +2701,12 @@ struct ib_device { struct ib_core_device coredev; }; - /* First group for device attributes, - * Second group for driver provided attributes (optional). - * It is NULL terminated array. + /* First group is for device attributes, + * Second group is for driver provided attributes (optional). + * Third group is for the hw_stats + * It is a NULL terminated array. */ - const struct attribute_group *groups[3]; + const struct attribute_group *groups[4]; u64 uverbs_cmd_mask; @@ -2694,8 +2721,7 @@ struct ib_device { u8 node_type; u32 phys_port_cnt; struct ib_device_attr attrs; - struct attribute_group *hw_stats_ag; - struct rdma_hw_stats *hw_stats; + struct hw_stats_device_data *hw_stats_data; #ifdef CONFIG_CGROUP_RDMA struct rdmacg_device cg_device; @@ -2731,6 +2757,15 @@ struct ib_device { u32 lag_flags; }; +static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size, + gfp_t gfp, bool is_numa_aware) +{ + if (is_numa_aware && dev->ops.get_numa_node) + return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev)); + + return kzalloc(size, gfp); +} + struct ib_client_nl_info; struct ib_client { const char *name; @@ -3653,13 +3688,21 @@ static inline int ib_post_srq_recv(struct ib_srq *srq, bad_recv_wr ? 
: &dummy); } -struct ib_qp *ib_create_named_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - const char *caller); +struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + const char *caller); +/** + * ib_create_qp - Creates a kernel QP associated with the specific protection + * domain. + * @pd: The protection domain associated with the QP. + * @init_attr: A list of initial attributes required to create the + * QP. If QP creation succeeds, then the attributes are updated to + * the actual capabilities of the created QP. + */ static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr) { - return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME); + return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME); } /** @@ -4043,6 +4086,34 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, } /** + * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses + * @dev: The device for which the DMA addresses are to be created + * @sg: The sg_table object describing the buffer + * @direction: The direction of the DMA + * @attrs: Optional DMA attributes for the map operation + */ +static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev, + struct sg_table *sgt, + enum dma_data_direction direction, + unsigned long dma_attrs) +{ + if (ib_uses_virt_dma(dev)) { + ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents); + return 0; + } + return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs); +} + +static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev, + struct sg_table *sgt, + enum dma_data_direction direction, + unsigned long dma_attrs) +{ + if (!ib_uses_virt_dma(dev)) + dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs); +} + +/** * ib_dma_map_sg - Map a scatter/gather list to DMA addresses * @dev: The device for which the DMA addresses are to be created * @sg: The array of scatter/gather entries @@ -4286,8 +4357,6 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u32 port); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); -int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, - u32 wq_attr_mask); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size); @@ -4571,28 +4640,6 @@ int rdma_init_netdev(struct ib_device *device, u32 port_num, struct net_device *netdev); /** - * rdma_set_device_sysfs_group - Set device attributes group to have - * driver specific sysfs entries at - * for infiniband class. - * - * @device: device pointer for which attributes to be created - * @group: Pointer to group which should be added when device - * is registered with sysfs. - * rdma_set_device_sysfs_group() allows existing drivers to expose one - * group per device to have sysfs attributes. - * - * NOTE: New drivers should not make use of this API; instead new device - * parameter should be exposed via netlink command. This API and mechanism - * exist only for existing drivers. 
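The new ib_dma_map_sgtable_attrs()/ib_dma_unmap_sgtable_attrs() pair above lets ULPs map a whole sg_table while transparently handling virt-DMA devices. A minimal usage sketch, assuming the usual linux/dma-mapping.h definitions and a caller-owned sg_table:

	static int post_with_sgtable(struct ib_device *dev, struct sg_table *sgt)
	{
		int ret;

		ret = ib_dma_map_sgtable_attrs(dev, sgt, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;

		/* ... build and post work requests against sgt->sgl ... */

		ib_dma_unmap_sgtable_attrs(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}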
- */ -static inline void -rdma_set_device_sysfs_group(struct ib_device *dev, - const struct attribute_group *group) -{ - dev->groups[1] = group; -} - -/** * rdma_device_to_ibdev - Get ib_device pointer from device pointer * * @device: device pointer for which ib_device pointer to retrieve @@ -4627,7 +4674,7 @@ static inline int ibdev_to_node(struct ib_device *ibdev) * * NOTE: New drivers should not make use of this API; This API is only for * existing drivers who have exposed sysfs entries using - * rdma_set_device_sysfs_group(). + * ops->device_group. */ #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 8275954f5ce6..2e58d5e6ac0e 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -444,7 +444,7 @@ struct rvt_qp { /* * This sge list MUST be last. Do not add anything below here. */ - struct rvt_sge r_sg_list[] /* verified SGEs */ + struct rvt_sge *r_sg_list /* verified SGEs */ ____cacheline_aligned_in_smp; }; diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h index 9e273fed0a85..00191695233a 100644 --- a/include/scsi/fc/fc_ms.h +++ b/include/scsi/fc/fc_ms.h @@ -25,6 +25,12 @@ #define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */ /* + * Management server FDMI specifications. + */ +#define FDMI_V1 1 /* FDMI version 1 specifications */ +#define FDMI_V2 2 /* FDMI version 2 specifications */ + +/* * Management server FDMI Requests. */ enum fc_fdmi_req { @@ -57,22 +63,36 @@ enum fc_fdmi_hba_attr_type { FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009, FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B, + FC_FDMI_HBA_ATTR_NODESYMBLNAME = 0x000C, + FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO = 0x000D, + FC_FDMI_HBA_ATTR_NUMBEROFPORTS = 0x000E, + FC_FDMI_HBA_ATTR_FABRICNAME = 0x000F, + FC_FDMI_HBA_ATTR_BIOSVERSION = 0x0010, + FC_FDMI_HBA_ATTR_BIOSSTATE = 0x0011, + FC_FDMI_HBA_ATTR_VENDORIDENTIFIER = 0x00E0, }; /* * HBA Attribute Length */ #define FC_FDMI_HBA_ATTR_NODENAME_LEN 8 -#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80 -#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80 -#define FC_FDMI_HBA_ATTR_MODEL_LEN 256 -#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256 -#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256 -#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256 +#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64 +#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64 +#define FC_FDMI_HBA_ATTR_MODEL_LEN 64 +#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 64 +#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 128 #define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4 +#define FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN 64 +#define FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN 4 +#define FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN 4 +#define FC_FDMI_HBA_ATTR_FABRICNAME_LEN 8 +#define FC_FDMI_HBA_ATTR_BIOSVERSION_LEN 64 +#define FC_FDMI_HBA_ATTR_BIOSSTATE_LEN 4 +#define FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN 8 /* * Port Attribute Type @@ -84,6 +104,16 @@ enum fc_fdmi_port_attr_type { FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004, FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005, FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006, + FC_FDMI_PORT_ATTR_NODENAME = 
0x0007, + FC_FDMI_PORT_ATTR_PORTNAME = 0x0008, + FC_FDMI_PORT_ATTR_SYMBOLICNAME = 0x0009, + FC_FDMI_PORT_ATTR_PORTTYPE = 0x000A, + FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC = 0x000B, + FC_FDMI_PORT_ATTR_FABRICNAME = 0x000C, + FC_FDMI_PORT_ATTR_CURRENTFC4TYPE = 0x000D, + FC_FDMI_PORT_ATTR_PORTSTATE = 0x101, + FC_FDMI_PORT_ATTR_DISCOVEREDPORTS = 0x102, + FC_FDMI_PORT_ATTR_PORTID = 0x103, }; /* @@ -95,6 +125,17 @@ enum fc_fdmi_port_attr_type { #define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4 #define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256 #define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256 +#define FC_FDMI_PORT_ATTR_NODENAME_LEN 8 +#define FC_FDMI_PORT_ATTR_PORTNAME_LEN 8 +#define FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN 256 +#define FC_FDMI_PORT_ATTR_PORTTYPE_LEN 4 +#define FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN 4 +#define FC_FDMI_PORT_ATTR_FABRICNAME_LEN 8 +#define FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN 32 +#define FC_FDMI_PORT_ATTR_PORTSTATE_LEN 4 +#define FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN 4 +#define FC_FDMI_PORT_ATTR_PORTID_LEN 4 + /* * HBA Attribute ID diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index b71b5c4f418c..7b192d88f186 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -52,7 +52,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2) } /* - * useful common(control and data pathes) macro + * useful common(control and data paths) macro */ #define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) #define hton24(p, v) { \ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 9b87e1a1c646..eeb8d689ff6b 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -399,7 +399,7 @@ struct fc_seq { * @sid: Source FCID * @did: Destination FCID * @esb_stat: ESB exchange status - * @r_a_tov: Resouce allocation time out value (in msecs) + * @r_a_tov: Resource allocation time out value (in msecs) * @seq_id: The next sequence ID to use * @encaps: encapsulation information for lower-level driver * @f_ctl: F_CTL flags for the sequence @@ -668,7 +668,7 @@ enum fc_lport_event { * @wwnn: World Wide Node Name * @service_params: Common service parameters * @e_d_tov: Error detection timeout value - * @r_a_tov: Resouce allocation timeout value + * @r_a_tov: Resource allocation timeout value * @rnid_gen: RNID information * @sg_supp: Indicates if scatter gather is supported * @seq_offload: Indicates if sequence offload is supported @@ -841,7 +841,7 @@ static inline void fc_lport_free_stats(struct fc_lport *lport) /** * lport_priv() - Return the private data from a local port - * @lport: The local port whose private data is to be retreived + * @lport: The local port whose private data is to be retrieved */ static inline void *lport_priv(const struct fc_lport *lport) { diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 02f966e9358f..4ee233e5a6ff 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -145,6 +145,13 @@ static inline void* iscsi_next_hdr(struct iscsi_task *task) return (void*)task->hdr + task->hdr_len; } +static inline bool iscsi_task_is_completed(struct iscsi_task *task) +{ + return task->state == ISCSI_TASK_COMPLETED || + task->state == ISCSI_TASK_ABRT_TMF || + task->state == ISCSI_TASK_ABRT_SESS_RECOV; +} + /* Connection's states */ enum { ISCSI_CONN_INITIAL_STAGE, @@ -195,12 +202,6 @@ struct iscsi_conn { unsigned long suspend_tx; /* suspend Tx */ unsigned long suspend_rx; /* suspend Rx */ - /* abort */ - wait_queue_head_t ehwait; /* used in eh_abort() */ - struct iscsi_tm tmhdr; - struct timer_list tmf_timer; - int 
tmf_state; /* see TMF_INITIAL, etc.*/ - /* negotiated params */ unsigned max_recv_dlength; /* initiator_max_recv_dsl*/ unsigned max_xmit_dlength; /* target_max_recv_dsl */ @@ -270,6 +271,12 @@ struct iscsi_session { * and recv lock. */ struct mutex eh_mutex; + /* abort */ + wait_queue_head_t ehwait; /* used in eh_abort() */ + struct iscsi_tm tmhdr; + struct timer_list tmf_timer; + int tmf_state; /* see TMF_INITIAL, etc.*/ + struct iscsi_task *running_aborted_task; /* iSCSI session-wide sequencing */ uint32_t cmdsn; @@ -424,6 +431,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *); extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, int); +extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active); extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); extern void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 9271d7a49b90..6fe125a71b60 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -474,10 +474,16 @@ enum service_response { }; enum exec_status { - /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of - * them here to silence 'case value not in enumerated type' warnings + /* + * Values 0..0x7f are used to return the SAM_STAT_* codes. To avoid + * 'case value not in enumerated type' compiler warnings every value + * returned through the exec_status enum needs an alias with the SAS_ + * prefix here. */ - __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION, + SAS_SAM_STAT_GOOD = SAM_STAT_GOOD, + SAS_SAM_STAT_BUSY = SAM_STAT_BUSY, + SAS_SAM_STAT_TASK_ABORTED = SAM_STAT_TASK_ABORTED, + SAS_SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION, SAS_DEV_NO_RESPONSE = 0x80, SAS_DATA_UNDERRUN, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 246ced401683..3e46859774c8 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <scsi/scsi_common.h> #include <scsi/scsi_proto.h> +#include <scsi/scsi_status.h> struct scsi_cmnd; @@ -30,32 +31,6 @@ enum scsi_timeouts { */ #define SCAN_WILD_CARD ~0 -/** scsi_status_is_good - check the status return. - * - * @status: the status passed up from the driver (including host and - * driver components) - * - * This returns true for known good conditions that may be treated as - * command completed normally - */ -static inline int scsi_status_is_good(int status) -{ - /* - * FIXME: bit0 is listed as reserved in SCSI-2, but is - * significant in SCSI-3. For now, we follow the SCSI-2 - * behaviour and ignore reserved bits. - */ - status &= 0xfe; - return ((status == SAM_STAT_GOOD) || - (status == SAM_STAT_CONDITION_MET) || - /* Next two "intermediate" statuses are obsolete in SAM-4 */ - (status == SAM_STAT_INTERMEDIATE) || - (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) || - /* FIXME: this is obsolete in SAM-3 */ - (status == SAM_STAT_COMMAND_TERMINATED)); -} - - /* * standard mode-select header prepended to all mode-select commands */ @@ -88,94 +63,31 @@ static inline int scsi_is_wlun(u64 lun) return (lun & 0xff00) == SCSI_W_LUN_BASE; } +/** + * scsi_status_is_check_condition - check the status return. + * + * @status: the status passed up from the driver (including host and + * driver components) + * + * This returns true if the status code is SAM_STAT_CHECK_CONDITION. 
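The libsas hunk above replaces the lone __SAM_STAT_CHECK_CONDITION alias with SAS_-prefixed aliases for each SAM status a driver returns through enum exec_status. An illustrative LLDD completion fragment, assuming libsas' struct sas_task/task_status_struct:

	struct task_status_struct *ts = &task->task_status;

	if (cmd_ok)
		ts->stat = SAS_SAM_STAT_GOOD;	/* was: SAM_STAT_GOOD */
	else
		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;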
+ */ +static inline int scsi_status_is_check_condition(int status) +{ + if (status < 0) + return false; + status &= 0xfe; + return status == SAM_STAT_CHECK_CONDITION; +} /* - * MESSAGE CODES + * Extended message codes. */ - -#define COMMAND_COMPLETE 0x00 -#define EXTENDED_MESSAGE 0x01 #define EXTENDED_MODIFY_DATA_POINTER 0x00 #define EXTENDED_SDTR 0x01 #define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */ #define EXTENDED_WDTR 0x03 #define EXTENDED_PPR 0x04 #define EXTENDED_MODIFY_BIDI_DATA_PTR 0x05 -#define SAVE_POINTERS 0x02 -#define RESTORE_POINTERS 0x03 -#define DISCONNECT 0x04 -#define INITIATOR_ERROR 0x05 -#define ABORT_TASK_SET 0x06 -#define MESSAGE_REJECT 0x07 -#define NOP 0x08 -#define MSG_PARITY_ERROR 0x09 -#define LINKED_CMD_COMPLETE 0x0a -#define LINKED_FLG_CMD_COMPLETE 0x0b -#define TARGET_RESET 0x0c -#define ABORT_TASK 0x0d -#define CLEAR_TASK_SET 0x0e -#define INITIATE_RECOVERY 0x0f /* SCSI-II only */ -#define RELEASE_RECOVERY 0x10 /* SCSI-II only */ -#define TERMINATE_IO_PROC 0x11 /* SCSI-II only */ -#define CLEAR_ACA 0x16 -#define LOGICAL_UNIT_RESET 0x17 -#define SIMPLE_QUEUE_TAG 0x20 -#define HEAD_OF_QUEUE_TAG 0x21 -#define ORDERED_QUEUE_TAG 0x22 -#define IGNORE_WIDE_RESIDUE 0x23 -#define ACA 0x24 -#define QAS_REQUEST 0x55 - -/* Old SCSI2 names, don't use in new code */ -#define BUS_DEVICE_RESET TARGET_RESET -#define ABORT ABORT_TASK_SET - -/* - * Host byte codes - */ - -#define DID_OK 0x00 /* NO error */ -#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */ -#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */ -#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */ -#define DID_BAD_TARGET 0x04 /* BAD target. */ -#define DID_ABORT 0x05 /* Told to abort for some other reason */ -#define DID_PARITY 0x06 /* Parity error */ -#define DID_ERROR 0x07 /* Internal error */ -#define DID_RESET 0x08 /* Reset by somebody. */ -#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */ -#define DID_PASSTHROUGH 0x0a /* Force command past mid-layer */ -#define DID_SOFT_ERROR 0x0b /* The low level driver just wish a retry */ -#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */ -#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also - * without decrementing the retry count */ -#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution - * and the driver blocked the port to - * recover the link. Transport class will - * retry or fail IO */ -#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */ -#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on - * other paths */ -#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other - * paths might yield different results */ -#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */ -#define DID_MEDIUM_ERROR 0x13 /* Medium error */ -#define DID_TRANSPORT_MARGINAL 0x14 /* Transport marginal errors */ -#define DRIVER_OK 0x00 /* Driver status */ - -/* - * These indicate the error that occurred, and what is available. - */ - -#define DRIVER_BUSY 0x01 -#define DRIVER_SOFT 0x02 -#define DRIVER_MEDIA 0x03 -#define DRIVER_ERROR 0x04 - -#define DRIVER_INVALID 0x05 -#define DRIVER_TIMEOUT 0x06 -#define DRIVER_HARD 0x07 -#define DRIVER_SENSE 0x08 /* * Internal return values. @@ -206,14 +118,10 @@ enum scsi_disposition { * These are set by: * * status byte = set from target device - * msg_byte = return status from host adapter itself. 
+ * msg_byte (unused) * host_byte = set by low-level driver to indicate status. - * driver_byte = set by mid-level. */ -#define status_byte(result) (((result) >> 1) & 0x7f) -#define msg_byte(result) (((result) >> 8) & 0xff) #define host_byte(result) (((result) >> 16) & 0xff) -#define driver_byte(result) (((result) >> 24) & 0xff) #define sense_class(sense) (((sense) >> 4) & 0x7) #define sense_error(sense) ((sense) & 0xf) @@ -277,4 +185,35 @@ enum scsi_disposition { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 +/** scsi_status_is_good - check the status return. + * + * @status: the status passed up from the driver (including host and + * driver components) + * + * This returns true for known good conditions that may be treated as + * command completed normally + */ +static inline bool scsi_status_is_good(int status) +{ + if (status < 0) + return false; + + if (host_byte(status) == DID_NO_CONNECT) + return false; + + /* + * FIXME: bit0 is listed as reserved in SCSI-2, but is + * significant in SCSI-3. For now, we follow the SCSI-2 + * behaviour and ignore reserved bits. + */ + status &= 0xfe; + return ((status == SAM_STAT_GOOD) || + (status == SAM_STAT_CONDITION_MET) || + /* Next two "intermediate" statuses are obsolete in SAM-4 */ + (status == SAM_STAT_INTERMEDIATE) || + (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) || + /* FIXME: this is obsolete in SAM-3 */ + (status == SAM_STAT_COMMAND_TERMINATED)); +} + #endif /* _SCSI_SCSI_H */ diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h index 6b8128005af8..9b1f0f424a79 100644 --- a/include/scsi/scsi_bsg_iscsi.h +++ b/include/scsi/scsi_bsg_iscsi.h @@ -84,7 +84,7 @@ struct iscsi_bsg_reply { */ uint32_t result; - /* If there was reply_payload, how much was recevied ? */ + /* If there was reply_payload, how much was received ? */ uint32_t reply_payload_rcv_len; union { diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index fed024f4c02a..eaf04c9a1dfc 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -111,9 +111,6 @@ struct scsi_cmnd { reconnects. Probably == sector size */ - struct request *request; /* The command we are - working on */ - unsigned char *sense_buffer; /* obtained by REQUEST SENSE when * CHECK CONDITION is received on original @@ -142,10 +139,15 @@ struct scsi_cmnd { int flags; /* Command flags */ unsigned long state; /* Command completion state */ - unsigned char tag; /* SCSI-II queued command tag */ unsigned int extra_len; /* length of alignment and padding */ }; +/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */ +static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd) +{ + return blk_mq_rq_from_pdu(scmd); +} + /* * Return the driver private allocation behind the command. * Only works if cmd_size is set in the host template. 
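scsi_cmd_to_rq() above is the type-checked replacement for the scmd->request pointer removed from struct scsi_cmnd. A typical one-line conversion in a driver, sketched:

	/* before: tag = blk_mq_unique_tag(scmd->request); */
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));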
@@ -158,7 +160,9 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd) /* make sure not to use it with passthrough commands */ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) { - return *(struct scsi_driver **)cmd->request->rq_disk->private_data; + struct request *rq = scsi_cmd_to_rq(cmd); + + return *(struct scsi_driver **)rq->rq_disk->private_data; } extern void scsi_finish_command(struct scsi_cmnd *cmd); @@ -220,6 +224,25 @@ static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd, buf, buflen); } +static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd) +{ + return blk_rq_pos(scsi_cmd_to_rq(scmd)); +} + +static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) +{ + unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT; + + return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift; +} + +static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd) +{ + unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT; + + return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift; +} + /* * The operations below are hints that tell the controller driver how * to handle I/Os with DIF or similar types of protection information. @@ -282,9 +305,11 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd) return scmd->prot_type; } -static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) +static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd) { - return blk_rq_pos(scmd->request); + struct request *rq = blk_mq_rq_from_pdu(scmd); + + return t10_pi_ref_tag(rq); } static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd) @@ -315,9 +340,9 @@ static inline void set_status_byte(struct scsi_cmnd *cmd, char status) cmd->result = (cmd->result & 0xffffff00) | status; } -static inline void set_msg_byte(struct scsi_cmnd *cmd, char status) +static inline u8 get_status_byte(struct scsi_cmnd *cmd) { - cmd->result = (cmd->result & 0xffff00ff) | (status << 8); + return cmd->result & 0xff; } static inline void set_host_byte(struct scsi_cmnd *cmd, char status) @@ -325,9 +350,36 @@ static inline void set_host_byte(struct scsi_cmnd *cmd, char status) cmd->result = (cmd->result & 0xff00ffff) | (status << 16); } -static inline void set_driver_byte(struct scsi_cmnd *cmd, char status) +static inline u8 get_host_byte(struct scsi_cmnd *cmd) +{ + return (cmd->result >> 16) & 0xff; +} + +/** + * scsi_msg_to_host_byte() - translate message byte + * + * Translate the SCSI parallel message byte to a matching + * host byte setting. A message of COMMAND_COMPLETE indicates + * a successful command execution, any other message indicate + * an error. As the messages themselves only have a meaning + * for the SCSI parallel protocol this function translates + * them into a matching host byte value for SCSI EH. 
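The new scsi_get_lba()/scsi_logical_block_count() helpers above scale the request position and byte count by the device's logical sector size, so error paths no longer open-code the shift. A hedged sketch of a hypothetical logging helper built on them:

	static void report_medium_error(struct scsi_cmnd *scmd)
	{
		sdev_printk(KERN_ERR, scmd->device,
			    "medium error at LBA %llu (%u blocks)\n",
			    (unsigned long long)scsi_get_lba(scmd),
			    scsi_logical_block_count(scmd));
	}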
+ */ +static inline void scsi_msg_to_host_byte(struct scsi_cmnd *cmd, u8 msg) { - cmd->result = (cmd->result & 0x00ffffff) | (status << 24); + switch (msg) { + case COMMAND_COMPLETE: + break; + case ABORT_TASK_SET: + set_host_byte(cmd, DID_ABORT); + break; + case TARGET_RESET: + set_host_byte(cmd, DID_RESET); + break; + default: + set_host_byte(cmd, DID_ERROR); + break; + } } static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) @@ -341,4 +393,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) return xfer_len; } +extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc, + u8 key, u8 asc, u8 ascq); + #endif /* _SCSI_SCSI_CMND_H */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index ac6ab16abee7..09a17f6e93a7 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -10,6 +10,7 @@ #include <linux/atomic.h> #include <linux/sbitmap.h> +struct bsg_device; struct device; struct request_queue; struct scsi_cmnd; @@ -205,6 +206,7 @@ struct scsi_device { unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device * creation time */ + unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */ bool offline_already; /* Device offline message logged */ @@ -234,6 +236,10 @@ struct scsi_device { size_t dma_drain_len; void *dma_drain_buf; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + + struct bsg_device *bsg_dev; unsigned char access_state; struct mutex state_mutex; enum scsi_device_state sdev_state; @@ -265,13 +271,15 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *, __printf(3, 4) void scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); -#define scmd_dbg(scmd, fmt, a...) \ - do { \ - if ((scmd)->request->rq_disk) \ - sdev_dbg((scmd)->device, "[%s] " fmt, \ - (scmd)->request->rq_disk->disk_name, ##a);\ - else \ - sdev_dbg((scmd)->device, fmt, ##a); \ +#define scmd_dbg(scmd, fmt, a...) 
\ + do { \ + struct request *__rq = scsi_cmd_to_rq((scmd)); \ + \ + if (__rq->rq_disk) \ + sdev_dbg((scmd)->device, "[%s] " fmt, \ + __rq->rq_disk->disk_name, ##a); \ + else \ + sdev_dbg((scmd)->device, fmt, ##a); \ } while (0) enum scsi_target_state { diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 3fdb322d4c4b..5d14adae21c7 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -28,7 +28,8 @@ #define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9)) /* override additional length field */ #define BLIST_INQUIRY_36 ((__force blist_flags_t)(1ULL << 10)) -#define __BLIST_UNUSED_11 ((__force blist_flags_t)(1ULL << 11)) +/* ignore MEDIA CHANGE unit attention after resuming from runtime suspend */ +#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11)) /* do not do automatic start on add */ #define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12)) #define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13)) @@ -73,8 +74,7 @@ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \ (__force blist_flags_t) \ ((__force __u64)__BLIST_LAST_USED - 1ULL))) -#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \ - __BLIST_UNUSED_13 | \ +#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \ __BLIST_UNUSED_14 | \ __BLIST_UNUSED_15 | \ __BLIST_UNUSED_16 | \ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d0bf88d77f02..75363707b73f 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -764,7 +764,7 @@ extern void scsi_host_put(struct Scsi_Host *t); extern struct Scsi_Host *scsi_host_lookup(unsigned short); extern const char *scsi_host_state_name(enum scsi_host_state); extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, - int status); + enum scsi_host_status status); static inline int __must_check scsi_add_host(struct Scsi_Host *host, struct device *dev) diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h index b465799f4d2d..d2cb9aeaf1f1 100644 --- a/include/scsi/scsi_ioctl.h +++ b/include/scsi/scsi_ioctl.h @@ -18,7 +18,9 @@ #ifdef __KERNEL__ +struct gendisk; struct scsi_device; +struct sg_io_hdr; /* * Structures used for scsi_ioctl et al. @@ -43,8 +45,11 @@ typedef struct scsi_fctargaddress { int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd, bool ndelay); -extern int scsi_ioctl(struct scsi_device *, int, void __user *); -extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg); +int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode, + int cmd, void __user *arg); +int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp); +int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp); +bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode); #endif /* __KERNEL__ */ #endif /* _SCSI_IOCTL_H */ diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index c36860111932..f017843a8124 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -190,39 +190,21 @@ struct scsi_varlen_cdb_hdr { * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft * T10/1561-D Revision 4 Draft dated 7th November 2002. 
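scsi_host_complete_all_commands() above now takes the typed enum scsi_host_status rather than a bare int, documenting the expected values at the call site. For example, in a hypothetical controller-dead handler:

	/* fail everything still outstanding on this host */
	scsi_host_complete_all_commands(shost, DID_NO_CONNECT);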
*/ -#define SAM_STAT_GOOD 0x00 -#define SAM_STAT_CHECK_CONDITION 0x02 -#define SAM_STAT_CONDITION_MET 0x04 -#define SAM_STAT_BUSY 0x08 -#define SAM_STAT_INTERMEDIATE 0x10 -#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14 -#define SAM_STAT_RESERVATION_CONFLICT 0x18 -#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */ -#define SAM_STAT_TASK_SET_FULL 0x28 -#define SAM_STAT_ACA_ACTIVE 0x30 -#define SAM_STAT_TASK_ABORTED 0x40 - -/* - * Status codes. These are deprecated as they are shifted 1 bit right - * from those found in the SCSI standards. This causes confusion for - * applications that are ported to several OSes. Prefer SAM Status codes - * above. - */ - -#define GOOD 0x00 -#define CHECK_CONDITION 0x01 -#define CONDITION_GOOD 0x02 -#define BUSY 0x04 -#define INTERMEDIATE_GOOD 0x08 -#define INTERMEDIATE_C_GOOD 0x0a -#define RESERVATION_CONFLICT 0x0c -#define COMMAND_TERMINATED 0x11 -#define QUEUE_FULL 0x14 -#define ACA_ACTIVE 0x18 -#define TASK_ABORTED 0x20 - -#define STATUS_MASK 0xfe +enum sam_status { + SAM_STAT_GOOD = 0x00, + SAM_STAT_CHECK_CONDITION = 0x02, + SAM_STAT_CONDITION_MET = 0x04, + SAM_STAT_BUSY = 0x08, + SAM_STAT_INTERMEDIATE = 0x10, + SAM_STAT_INTERMEDIATE_CONDITION_MET = 0x14, + SAM_STAT_RESERVATION_CONFLICT = 0x18, + SAM_STAT_COMMAND_TERMINATED = 0x22, /* obsolete in SAM-3 */ + SAM_STAT_TASK_SET_FULL = 0x28, + SAM_STAT_ACA_ACTIVE = 0x30, + SAM_STAT_TASK_ABORTED = 0x40, +}; +#define STATUS_MASK 0xfe /* * SENSE KEYS */ @@ -341,4 +323,16 @@ enum zbc_zone_cond { ZBC_ZONE_COND_OFFLINE = 0xf, }; +/* Version descriptor values for INQUIRY */ +enum scsi_version_descriptor { + SCSI_VERSION_DESCRIPTOR_FCP4 = 0x0a40, + SCSI_VERSION_DESCRIPTOR_ISCSI = 0x0960, + SCSI_VERSION_DESCRIPTOR_SAM5 = 0x00a0, + SCSI_VERSION_DESCRIPTOR_SAS3 = 0x0c60, + SCSI_VERSION_DESCRIPTOR_SBC3 = 0x04c0, + SCSI_VERSION_DESCRIPTOR_SBP3 = 0x0980, + SCSI_VERSION_DESCRIPTOR_SPC4 = 0x0460, + SCSI_VERSION_DESCRIPTOR_SRP = 0x0940 +}; + #endif /* _SCSI_PROTO_H_ */ diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h index b06f28c74908..9129b23e12bc 100644 --- a/include/scsi/scsi_request.h +++ b/include/scsi/scsi_request.h @@ -28,6 +28,4 @@ static inline void scsi_req_free_cmd(struct scsi_request *req) kfree(req->cmd); } -void scsi_req_init(struct scsi_request *req); - #endif /* _SCSI_SCSI_REQUEST_H */ diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h new file mode 100644 index 000000000000..31d30cee1869 --- /dev/null +++ b/include/scsi/scsi_status.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _SCSI_SCSI_STATUS_H +#define _SCSI_SCSI_STATUS_H + +#include <linux/types.h> +#include <scsi/scsi_proto.h> + +/* Message codes. 
*/ +enum scsi_msg_byte { + COMMAND_COMPLETE = 0x00, + EXTENDED_MESSAGE = 0x01, + SAVE_POINTERS = 0x02, + RESTORE_POINTERS = 0x03, + DISCONNECT = 0x04, + INITIATOR_ERROR = 0x05, + ABORT_TASK_SET = 0x06, + MESSAGE_REJECT = 0x07, + NOP = 0x08, + MSG_PARITY_ERROR = 0x09, + LINKED_CMD_COMPLETE = 0x0a, + LINKED_FLG_CMD_COMPLETE = 0x0b, + TARGET_RESET = 0x0c, + ABORT_TASK = 0x0d, + CLEAR_TASK_SET = 0x0e, + INITIATE_RECOVERY = 0x0f, /* SCSI-II only */ + RELEASE_RECOVERY = 0x10, /* SCSI-II only */ + TERMINATE_IO_PROC = 0x11, /* SCSI-II only */ + CLEAR_ACA = 0x16, + LOGICAL_UNIT_RESET = 0x17, + SIMPLE_QUEUE_TAG = 0x20, + HEAD_OF_QUEUE_TAG = 0x21, + ORDERED_QUEUE_TAG = 0x22, + IGNORE_WIDE_RESIDUE = 0x23, + ACA = 0x24, + QAS_REQUEST = 0x55, + + /* Old SCSI2 names, don't use in new code */ + BUS_DEVICE_RESET = TARGET_RESET, + ABORT = ABORT_TASK_SET, +}; + +/* Host byte codes. */ +enum scsi_host_status { + DID_OK = 0x00, /* NO error */ + DID_NO_CONNECT = 0x01, /* Couldn't connect before timeout period */ + DID_BUS_BUSY = 0x02, /* BUS stayed busy through time out period */ + DID_TIME_OUT = 0x03, /* TIMED OUT for other reason */ + DID_BAD_TARGET = 0x04, /* BAD target. */ + DID_ABORT = 0x05, /* Told to abort for some other reason */ + DID_PARITY = 0x06, /* Parity error */ + DID_ERROR = 0x07, /* Internal error */ + DID_RESET = 0x08, /* Reset by somebody. */ + DID_BAD_INTR = 0x09, /* Got an interrupt we weren't expecting. */ + DID_PASSTHROUGH = 0x0a, /* Force command past mid-layer */ + DID_SOFT_ERROR = 0x0b, /* The low level driver just wish a retry */ + DID_IMM_RETRY = 0x0c, /* Retry without decrementing retry count */ + DID_REQUEUE = 0x0d, /* Requeue command (no immediate retry) also + * without decrementing the retry count */ + DID_TRANSPORT_DISRUPTED = 0x0e, /* Transport error disrupted execution + * and the driver blocked the port to + * recover the link. Transport class will + * retry or fail IO */ + DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */ + DID_TARGET_FAILURE = 0x10, /* Permanent target failure, do not retry on + * other paths */ + DID_NEXUS_FAILURE = 0x11, /* Permanent nexus failure, retry on other + * paths might yield different results */ + DID_ALLOC_FAILURE = 0x12, /* Space allocation on the device failed */ + DID_MEDIUM_ERROR = 0x13, /* Medium error */ + DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */ +}; + +#endif /* _SCSI_SCSI_STATUS_H */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 14214ee121ad..e80a7c542c88 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -517,10 +517,11 @@ enum fc_host_event_code { * managed by the transport w/o driver interaction. 
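With the message and host byte codes relocated above into scsi/scsi_status.h as proper enums, existing driver completion paths keep working unchanged. A sketch under stated assumptions: fw_status and MY_FW_TIMEOUT are illustrative driver-private names, and the scsi_done callback is assumed wired up by the midlayer of this era:

	switch (fw_status) {
	case MY_FW_TIMEOUT:	/* hypothetical firmware status code */
		set_host_byte(scmd, DID_TIME_OUT);
		break;
	default:
		set_host_byte(scmd, DID_ERROR);
		break;
	}
	scmd->scsi_done(scmd);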
*/ +#define FC_VENDOR_IDENTIFIER 8 #define FC_FC4_LIST_SIZE 32 #define FC_SYMBOLIC_NAME_SIZE 256 #define FC_VERSION_STRING_SIZE 64 -#define FC_SERIAL_NUMBER_SIZE 80 +#define FC_SERIAL_NUMBER_SIZE 64 struct fc_host_attrs { /* Fixed Attributes */ @@ -532,6 +533,10 @@ struct fc_host_attrs { u32 supported_speeds; u32 maxframe_size; u16 max_npiv_vports; + u32 max_ct_payload; + u32 num_ports; + u32 num_discovered_ports; + u32 bootbios_state; char serial_number[FC_SERIAL_NUMBER_SIZE]; char manufacturer[FC_SERIAL_NUMBER_SIZE]; char model[FC_SYMBOLIC_NAME_SIZE]; @@ -540,6 +545,9 @@ struct fc_host_attrs { char driver_version[FC_VERSION_STRING_SIZE]; char firmware_version[FC_VERSION_STRING_SIZE]; char optionrom_version[FC_VERSION_STRING_SIZE]; + char vendor_identifier[FC_VENDOR_IDENTIFIER]; + char bootbios_version[FC_SYMBOLIC_NAME_SIZE]; + /* Dynamic Attributes */ u32 port_id; @@ -573,6 +581,9 @@ struct fc_host_attrs { /* bsg support */ struct request_queue *rqst_q; + + /* FDMI support version*/ + u8 fdmi_version; }; #define shost_to_fc_host(x) \ @@ -652,6 +663,18 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) #define fc_host_dev_loss_tmo(x) \ (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) +#define fc_host_max_ct_payload(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->max_ct_payload) +#define fc_host_vendor_identifier(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->vendor_identifier) +#define fc_host_num_discovered_ports(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->num_discovered_ports) +#define fc_host_num_ports(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->num_ports) +#define fc_host_bootbios_version(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->bootbios_version) +#define fc_host_bootbios_state(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->bootbios_state) /* The functions by which the transport class and the driver communicate */ struct fc_function_template { diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index fc5a39839b4b..c5d7810fd792 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -82,6 +82,7 @@ struct iscsi_transport { void (*destroy_session) (struct iscsi_cls_session *session); struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, uint32_t cid); + void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active); int (*bind_conn) (struct iscsi_cls_session *session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading); @@ -196,15 +197,23 @@ enum iscsi_connection_state { ISCSI_CONN_BOUND, }; +#define ISCSI_CLS_CONN_BIT_CLEANUP 1 + struct iscsi_cls_conn { struct list_head conn_list; /* item in connlist */ - struct list_head conn_list_err; /* item in connlist_err */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; uint32_t cid; /* connection id */ + /* + * This protects the conn startup and binding/unbinding of the ep to + * the conn. Unbinding includes ep_disconnect and stop_conn. 
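Returning to the scsi_transport_fc hunk above, the new FDMI-related host attributes are plain fields an LLD fills during host setup through the accessor macros. A hedged sketch; the values and the "ACME" vendor string are illustrative, not from any real driver:

	fc_host_max_ct_payload(shost) = 2048;
	fc_host_num_ports(shost) = 1;
	strscpy(fc_host_vendor_identifier(shost), "ACME",
		FC_VENDOR_IDENTIFIER);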
+ */ struct mutex ep_mutex; struct iscsi_endpoint *ep; + unsigned long flags; + struct work_struct cleanup_work; + struct device dev; /* sysfs transport/container device */ enum iscsi_connection_state state; }; @@ -434,6 +443,8 @@ extern void iscsi_remove_session(struct iscsi_cls_session *session); extern void iscsi_free_session(struct iscsi_cls_session *session); extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, int dd_size, uint32_t cid); +extern void iscsi_put_conn(struct iscsi_cls_conn *conn); +extern void iscsi_get_conn(struct iscsi_cls_conn *conn); extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); extern void iscsi_unblock_session(struct iscsi_cls_session *session); extern void iscsi_block_session(struct iscsi_cls_session *session); @@ -441,6 +452,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); +extern void iscsi_put_endpoint(struct iscsi_endpoint *ep); extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *t, diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 7327e12f3373..843cefb8efce 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -131,6 +131,39 @@ struct compat_sg_io_hdr { #define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */ #define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */ +/* + * Obsolete DRIVER_SENSE driver byte + * + * Originally the SCSI midlayer would set the DRIVER_SENSE driver byte when + * a sense code was generated and a sense buffer was allocated. + * However, as nowadays every scsi command has a sense code allocated this + * distinction became moot as one could check the sense buffer directly. + * Consequently this byte is not set anymore from the midlayer, but SG will + * keep setting this byte to be compatible with previous releases. + */ +#define DRIVER_SENSE 0x08 +/* Obsolete driver_byte() declaration */ +#define driver_byte(result) (((result) >> 24) & 0xff) + +/* + * Original linux SCSI Status codes. They are shifted 1 bit right + * from those found in the SCSI standards. 
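Relocating the obsolete shifted status codes and status_byte() into scsi/sg.h, as above, keeps old SG_IO consumers compiling without the kernel-internal definitions. A sketch of the classic userspace-style check, assuming an sg_io_hdr filled in by the SG_IO ioctl:

	static int check_sg_result(const struct sg_io_hdr *hdr)
	{
		/* masked_status carries the same old shifted code */
		if (status_byte(hdr->status) != GOOD)
			return -EIO;	/* sense data, if any, is in hdr->sbp */
		return 0;
	}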
+ */ + +#define GOOD 0x00 +#define CHECK_CONDITION 0x01 +#define CONDITION_GOOD 0x02 +#define BUSY 0x04 +#define INTERMEDIATE_GOOD 0x08 +#define INTERMEDIATE_C_GOOD 0x0a +#define RESERVATION_CONFLICT 0x0c +#define COMMAND_TERMINATED 0x11 +#define QUEUE_FULL 0x14 +#define ACA_ACTIVE 0x18 +#define TASK_ABORTED 0x20 + +/* Obsolete status_byte() declaration */ +#define status_byte(result) (((result) >> 1) & 0x7f) typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */ @@ -145,7 +178,7 @@ typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */ - char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */ + char orphan; /* 0 -> normal request, 1 -> from interrupted SG_IO */ char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */ char problem; /* 0 -> no problem detected, 1 -> error to report */ int pack_id; /* pack_id associated with request */ diff --git a/include/scsi/srp.h b/include/scsi/srp.h index 177d8026e96f..dfe0984b58a9 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h @@ -107,10 +107,10 @@ struct srp_direct_buf { * having the 20-byte structure padded to 24 bytes on 64-bit architectures. */ struct srp_indirect_buf { - struct srp_direct_buf table_desc; + struct srp_direct_buf table_desc __packed __aligned(4); __be32 len; - struct srp_direct_buf desc_list[]; -} __attribute__((packed)); + struct srp_direct_buf desc_list[] __packed __aligned(4); +}; /* Immediate data buffer descriptor as defined in SRP2. */ struct srp_imm_buf { @@ -175,13 +175,13 @@ struct srp_login_rsp { u8 opcode; u8 reserved1[3]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); __be32 max_it_iu_len; __be32 max_ti_iu_len; __be16 buf_fmt; u8 rsp_flags; u8 reserved2[25]; -} __attribute__((packed)); +}; struct srp_login_rej { u8 opcode; @@ -207,10 +207,6 @@ struct srp_t_logout { u64 tag; }; -/* - * We need the packed attribute because the SRP spec only aligns the - * 8-byte LUN field to 4 bytes. - */ struct srp_tsk_mgmt { u8 opcode; u8 sol_not; @@ -225,10 +221,6 @@ struct srp_tsk_mgmt { u8 reserved5[8]; }; -/* - * We need the packed attribute because the SRP spec only aligns the - * 8-byte LUN field to 4 bytes. - */ struct srp_cmd { u8 opcode; u8 sol_not; @@ -266,7 +258,7 @@ struct srp_rsp { u8 sol_not; u8 reserved1[2]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); u8 reserved2[2]; u8 flags; u8 status; @@ -275,7 +267,7 @@ struct srp_rsp { __be32 sense_data_len; __be32 resp_data_len; u8 data[]; -} __attribute__((packed)); +}; struct srp_cred_req { u8 opcode; @@ -301,13 +293,13 @@ struct srp_aer_req { u8 sol_not; u8 reserved[2]; __be32 req_lim_delta; - u64 tag; + u64 tag __packed __aligned(4); u32 reserved2; struct scsi_lun lun; __be32 sense_data_len; u32 reserved3; u8 sense_data[]; -} __attribute__((packed)); +}; struct srp_aer_rsp { u8 opcode; diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h new file mode 100644 index 000000000000..f6542584ca13 --- /dev/null +++ b/include/soc/at91/sama7-ddr.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Microchip SAMA7 UDDR Controller and DDR3 PHY Controller registers offsets + * and bit definitions. + * + * Copyright (C) [2020] Microchip Technology Inc. 
and its subsidiaries + * + * Author: Claudu Beznea <claudiu.beznea@microchip.com> + */ + +#ifndef __SAMA7_DDR_H__ +#define __SAMA7_DDR_H__ + +#ifdef CONFIG_SOC_SAMA7 + +/* DDR3PHY */ +#define DDR3PHY_PIR (0x04) /* DDR3PHY PHY Initialization Register */ +#define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */ +#define DDR3PHY_PIR_ITMSRST (1 << 4) /* Interface Timing Module Soft Reset */ +#define DDR3PHY_PIR_DLLLOCK (1 << 2) /* DLL Lock */ +#define DDR3PHY_PIR_DLLSRST (1 << 1) /* DLL Soft Rest */ +#define DDR3PHY_PIR_INIT (1 << 0) /* Initialization Trigger */ + +#define DDR3PHY_PGCR (0x08) /* DDR3PHY PHY General Configuration Register */ +#define DDR3PHY_PGCR_CKDV1 (1 << 13) /* CK# Disable Value */ +#define DDR3PHY_PGCR_CKDV0 (1 << 12) /* CK Disable Value */ + +#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */ +#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */ + +#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */ +#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */ +#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */ +#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */ + +#define DDR3PHY_DXCCR (0x28) /* DDR3PHY DATX8 Common Configuration Register */ +#define DDR3PHY_DXCCR_DXPDR (1 << 3) /* Data Power Down Receiver */ + +#define DDR3PHY_DSGCR (0x2C) /* DDR3PHY DDR System General Configuration Register */ +#define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */ + +#define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */ + +/* UDDRC */ +#define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */ +#define UDDRC_STAT_SELFREF_TYPE_DIS (0x0 << 4) /* SDRAM is not in Self-refresh */ +#define UDDRC_STAT_SELFREF_TYPE_PHY (0x1 << 4) /* SDRAM is in Self-refresh, which was caused by PHY Master Request */ +#define UDDRC_STAT_SELFREF_TYPE_SW (0x2 << 4) /* SDRAM is in Self-refresh, which was not caused solely under Automatic Self-refresh control */ +#define UDDRC_STAT_SELFREF_TYPE_AUTO (0x3 << 4) /* SDRAM is in Self-refresh, which was caused by Automatic Self-refresh only */ +#define UDDRC_STAT_SELFREF_TYPE_MSK (0x3 << 4) /* Self-refresh type mask */ +#define UDDRC_STAT_OPMODE_INIT (0x0 << 0) /* Init */ +#define UDDRC_STAT_OPMODE_NORMAL (0x1 << 0) /* Normal */ +#define UDDRC_STAT_OPMODE_PWRDOWN (0x2 << 0) /* Power-down */ +#define UDDRC_STAT_OPMODE_SELF_REFRESH (0x3 << 0) /* Self-refresh */ +#define UDDRC_STAT_OPMODE_MSK (0x7 << 0) /* Operating mode mask */ + +#define UDDRC_PWRCTL (0x30) /* UDDRC Low Power Control Register */ +#define UDDRC_PWRCTRL_SELFREF_SW (1 << 5) /* Software self-refresh */ + +#define UDDRC_DFIMISC (0x1B0) /* UDDRC DFI Miscellaneous Control Register */ +#define UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0) /* PHY initialization complete enable signal */ + +#define UDDRC_SWCTRL (0x320) /* UDDRC Software Register Programming Control Enable */ +#define UDDRC_SWCTRL_SW_DONE (1 << 0) /* Enable quasi-dynamic register programming outside reset */ + +#define UDDRC_SWSTAT (0x324) /* UDDRC Software Register Programming Control Status */ +#define UDDRC_SWSTAT_SW_DONE_ACK (1 << 0) /* Register programming done */ + +#define UDDRC_PSTAT (0x3FC) /* UDDRC Port Status Register */ +#define UDDRC_PSTAT_ALL_PORTS (0x1F001F) /* Read + writes outstanding transactions on all ports */ + +#define UDDRC_PCTRL_0 (0x490) /* UDDRC Port 0 Control Register */ +#define UDDRC_PCTRL_1 (0x540) /* UDDRC Port 1 Control Register */ +#define UDDRC_PCTRL_2 (0x5F0) /* UDDRC Port 2 Control 
Register */ +#define UDDRC_PCTRL_3 (0x6A0) /* UDDRC Port 3 Control Register */ +#define UDDRC_PCTRL_4 (0x750) /* UDDRC Port 4 Control Register */ + +#endif /* CONFIG_SOC_SAMA7 */ + +#endif /* __SAMA7_DDR_H__ */ diff --git a/include/soc/at91/sama7-sfrbu.h b/include/soc/at91/sama7-sfrbu.h new file mode 100644 index 000000000000..76b740810d34 --- /dev/null +++ b/include/soc/at91/sama7-sfrbu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Microchip SAMA7 SFRBU registers offsets and bit definitions. + * + * Copyright (C) [2020] Microchip Technology Inc. and its subsidiaries + * + * Author: Claudu Beznea <claudiu.beznea@microchip.com> + */ + +#ifndef __SAMA7_SFRBU_H__ +#define __SAMA7_SFRBU_H__ + +#ifdef CONFIG_SOC_SAMA7 + +#define AT91_SFRBU_PSWBU (0x00) /* SFRBU Power Switch BU Control Register */ +#define AT91_SFRBU_PSWBU_PSWKEY (0x4BD20C << 8) /* Specific value mandatory to allow writing of other register bits */ +#define AT91_SFRBU_PSWBU_STATE (1 << 2) /* Power switch BU state */ +#define AT91_SFRBU_PSWBU_SOFTSWITCH (1 << 1) /* Power switch BU source selection */ +#define AT91_SFRBU_PSWBU_CTRL (1 << 0) /* Power switch BU control */ + +#define AT91_SFRBU_25LDOCR (0x0C) /* SFRBU 2.5V LDO Control Register */ +#define AT91_SFRBU_25LDOCR_LDOANAKEY (0x3B6E18 << 8) /* Specific value mandatory to allow writing of other register bits. */ +#define AT91_SFRBU_25LDOCR_STATE (1 << 3) /* LDOANA Switch On/Off Control */ +#define AT91_SFRBU_25LDOCR_LP (1 << 2) /* LDOANA Low-Power Mode Control */ +#define AT91_SFRBU_PD_VALUE_MSK (0x3) +#define AT91_SFRBU_25LDOCR_PD_VALUE(v) ((v) & AT91_SFRBU_PD_VALUE_MSK) /* LDOANA Pull-down value */ + +#define AT91_FRBU_DDRPWR (0x10) /* SFRBU DDR Power Control Register */ +#define AT91_FRBU_DDRPWR_STATE (1 << 0) /* DDR Power Mode State */ + +#endif /* CONFIG_SOC_SAMA7 */ + +#endif /* __SAMA7_SFRBU_H__ */ + diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h index 42d6aeb951fa..0bf610acafd0 100644 --- a/include/soc/imx/cpu.h +++ b/include/soc/imx/cpu.h @@ -9,6 +9,7 @@ #define MXC_CPU_MX27 27 #define MXC_CPU_MX31 31 #define MXC_CPU_MX35 35 +#define MXC_CPU_MX50 50 #define MXC_CPU_MX51 51 #define MXC_CPU_MX53 53 #define MXC_CPU_IMX6SL 0x60 diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h new file mode 100644 index 000000000000..2b64c95f3be5 --- /dev/null +++ b/include/soc/microchip/mpfs.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Microchip PolarFire SoC (MPFS) + * + * Copyright (c) 2020 Microchip Corporation. All rights reserved. 
+ * + * Author: Conor Dooley <conor.dooley@microchip.com> + * + */ + +#ifndef __SOC_MPFS_H__ +#define __SOC_MPFS_H__ + +#include <linux/types.h> +#include <linux/of_device.h> + +struct mpfs_sys_controller; + +struct mpfs_mss_msg { + u8 cmd_opcode; + u16 cmd_data_size; + struct mpfs_mss_response *response; + u8 *cmd_data; + u16 mbox_offset; + u16 resp_offset; +}; + +struct mpfs_mss_response { + u32 resp_status; + u32 *resp_msg; + u16 resp_size; +}; + +#if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) + +int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, void *msg); + +struct mpfs_sys_controller *mpfs_sys_controller_get(struct device_node *mailbox_node); + +#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */ + +#endif /* __SOC_MPFS_H__ */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 2f5ce4d4fdbf..06706a9fd5b1 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -589,6 +589,9 @@ enum ocelot_sb_pool { OCELOT_SB_POOL_NUM, }; +#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0) +#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1) + struct ocelot_port { struct ocelot *ocelot; @@ -798,19 +801,14 @@ void ocelot_init_port(struct ocelot *ocelot, int port); void ocelot_deinit_port(struct ocelot *ocelot, int port); /* DSA callbacks */ -void ocelot_port_enable(struct ocelot *ocelot, int port, - struct phy_device *phy); -void ocelot_port_disable(struct ocelot *ocelot, int port); void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data); void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data); int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset); int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct ethtool_ts_info *info); void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs); -int ocelot_port_flush(struct ocelot *ocelot, int port); -void ocelot_adjust_link(struct ocelot *ocelot, int port, - struct phy_device *phydev); -int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled); +int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled, + struct netlink_ext_ack *extack); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state); void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot); int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, @@ -828,7 +826,7 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port, int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid); int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, - bool untagged); + bool untagged, struct netlink_ext_ack *extack); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid); @@ -894,6 +892,18 @@ int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, enum devlink_sb_pool_type pool_type, u32 *p_cur, u32 *p_max); +void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + unsigned long quirks); +void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause, + unsigned long quirks); + #if IS_ENABLED(CONFIG_BRIDGE_MRP) int ocelot_mrp_add(struct ocelot *ocelot, int port, const struct switchdev_obj_mrp *mrp); diff --git a/include/soc/tegra/common.h 
b/include/soc/tegra/common.h index 98027a76ce3d..af41ad80ec21 100644 --- a/include/soc/tegra/common.h +++ b/include/soc/tegra/common.h @@ -6,6 +6,37 @@ #ifndef __SOC_TEGRA_COMMON_H__ #define __SOC_TEGRA_COMMON_H__ +#include <linux/errno.h> +#include <linux/types.h> + +struct device; + +/** + * Tegra SoC core device OPP table configuration + * + * @init_state: pre-initialize OPP state of a device + */ +struct tegra_core_opp_params { + bool init_state; +}; + +#ifdef CONFIG_ARCH_TEGRA bool soc_is_tegra(void); +int devm_tegra_core_dev_init_opp_table(struct device *dev, + struct tegra_core_opp_params *params); +#else +static inline bool soc_is_tegra(void) +{ + return false; +} + +static inline int +devm_tegra_core_dev_init_opp_table(struct device *dev, + struct tegra_core_opp_params *params) +{ + return -ENODEV; +} +#endif + #endif /* __SOC_TEGRA_COMMON_H__ */ diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 78cbc787a4dc..990701f788bc 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -52,14 +52,28 @@ struct tegra_sku_info { enum tegra_revision revision; }; +#ifdef CONFIG_ARCH_TEGRA +extern struct tegra_sku_info tegra_sku_info; u32 tegra_read_straps(void); u32 tegra_read_ram_code(void); int tegra_fuse_readl(unsigned long offset, u32 *value); - -#ifdef CONFIG_ARCH_TEGRA -extern struct tegra_sku_info tegra_sku_info; #else static struct tegra_sku_info tegra_sku_info __maybe_unused; + +static inline u32 tegra_read_straps(void) +{ + return 0; +} + +static inline u32 tegra_read_ram_code(void) +{ + return 0; +} + +static inline int tegra_fuse_readl(unsigned long offset, u32 *value) +{ + return -ENODEV; +} #endif struct device *tegra_soc_device_register(void); diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index d2fbe6a8b25b..1066b1194a5a 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -10,6 +10,7 @@ #include <linux/debugfs.h> #include <linux/err.h> #include <linux/interconnect-provider.h> +#include <linux/irq.h> #include <linux/reset-controller.h> #include <linux/types.h> @@ -17,34 +18,48 @@ struct clk; struct device; struct page; -struct tegra_smmu_enable { - unsigned int reg; - unsigned int bit; -}; - struct tegra_mc_timing { unsigned long rate; u32 *emem_data; }; -/* latency allowance */ -struct tegra_mc_la { - unsigned int reg; - unsigned int shift; - unsigned int mask; - unsigned int def; -}; - struct tegra_mc_client { unsigned int id; const char *name; - unsigned int swgroup; + /* + * For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the + * Tegra SMMU, whereas on Tegra186 and later this is the ID used to override the ARM SMMU + * stream ID used for IOVA translations for the given memory client. + */ + union { + unsigned int swgroup; + unsigned int sid; + }; unsigned int fifo_size; - struct tegra_smmu_enable smmu; - struct tegra_mc_la la; + struct { + /* Tegra SMMU enable (Tegra210 and earlier) */ + struct { + unsigned int reg; + unsigned int bit; + } smmu; + + /* latency allowance */ + struct { + unsigned int reg; + unsigned int shift; + unsigned int mask; + unsigned int def; + } la; + + /* stream ID overrides (Tegra186 and later) */ + struct { + unsigned int override; + unsigned int security; + } sid; + } regs; }; struct tegra_smmu_swgroup { @@ -155,6 +170,19 @@ struct tegra_mc_icc_ops { void *data); }; +struct tegra_mc_ops { + /* + * @probe: Callback to set up SoC-specific bits of the memory controller. This is called + * after basic, common set up that is done by the SoC-agnostic bits. 
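For illustration, a minimal sketch of how an SoC-specific backend might populate the new callback table declared in this hunk; every example_mc_* name below is a hypothetical placeholder, not part of the header, and only the hooks the sketch needs are filled in:

#include <linux/irqreturn.h>
#include <soc/tegra/mc.h>

/* Hypothetical SoC-specific probe, run after the common setup. */
static int example_mc_probe(struct tegra_mc *mc)
{
	/* program SoC-specific registers of @mc here */
	return 0;
}

/* Hypothetical interrupt handler; @data is the struct tegra_mc. */
static irqreturn_t example_mc_handle_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;

	/* decode and clear the SoC-specific interrupt status of @mc */
	(void)mc;
	return IRQ_HANDLED;
}

static const struct tegra_mc_ops example_mc_ops = {
	.probe = example_mc_probe,
	.handle_irq = example_mc_handle_irq,
	/* .remove, .suspend, .resume and .probe_device left unset here */
};

static const struct tegra_mc_soc example_mc_soc = {
	/* client tables and the other fields omitted in this sketch */
	.ops = &example_mc_ops,
};

As the tegra_mc_soc hunk further below shows, this ->ops table replaces the single ->init hook used previously.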
+ */ + int (*probe)(struct tegra_mc *mc); + void (*remove)(struct tegra_mc *mc); + int (*suspend)(struct tegra_mc *mc); + int (*resume)(struct tegra_mc *mc); + irqreturn_t (*handle_irq)(int irq, void *data); + int (*probe_device)(struct tegra_mc *mc, struct device *dev); +}; + struct tegra_mc_soc { const struct tegra_mc_client *clients; unsigned int num_clients; @@ -176,8 +204,7 @@ struct tegra_mc_soc { unsigned int num_resets; const struct tegra_mc_icc_ops *icc_ops; - - int (*init)(struct tegra_mc *mc); + const struct tegra_mc_ops *ops; }; struct tegra_mc { @@ -210,12 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc); #ifdef CONFIG_TEGRA_MC struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev); +int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev); #else static inline struct tegra_mc * devm_tegra_memory_controller_get(struct device *dev) { return ERR_PTR(-ENODEV); } + +static inline int +tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev) +{ + return -ENODEV; +} #endif #endif /* __SOC_TEGRA_MC_H__ */ diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h index 08477d7bfab9..433878927026 100644 --- a/include/soc/tegra/pm.h +++ b/include/soc/tegra/pm.h @@ -14,6 +14,7 @@ enum tegra_suspend_mode { TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */ TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */ TEGRA_MAX_SUSPEND_MODE, + TEGRA_SUSPEND_NOT_READY, }; #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) @@ -28,6 +29,7 @@ void tegra_pm_clear_cpu_in_lp2(void); void tegra_pm_set_cpu_in_lp2(void); int tegra_pm_enter_lp2(void); int tegra_pm_park_secondary_cpu(unsigned long cpu); +void tegra_pm_init_suspend(void); #else static inline enum tegra_suspend_mode tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode) @@ -61,6 +63,10 @@ static inline int tegra_pm_park_secondary_cpu(unsigned long cpu) { return -ENOTSUPP; } + +static inline void tegra_pm_init_suspend(void) +{ +} #endif /* CONFIG_PM_SLEEP */ #endif /* __SOC_TEGRA_PM_H__ */ diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 361cb64246f7..d186bccd125d 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -171,6 +171,8 @@ int tegra_io_rail_power_off(unsigned int id); void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); +bool tegra_pmc_core_domain_state_synced(void); + #else static inline int tegra_powergate_power_on(unsigned int id) { @@ -227,6 +229,11 @@ static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) { } +static inline bool tegra_pmc_core_domain_state_synced(void) +{ + return false; +} + #endif /* CONFIG_SOC_TEGRA_PMC */ #if defined(CONFIG_SOC_TEGRA_PMC) && defined(CONFIG_PM_SLEEP) diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index 277087f635f3..d91289c6f00e 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -165,8 +165,6 @@ struct snd_compr { }; /* compress device register APIs */ -int snd_compress_register(struct snd_compr *device); -int snd_compress_deregister(struct snd_compr *device); int snd_compress_new(struct snd_card *card, int device, int type, const char *id, struct snd_compr *compr); diff --git a/include/sound/core.h b/include/sound/core.h index 1f9aef0adbc9..b7e9b58d3c78 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -117,6 +117,8 @@ struct snd_card { struct device card_dev; /* cardX object for sysfs */ const 
struct attribute_group *dev_groups[4]; /* assigned sysfs attr */ bool registered; /* card_dev is registered? */ + bool managed; /* managed via devres */ + bool releasing; /* during card free process */ int sync_irq; /* assigned irq, used for PCM sync */ wait_queue_head_t remove_sleep; @@ -128,7 +130,9 @@ struct snd_card { #ifdef CONFIG_PM unsigned int power_state; /* power state */ + atomic_t power_ref; wait_queue_head_t power_sleep; + wait_queue_head_t power_ref_sleep; #endif #if IS_ENABLED(CONFIG_SND_MIXER_OSS) @@ -142,21 +146,61 @@ struct snd_card { #ifdef CONFIG_PM static inline unsigned int snd_power_get_state(struct snd_card *card) { - return card->power_state; + return READ_ONCE(card->power_state); } static inline void snd_power_change_state(struct snd_card *card, unsigned int state) { - card->power_state = state; + WRITE_ONCE(card->power_state, state); wake_up(&card->power_sleep); } +/** + * snd_power_ref - Take the reference count for power control + * @card: sound card object + * + * The power_ref count of the card is used to block the snd_power_sync_ref() + * operation while any reference is held. This function increments the count. + * The counterpart snd_power_unref() must be called later to drop it. + */ +static inline void snd_power_ref(struct snd_card *card) +{ + atomic_inc(&card->power_ref); +} + +/** + * snd_power_unref - Release the reference count for power control + * @card: sound card object + */ +static inline void snd_power_unref(struct snd_card *card) +{ + if (atomic_dec_and_test(&card->power_ref)) + wake_up(&card->power_ref_sleep); +} + +/** + * snd_power_sync_ref - wait until the card power_ref is freed + * @card: sound card object + * + * This function is used to synchronize with a pending power_ref being + * released. + */ +static inline void snd_power_sync_ref(struct snd_card *card) +{ + wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref)); +} + /* init.c */ -int snd_power_wait(struct snd_card *card, unsigned int power_state); +int snd_power_wait(struct snd_card *card); +int snd_power_ref_and_wait(struct snd_card *card); #else /* !
CONFIG_PM */ -static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; } +static inline int snd_power_wait(struct snd_card *card) { return 0; } +static inline void snd_power_ref(struct snd_card *card) {} +static inline void snd_power_unref(struct snd_card *card) {} +static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; } +static inline void snd_power_sync_ref(struct snd_card *card) {} #define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; }) #define snd_power_change_state(card, state) do { (void)(card); } while (0) @@ -232,6 +276,9 @@ extern int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int cmd); int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret); +int snd_devm_card_new(struct device *parent, int idx, const char *xid, + struct module *module, size_t extra_size, + struct snd_card **card_ret); int snd_card_disconnect(struct snd_card *card); void snd_card_disconnect_sync(struct snd_card *card); @@ -282,6 +329,7 @@ int snd_device_get_state(struct snd_card *card, void *device_data); void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode); void snd_dma_disable(unsigned long dma); unsigned int snd_dma_pointer(unsigned long dma, unsigned int size); +int snd_devm_request_dma(struct device *dev, int dma, const char *name); #endif /* misc.c */ diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 468e38c54dd3..39787fecc8d9 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -1701,7 +1701,7 @@ struct snd_emu10k1 { struct snd_dma_buffer silent_page; /* silent page */ struct snd_dma_buffer ptb_pages; /* page table pages */ struct snd_dma_device p16v_dma_dev; - struct snd_dma_buffer p16v_buffer; + struct snd_dma_buffer *p16v_buffer; struct snd_util_memhdr *memhdr; /* page allocation list */ @@ -1796,14 +1796,12 @@ int snd_emu10k1_create(struct snd_card *card, unsigned short extout_mask, long max_cache_bytes, int enable_ir, - uint subsystem, - struct snd_emu10k1 ** remu); + uint subsystem); int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device); int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device); int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device); int snd_p16v_pcm(struct snd_emu10k1 *emu, int device); -int snd_p16v_free(struct snd_emu10k1 * emu); int snd_p16v_mixer(struct snd_emu10k1 * emu); int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device); int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 *emu, int device); diff --git a/include/sound/emu8000.h b/include/sound/emu8000.h index ad0365d6de5e..072791bbcf5c 100644 --- a/include/sound/emu8000.h +++ b/include/sound/emu8000.h @@ -56,9 +56,6 @@ struct snd_emu8000 { unsigned long port1; /* Port usually base+0 */ unsigned long port2; /* Port usually at base+0x400 */ unsigned long port3; /* Port usually at base+0x800 */ - struct resource *res_port1; - struct resource *res_port2; - struct resource *res_port3; unsigned short last_reg;/* Last register command */ spinlock_t reg_lock; diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 2e8d51937acd..01570dbda503 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -114,7 +114,6 @@ struct hda_codec_ops { int (*resume)(struct hda_codec *codec); int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid); #endif - void (*reboot_notify)(struct hda_codec *codec); void (*stream_pm)(struct hda_codec *codec, 
hda_nid_t nid, bool on); }; diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index a125e3814b58..375581634143 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -51,7 +51,7 @@ enum hdac_ext_stream_type { * @decoupled: stream host and link is decoupled * @link_locked: link is locked * @link_prepared: link is prepared - * link_substream: link substream + * @link_substream: link substream */ struct hdac_ext_stream { struct hdac_stream hstream; diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 4b3a1d374b90..4fc733c8c570 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -65,13 +65,23 @@ struct hdmi_codec_ops { /* * Configures HDMI-encoder for audio stream. - * Mandatory + * Having either prepare or hw_params is mandatory. */ int (*hw_params)(struct device *dev, void *data, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms); /* + * Configures HDMI-encoder for audio stream. Can be called + * multiple times for each setup. + * + * Having either prepare or hw_params is mandatory. + */ + int (*prepare)(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /* * Shuts down the audio stream. * Mandatory */ diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 5daa937684a4..b197e3f431c1 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -12,6 +12,7 @@ #include <asm/page.h> struct device; +struct vm_area_struct; /* * buffer device info @@ -30,13 +31,13 @@ struct snd_dma_device { #define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */ #define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */ #define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */ -#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cahced */ +#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */ #ifdef CONFIG_SND_DMA_SGBUF #define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */ -#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */ +#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */ #else #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */ -#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC +#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC #endif #ifdef CONFIG_GENERIC_ALLOCATOR #define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */ @@ -64,84 +65,23 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size) return (size + PAGE_SIZE - 1) >> PAGE_SHIFT; } -#ifdef CONFIG_SND_DMA_SGBUF -/* - * Scatter-Gather generic device pages - */ -void *snd_malloc_sgbuf_pages(struct device *device, - size_t size, struct snd_dma_buffer *dmab, - size_t *res_size); -int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab); - -struct snd_sg_page { - void *buf; - dma_addr_t addr; -}; - -struct snd_sg_buf { - int size; /* allocated byte size */ - int pages; /* allocated pages */ - int tblsize; /* allocated table size */ - struct snd_sg_page *table; /* address table */ - struct page **page_table; /* page table (for vmap/vunmap) */ - struct device *dev; -}; - -/* - * return the physical address at the corresponding offset - */ -static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, - size_t offset) -{ - struct snd_sg_buf *sgbuf = dmab->private_data; - dma_addr_t addr; - - if (!sgbuf) - return dmab->addr + offset; - addr = sgbuf->table[offset >> PAGE_SHIFT].addr; - addr &= ~((dma_addr_t)PAGE_SIZE - 1); - return addr + offset % PAGE_SIZE; -} - -/* - * return the 
virtual address at the corresponding offset - */ -static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, - size_t offset) -{ - struct snd_sg_buf *sgbuf = dmab->private_data; - - if (!sgbuf) - return dmab->area + offset; - return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; -} - -unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, - unsigned int ofs, unsigned int size); -#else -/* non-SG versions */ -static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, - size_t offset) -{ - return dmab->addr + offset; -} - -static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, - size_t offset) -{ - return dmab->area + offset; -} - -#define snd_sgbuf_get_chunk_size(dmab, ofs, size) (size) - -#endif /* CONFIG_SND_DMA_SGBUF */ - /* allocate/release a buffer */ int snd_dma_alloc_pages(int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab); int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab); void snd_dma_free_pages(struct snd_dma_buffer *dmab); +int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, + struct vm_area_struct *area); + +dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset); +struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset); +unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, + unsigned int ofs, unsigned int size); + +/* device-managed memory allocator */ +struct snd_dma_buffer *snd_devm_alloc_pages(struct device *dev, int type, + size_t size); #endif /* __SOUND_MEMALLOC_H */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2e1200d17d0c..33451f8ff755 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1066,6 +1066,7 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, void snd_pcm_set_sync(struct snd_pcm_substream *substream); int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); +void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_period_elapsed(struct snd_pcm_substream *substream); snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *buf, bool interleaved, @@ -1203,11 +1204,48 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm, int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size); int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream); -void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type, - struct device *data, size_t size, size_t max); -void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type, - struct device *data, - size_t size, size_t max); +int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type, + struct device *data, size_t size, size_t max); +int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type, + struct device *data, + size_t size, size_t max); + +/** + * snd_pcm_set_fixed_buffer - Preallocate and set up the fixed size PCM buffer + * @substream: the pcm substream instance + * @type: DMA type (SNDRV_DMA_TYPE_*) + * @data: DMA type dependent data + * @size: the requested pre-allocation size in bytes + * + * This is a variant of snd_pcm_set_managed_buffer(), but it pre-allocates + * only a buffer of the given size and doesn't allow re-allocation or dynamic + * allocation of a larger buffer, unlike the standard one. + * The function may return -ENOMEM, hence the caller must check the return value.
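A usage sketch of these now int-returning buffer helpers; the mychip_* naming, the device pointer, and the 64 KiB size are assumptions for illustration only:

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/memalloc.h>

/* Hypothetical driver: create a PCM with one fixed DMA buffer each. */
static int mychip_pcm_create(struct snd_card *card, struct device *dev)
{
	struct snd_pcm *pcm;
	int err;

	/* one playback and one capture substream */
	err = snd_pcm_new(card, "mychip", 0, 1, 1, &pcm);
	if (err < 0)
		return err;

	/* fixed 64 KiB DMA buffer per substream; no resizing later */
	err = snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
					   dev, 64 * 1024);
	if (err < 0)	/* __must_check: preallocation can fail */
		return err;

	return 0;
}

Because the fixed variants are marked __must_check, a driver that ignores the result gets a compiler warning rather than a silent preallocation failure.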
+ */ +static inline int __must_check +snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type, + struct device *data, size_t size) +{ + return snd_pcm_set_managed_buffer(substream, type, data, size, 0); +} + +/** + * snd_pcm_set_fixed_buffer_all - Preallocate and set up the fixed size PCM buffer + * @pcm: the pcm instance + * @type: DMA type (SNDRV_DMA_TYPE_*) + * @data: DMA type dependent data + * @size: the requested pre-allocation size in bytes + * + * Apply the fixed buffer set-up via snd_pcm_set_fixed_buffer() for + * all substreams. If any allocation fails, it returns -ENOMEM, hence the + * caller must check the return value. + */ +static inline int __must_check +snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type, + struct device *data, size_t size) +{ + return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0); +} int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream, size_t size, gfp_t gfp_flags); @@ -1253,14 +1291,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) -#ifdef CONFIG_SND_DMA_SGBUF -/* - * SG-buffer handling - */ -#define snd_pcm_substream_sgbuf(substream) \ - snd_pcm_get_dma_buf(substream)->private_data -#endif /* SND_DMA_SGBUF */ - /** * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset * @substream: PCM substream * @ofs: byte offset */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } /** - * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset - * @substream: PCM substream - * @ofs: byte offset - */ -static inline void * -snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs) -{ - return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs); -} - -/** * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the * contig.
page from the given size * @substream: PCM substream diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h index 0939aa45e2fe..64e84441cde1 100644 --- a/include/sound/pcm_iec958.h +++ b/include/sound/pcm_iec958.h @@ -4,6 +4,14 @@ #include <linux/types.h> +int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len); + +int snd_pcm_fill_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs, + size_t len); + +int snd_pcm_fill_iec958_consumer_hw_params(struct snd_pcm_hw_params *params, + u8 *cs, size_t len); + int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs, size_t len); diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h index 0feaf16e6ac0..95100cff25d1 100644 --- a/include/sound/pxa2xx-lib.h +++ b/include/sound/pxa2xx-lib.h @@ -14,18 +14,12 @@ struct snd_soc_component; extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); -extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd); extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream); -extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream, - struct vm_area_struct *vma); -extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream); -extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm); -extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component, - struct snd_pcm *pcm); +extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm); extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd); extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component, @@ -35,8 +29,6 @@ extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component, extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); -extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component, - struct snd_pcm_substream *substream); extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component, struct snd_pcm_substream *substream); extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component, @@ -44,9 +36,6 @@ extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component, extern snd_pcm_uframes_t pxa2xx_soc_pcm_pointer(struct snd_soc_component *component, struct snd_pcm_substream *substream); -extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct vm_area_struct *vma); /* AC97 */ diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 334842daa904..989e1517332d 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -81,6 +81,8 @@ struct snd_rawmidi_substream { bool opened; /* open flag */ bool append; /* append flag (merge more streams) */ bool active_sensing; /* send active sensing when close */ + unsigned int framing; /* whether to frame input data */ + unsigned int clock_type; /* clock source to use for input framing */ int use_count; /* use counter (for output) */ size_t bytes; struct snd_rawmidi *rmidi; diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 0bc29c4516e7..0dcb361a98bb 100644 --- a/include/sound/soc-dai.h +++ 
b/include/sound/soc-dai.h @@ -36,6 +36,22 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_MSB SND_SOC_DAIFMT_LEFT_J #define SND_SOC_DAIFMT_LSB SND_SOC_DAIFMT_RIGHT_J +/* Describes the possible PCM format */ +/* + * use SND_SOC_DAI_FORMAT_xx as each shift. + * see + * snd_soc_runtime_get_dai_fmt() + */ +#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT 0 +#define SND_SOC_POSSIBLE_DAIFMT_FORMAT_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_FORMAT_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_I2S (1 << SND_SOC_DAI_FORMAT_I2S) +#define SND_SOC_POSSIBLE_DAIFMT_RIGHT_J (1 << SND_SOC_DAI_FORMAT_RIGHT_J) +#define SND_SOC_POSSIBLE_DAIFMT_LEFT_J (1 << SND_SOC_DAI_FORMAT_LEFT_J) +#define SND_SOC_POSSIBLE_DAIFMT_DSP_A (1 << SND_SOC_DAI_FORMAT_DSP_A) +#define SND_SOC_POSSIBLE_DAIFMT_DSP_B (1 << SND_SOC_DAI_FORMAT_DSP_B) +#define SND_SOC_POSSIBLE_DAIFMT_AC97 (1 << SND_SOC_DAI_FORMAT_AC97) +#define SND_SOC_POSSIBLE_DAIFMT_PDM (1 << SND_SOC_DAI_FORMAT_PDM) + /* * DAI Clock gating. * @@ -45,6 +61,17 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */ #define SND_SOC_DAIFMT_GATED (0 << 4) /* clock is gated */ +/* Describes the possible PCM format */ +/* + * define GATED -> CONT. GATED will be selected if both are selected. + * see + * snd_soc_runtime_get_dai_fmt() + */ +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT 16 +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_MASK (0xFFFF << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_GATED (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CONT (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_SHIFT) + /* * DAI hardware signal polarity. * @@ -71,6 +98,14 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_IB_NF (3 << 8) /* invert BCLK + nor FRM */ #define SND_SOC_DAIFMT_IB_IF (4 << 8) /* invert BCLK + FRM */ +/* Describes the possible PCM format */ +#define SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT 32 +#define SND_SOC_POSSIBLE_DAIFMT_INV_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_NB_NF (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_NB_IF (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_IB_NF (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_IB_IF (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_INV_SHIFT) + /* * DAI hardware clock providers/consumers * @@ -89,6 +124,14 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC #define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC +/* Describes the possible PCM format */ +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48 +#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFP (0x1ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFP (0x2ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBP_CFC (0x4ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) +#define SND_SOC_POSSIBLE_DAIFMT_CBC_CFC (0x8ULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) + #define SND_SOC_DAIFMT_FORMAT_MASK 0x000f #define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0 #define SND_SOC_DAIFMT_INV_MASK 0x0f00 @@ -131,6 +174,8 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio); /* Digital Audio interface formatting */ +int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd); +u64 snd_soc_dai_get_fmt(struct
snd_soc_dai *dai, int priority); int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt); int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, @@ -292,6 +337,16 @@ struct snd_soc_dai_ops { snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, struct snd_soc_dai *); + /* + * Format list for auto selection. + * A lower-priority format will be tried if the priority format + * was not selected. + * see + * snd_soc_dai_get_fmt() + */ + u64 *auto_selectable_formats; + int num_auto_selectable_formats; + /* bit field */ unsigned int no_capture_mute:1; }; diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h index 328cf763d9b4..4afd667e124c 100644 --- a/include/sound/soc-topology.h +++ b/include/sound/soc-topology.h @@ -54,7 +54,7 @@ struct snd_soc_dobj_control { /* dynamic widget object */ struct snd_soc_dobj_widget { - unsigned int kcontrol_type; /* kcontrol type: mixer, enum, bytes */ + unsigned int *kcontrol_type; /* kcontrol type: mixer, enum, bytes */ }; /* generic dynamic object - all dynamic objects belong to this struct */ diff --git a/include/sound/soc.h b/include/sound/soc.h index e746da996351..8e6dd8a257c5 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -712,6 +712,12 @@ struct snd_soc_dai_link { /* Do not create a PCM for this DAI link (Backend link) */ unsigned int ignore:1; + /* This flag reorders the stop sequence: when it is set, the DMA + * controller stop sequence is invoked first, followed by the + * CPU DAI driver stop sequence. + */ + unsigned int stop_dma_first:1; + #ifdef CONFIG_SND_SOC_TOPOLOGY struct snd_soc_dobj dobj; /* For topology */ #endif @@ -1232,10 +1238,23 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname); int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname); -unsigned int snd_soc_of_parse_daifmt(struct device_node *np, - const char *prefix, - struct device_node **bitclkmaster, - struct device_node **framemaster); + +unsigned int snd_soc_daifmt_clock_provider_fliped(unsigned int dai_fmt); +unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame); + +unsigned int snd_soc_daifmt_parse_format(struct device_node *np, const char *prefix); +unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np, + const char *prefix, + struct device_node **bitclkmaster, + struct device_node **framemaster); +#define snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix) \ + snd_soc_daifmt_parse_clock_provider_raw(np, prefix, NULL, NULL) +#define snd_soc_daifmt_parse_clock_provider_as_phandle \ + snd_soc_daifmt_parse_clock_provider_raw +#define snd_soc_daifmt_parse_clock_provider_as_flag(np, prefix) \ + snd_soc_daifmt_clock_provider_from_bitmap( \ + snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix)) + int snd_soc_get_dai_id(struct device_node *ep); int snd_soc_get_dai_name(const struct of_phandle_args *args, const char **dai_name); diff --git a/include/sound/sof.h b/include/sound/sof.h index 502ed9b8d6a1..6a1cd8e783d8 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -101,5 +101,6 @@ struct sof_dev_desc { }; int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd); +int sof_dai_get_bclk(struct snd_soc_pcm_runtime *rtd); #endif diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 1f78b09bba55..675f3a1fe613 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -75,6 +75,7 @@ void
target_backend_unregister(const struct target_backend_ops *); void target_complete_cmd(struct se_cmd *, u8); void target_set_cmd_data_length(struct se_cmd *, int); +void target_complete_cmd_with_sense(struct se_cmd *, u8, sense_reason_t); void target_complete_cmd_with_length(struct se_cmd *, u8, int); void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index d1f7d2a45354..fb11c7693b25 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -171,7 +171,7 @@ enum tcm_sense_reason_table { TCM_WRITE_PROTECTED = R(0x0c), TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d), TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e), - TCM_CHECK_CONDITION_NOT_READY = R(0x0f), + TCM_RESERVATION_CONFLICT = R(0x10), TCM_ADDRESS_OUT_OF_RANGE = R(0x11), TCM_OUT_OF_RESOURCES = R(0x12), @@ -188,6 +188,10 @@ enum tcm_sense_reason_table { TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d), TCM_LUN_BUSY = R(0x1e), TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f), + TCM_ALUA_TG_PT_STANDBY = R(0x20), + TCM_ALUA_TG_PT_UNAVAILABLE = R(0x21), + TCM_ALUA_STATE_TRANSITION = R(0x22), + TCM_ALUA_OFFLINE = R(0x23), #undef R }; @@ -326,6 +330,7 @@ struct t10_wwn { char model[INQUIRY_MODEL_LEN + 1]; char revision[INQUIRY_REVISION_LEN + 1]; char unit_serial[INQUIRY_VPD_SERIAL_LEN]; + u32 company_id; spinlock_t t10_vpd_lock; struct se_device *t10_dev; struct config_group t10_wwn_group; @@ -452,10 +457,10 @@ enum target_core_dif_check { #define TCM_ACA_TAG 0x24 struct se_cmd { + /* Used for fail with specific sense codes */ + sense_reason_t sense_reason; /* SAM response code being sent to initiator */ u8 scsi_status; - u8 scsi_asc; - u8 scsi_ascq; u16 scsi_sense_length; unsigned unknown_data_length:1; bool state_active:1; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 3ccf591b2374..9f73ed2cf061 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,34 @@ enum afs_vl_operation { afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ }; +enum afs_cm_operation { + afs_CB_CallBack = 204, /* AFS break callback promises */ + afs_CB_InitCallBackState = 205, /* AFS initialise callback state */ + afs_CB_Probe = 206, /* AFS probe client */ + afs_CB_GetLock = 207, /* AFS get contents of CM lock table */ + afs_CB_GetCE = 208, /* AFS get cache file description */ + afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */ + afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */ + afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */ + afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */ +}; + +enum yfs_cm_operation { + yfs_CB_Probe = 206, /* YFS probe client */ + yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */ + yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */ + yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */ + yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */ + yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */ + yfs_CB_GetServerPrefs = 215, + yfs_CB_GetCellServDV = 216, + yfs_CB_GetLocalCell = 217, + yfs_CB_GetCacheConfig = 218, + yfs_CB_GetCellByNum = 65537, + yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */ + yfs_CB_CallBack = 64204, +}; + enum afs_edit_dir_op { afs_edit_dir_create, afs_edit_dir_create_error, @@ -436,6 +464,32 @@ enum afs_cb_break_reason { 
EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \ E_(afs_VL_GetCapabilities, "VL.GetCapabilities") +#define afs_cm_operations \ + EM(afs_CB_CallBack, "CB.CallBack") \ + EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \ + EM(afs_CB_Probe, "CB.Probe") \ + EM(afs_CB_GetLock, "CB.GetLock") \ + EM(afs_CB_GetCE, "CB.GetCE") \ + EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \ + EM(afs_CB_GetXStats, "CB.GetXStats") \ + EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \ + E_(afs_CB_ProbeUuid, "CB.ProbeUuid") + +#define yfs_cm_operations \ + EM(yfs_CB_Probe, "YFSCB.Probe") \ + EM(yfs_CB_GetLock, "YFSCB.GetLock") \ + EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \ + EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \ + EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \ + EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \ + EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \ + EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \ + EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \ + EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \ + EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \ + EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ + E_(yfs_CB_CallBack, "YFSCB.CallBack") + #define afs_edit_dir_ops \ EM(afs_edit_dir_create, "create") \ EM(afs_edit_dir_create_error, "c_fail") \ @@ -569,6 +623,8 @@ afs_server_traces; afs_cell_traces; afs_fs_operations; afs_vl_operations; +afs_cm_operations; +yfs_cm_operations; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; @@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call, TP_STRUCT__entry( __field(unsigned int, call ) - __field(const char *, name ) __field(u32, op ) + __field(u16, service_id ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->name = call->type->name; __entry->op = call->operation_ID; + __entry->service_id = call->service_id; ), - TP_printk("c=%08x %s o=%u", + TP_printk("c=%08x %s", __entry->call, - __entry->name, - __entry->op) + __entry->service_id == 2501 ? 
+ __print_symbolic(__entry->op, yfs_cm_operations) : + __print_symbolic(__entry->op, afs_cm_operations)) ); TRACE_EVENT(afs_call, diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index a41dd8a0c730..8f58fd95efc7 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -94,13 +94,13 @@ struct btrfs_space_info; EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \ EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \ EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \ + EM( FLUSH_DELALLOC_FULL, "FLUSH_DELALLOC_FULL") \ EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \ EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \ EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ - EM( COMMIT_TRANS, "COMMIT_TRANS") \ - EMe(FORCE_COMMIT_TRANS, "FORCE_COMMIT_TRANS") + EMe(COMMIT_TRANS, "COMMIT_TRANS") /* * First define the enums in the above macros to be exported to userspace via @@ -654,34 +654,30 @@ DEFINE_EVENT(btrfs__writepage, __extent_writepage, TRACE_EVENT(btrfs_writepage_end_io_hook, - TP_PROTO(const struct page *page, u64 start, u64 end, int uptodate), + TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 end, + int uptodate), - TP_ARGS(page, start, end, uptodate), + TP_ARGS(inode, start, end, uptodate), TP_STRUCT__entry_btrfs( __field( u64, ino ) - __field( unsigned long, index ) __field( u64, start ) __field( u64, end ) __field( int, uptodate ) __field( u64, root_objectid ) ), - TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb), - __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host)); - __entry->index = page->index; + TP_fast_assign_btrfs(inode->root->fs_info, + __entry->ino = btrfs_ino(inode); __entry->start = start; __entry->end = end; __entry->uptodate = uptodate; - __entry->root_objectid = - BTRFS_I(page->mapping->host)->root->root_key.objectid; + __entry->root_objectid = inode->root->root_key.objectid; ), - TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu " - "end=%llu uptodate=%d", + TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d", show_root_type(__entry->root_objectid), - __entry->ino, __entry->index, - __entry->start, + __entry->ino, __entry->start, __entry->end, __entry->uptodate) ); @@ -1097,7 +1093,7 @@ TRACE_EVENT(btrfs_trigger_flush, __entry->flags = flags; __entry->bytes = bytes; __entry->flush = flush; - __assign_str(reason, reason) + __assign_str(reason, reason); ), TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu", @@ -2042,7 +2038,7 @@ TRACE_EVENT(btrfs_convert_extent_bit, ); DECLARE_EVENT_CLASS(btrfs_dump_space_info, - TP_PROTO(const struct btrfs_fs_info *fs_info, + TP_PROTO(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *sinfo), TP_ARGS(fs_info, sinfo), @@ -2062,6 +2058,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info, __field( u64, delayed_refs_reserved ) __field( u64, delayed_reserved ) __field( u64, free_chunk_space ) + __field( u64, delalloc_bytes ) + __field( u64, ordered_bytes ) ), TP_fast_assign_btrfs(fs_info, @@ -2079,6 +2077,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info, __entry->delayed_refs_reserved = fs_info->delayed_refs_rsv.reserved; __entry->delayed_reserved = fs_info->delayed_block_rsv.reserved; __entry->free_chunk_space = atomic64_read(&fs_info->free_chunk_space); + __entry->delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); + __entry->ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); ), TP_printk_btrfs("flags=%s total_bytes=%llu 
bytes_used=%llu " @@ -2086,7 +2086,8 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info, "bytes_may_use=%llu bytes_readonly=%llu " "reclaim_size=%llu clamp=%d global_reserved=%llu " "trans_reserved=%llu delayed_refs_reserved=%llu " - "delayed_reserved=%llu chunk_free_space=%llu", + "delayed_reserved=%llu chunk_free_space=%llu " + "delalloc_bytes=%llu ordered_bytes=%llu", __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS), __entry->total_bytes, __entry->bytes_used, __entry->bytes_pinned, __entry->bytes_reserved, @@ -2094,11 +2095,18 @@ DECLARE_EVENT_CLASS(btrfs_dump_space_info, __entry->reclaim_size, __entry->clamp, __entry->global_reserved, __entry->trans_reserved, __entry->delayed_refs_reserved, - __entry->delayed_reserved, __entry->free_chunk_space) + __entry->delayed_reserved, __entry->free_chunk_space, + __entry->delalloc_bytes, __entry->ordered_bytes) ); DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim, - TP_PROTO(const struct btrfs_fs_info *fs_info, + TP_PROTO(struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo), + TP_ARGS(fs_info, sinfo) +); + +DEFINE_EVENT(btrfs_dump_space_info, btrfs_fail_all_tickets, + TP_PROTO(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *sinfo), TP_ARGS(fs_info, sinfo) ); diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index 5d9de24cb9c0..9a448fe9355d 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -78,20 +78,20 @@ TRACE_EVENT(cachefiles_ref, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, obj ) + __field(unsigned int, cookie ) __field(enum cachefiles_obj_ref_trace, why ) __field(int, usage ) ), TP_fast_assign( - __entry->obj = obj; - __entry->cookie = cookie; + __entry->obj = obj->fscache.debug_id; + __entry->cookie = cookie->debug_id; __entry->usage = usage; __entry->why = why; ), - TP_printk("c=%p o=%p u=%d %s", + TP_printk("c=%08x o=%08x u=%d %s", __entry->cookie, __entry->obj, __entry->usage, __print_symbolic(__entry->why, cachefiles_obj_ref_traces)) ); @@ -104,18 +104,18 @@ TRACE_EVENT(cachefiles_lookup, TP_ARGS(obj, de, inode), TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(struct inode *, inode ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->inode = inode; ), - TP_printk("o=%p d=%p i=%p", + TP_printk("o=%08x d=%p i=%p", __entry->obj, __entry->de, __entry->inode) ); @@ -126,18 +126,18 @@ TRACE_EVENT(cachefiles_mkdir, TP_ARGS(obj, de, ret), TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(int, ret ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->ret = ret; ), - TP_printk("o=%p d=%p r=%u", + TP_printk("o=%08x d=%p r=%u", __entry->obj, __entry->de, __entry->ret) ); @@ -148,18 +148,18 @@ TRACE_EVENT(cachefiles_create, TP_ARGS(obj, de, ret), TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(int, ret ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->ret = ret; ), - TP_printk("o=%p d=%p r=%u", + TP_printk("o=%08x d=%p r=%u", __entry->obj, __entry->de, __entry->ret) ); @@ -172,18 +172,18 @@ 
TRACE_EVENT(cachefiles_unlink, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(enum fscache_why_object_killed, why ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->why = why; ), - TP_printk("o=%p d=%p w=%s", + TP_printk("o=%08x d=%p w=%s", __entry->obj, __entry->de, __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) ); @@ -198,20 +198,20 @@ TRACE_EVENT(cachefiles_rename, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(struct dentry *, to ) __field(enum fscache_why_object_killed, why ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->to = to; __entry->why = why; ), - TP_printk("o=%p d=%p t=%p w=%s", + TP_printk("o=%08x d=%p t=%p w=%s", __entry->obj, __entry->de, __entry->to, __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) ); @@ -224,16 +224,16 @@ TRACE_EVENT(cachefiles_mark_active, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; ), - TP_printk("o=%p d=%p", + TP_printk("o=%08x d=%p", __entry->obj, __entry->de) ); @@ -246,22 +246,22 @@ TRACE_EVENT(cachefiles_wait_active, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) + __field(unsigned int, xobj ) __field(struct dentry *, de ) - __field(struct cachefiles_object *, xobj ) __field(u16, flags ) __field(u16, fsc_flags ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; - __entry->xobj = xobj; + __entry->xobj = xobj->fscache.debug_id; __entry->flags = xobj->flags; __entry->fsc_flags = xobj->fscache.flags; ), - TP_printk("o=%p d=%p wo=%p wf=%x wff=%x", + TP_printk("o=%08x d=%p wo=%08x wf=%x wff=%x", __entry->obj, __entry->de, __entry->xobj, __entry->flags, __entry->fsc_flags) ); @@ -275,18 +275,18 @@ TRACE_EVENT(cachefiles_mark_inactive, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(struct inode *, inode ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->inode = inode; ), - TP_printk("o=%p d=%p i=%p", + TP_printk("o=%08x d=%p i=%p", __entry->obj, __entry->de, __entry->inode) ); @@ -299,18 +299,18 @@ TRACE_EVENT(cachefiles_mark_buried, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(struct cachefiles_object *, obj ) + __field(unsigned int, obj ) __field(struct dentry *, de ) __field(enum fscache_why_object_killed, why ) ), TP_fast_assign( - __entry->obj = obj; + __entry->obj = obj->fscache.debug_id; __entry->de = de; __entry->why = why; ), - TP_printk("o=%p d=%p w=%s", + TP_printk("o=%08x d=%p w=%s", __entry->obj, __entry->de, __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) ); diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index c3d354702cb0..3d708dae1542 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, __entry->align = align; ), - TP_printk("name=%s pfn=%lx page=%p 
count=%lu align=%u", + TP_printk("name=%s pfn=0x%lx page=%p count=%lu align=%u", __get_str(name), __entry->pfn, __entry->page, @@ -60,7 +60,7 @@ TRACE_EVENT(cma_release, __entry->count = count; ), - TP_printk("name=%s pfn=%lx page=%p count=%lu", + TP_printk("name=%s pfn=0x%lx page=%p count=%lu", __get_str(name), __entry->pfn, __entry->page, diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h new file mode 100644 index 000000000000..2f422f4f1fb9 --- /dev/null +++ b/include/trace/events/damon.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM damon + +#if !defined(_TRACE_DAMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DAMON_H + +#include <linux/damon.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(damon_aggregated, + + TP_PROTO(struct damon_target *t, struct damon_region *r, + unsigned int nr_regions), + + TP_ARGS(t, r, nr_regions), + + TP_STRUCT__entry( + __field(unsigned long, target_id) + __field(unsigned int, nr_regions) + __field(unsigned long, start) + __field(unsigned long, end) + __field(unsigned int, nr_accesses) + ), + + TP_fast_assign( + __entry->target_id = t->id; + __entry->nr_regions = nr_regions; + __entry->start = r->ar.start; + __entry->end = r->ar.end; + __entry->nr_accesses = r->nr_accesses; + ), + + TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u", + __entry->target_id, __entry->nr_regions, + __entry->start, __entry->end, __entry->nr_accesses) +); + +#endif /* _TRACE_DAMON_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 64e92d56c6a8..3963e79ca7b4 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence, ), TP_fast_assign( - __assign_str(driver, fence->ops->get_driver_name(fence)) - __assign_str(timeline, fence->ops->get_timeline_name(fence)) + __assign_str(driver, fence->ops->get_driver_name(fence)); + __assign_str(timeline, fence->ops->get_timeline_name(fence)); __entry->context = fence->context; __entry->seqno = fence->seqno; ), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 56b113e3cd6a..4e881d91c874 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1818,6 +1818,7 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end, TP_ARGS(inode, cluster_idx, compressed_size, ret) ); +#ifdef CONFIG_F2FS_IOSTAT TRACE_EVENT(f2fs_iostat, TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat), @@ -1894,6 +1895,102 @@ TRACE_EVENT(f2fs_iostat, __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); +#ifndef __F2FS_IOSTAT_LATENCY_TYPE +#define __F2FS_IOSTAT_LATENCY_TYPE +struct f2fs_iostat_latency { + unsigned int peak_lat; + unsigned int avg_lat; + unsigned int cnt; +}; +#endif /* __F2FS_IOSTAT_LATENCY_TYPE */ + +TRACE_EVENT(f2fs_iostat_latency, + + TP_PROTO(struct f2fs_sb_info *sbi, struct f2fs_iostat_latency (*iostat_lat)[NR_PAGE_TYPE]), + + TP_ARGS(sbi, iostat_lat), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, d_rd_peak) + __field(unsigned int, d_rd_avg) + __field(unsigned int, d_rd_cnt) + __field(unsigned int, n_rd_peak) + __field(unsigned int, n_rd_avg) + __field(unsigned int, n_rd_cnt) + __field(unsigned int, m_rd_peak) + __field(unsigned int, m_rd_avg) + __field(unsigned int, m_rd_cnt) + __field(unsigned int, d_wr_s_peak) + __field(unsigned int, d_wr_s_avg) + __field(unsigned int, 
d_wr_s_cnt) + __field(unsigned int, n_wr_s_peak) + __field(unsigned int, n_wr_s_avg) + __field(unsigned int, n_wr_s_cnt) + __field(unsigned int, m_wr_s_peak) + __field(unsigned int, m_wr_s_avg) + __field(unsigned int, m_wr_s_cnt) + __field(unsigned int, d_wr_as_peak) + __field(unsigned int, d_wr_as_avg) + __field(unsigned int, d_wr_as_cnt) + __field(unsigned int, n_wr_as_peak) + __field(unsigned int, n_wr_as_avg) + __field(unsigned int, n_wr_as_cnt) + __field(unsigned int, m_wr_as_peak) + __field(unsigned int, m_wr_as_avg) + __field(unsigned int, m_wr_as_cnt) + ), + + TP_fast_assign( + __entry->dev = sbi->sb->s_dev; + __entry->d_rd_peak = iostat_lat[0][DATA].peak_lat; + __entry->d_rd_avg = iostat_lat[0][DATA].avg_lat; + __entry->d_rd_cnt = iostat_lat[0][DATA].cnt; + __entry->n_rd_peak = iostat_lat[0][NODE].peak_lat; + __entry->n_rd_avg = iostat_lat[0][NODE].avg_lat; + __entry->n_rd_cnt = iostat_lat[0][NODE].cnt; + __entry->m_rd_peak = iostat_lat[0][META].peak_lat; + __entry->m_rd_avg = iostat_lat[0][META].avg_lat; + __entry->m_rd_cnt = iostat_lat[0][META].cnt; + __entry->d_wr_s_peak = iostat_lat[1][DATA].peak_lat; + __entry->d_wr_s_avg = iostat_lat[1][DATA].avg_lat; + __entry->d_wr_s_cnt = iostat_lat[1][DATA].cnt; + __entry->n_wr_s_peak = iostat_lat[1][NODE].peak_lat; + __entry->n_wr_s_avg = iostat_lat[1][NODE].avg_lat; + __entry->n_wr_s_cnt = iostat_lat[1][NODE].cnt; + __entry->m_wr_s_peak = iostat_lat[1][META].peak_lat; + __entry->m_wr_s_avg = iostat_lat[1][META].avg_lat; + __entry->m_wr_s_cnt = iostat_lat[1][META].cnt; + __entry->d_wr_as_peak = iostat_lat[2][DATA].peak_lat; + __entry->d_wr_as_avg = iostat_lat[2][DATA].avg_lat; + __entry->d_wr_as_cnt = iostat_lat[2][DATA].cnt; + __entry->n_wr_as_peak = iostat_lat[2][NODE].peak_lat; + __entry->n_wr_as_avg = iostat_lat[2][NODE].avg_lat; + __entry->n_wr_as_cnt = iostat_lat[2][NODE].cnt; + __entry->m_wr_as_peak = iostat_lat[2][META].peak_lat; + __entry->m_wr_as_avg = iostat_lat[2][META].avg_lat; + __entry->m_wr_as_cnt = iostat_lat[2][META].cnt; + ), + + TP_printk("dev = (%d,%d), " + "iotype [peak lat.(ms)/avg lat.(ms)/count], " + "rd_data [%u/%u/%u], rd_node [%u/%u/%u], rd_meta [%u/%u/%u], " + "wr_sync_data [%u/%u/%u], wr_sync_node [%u/%u/%u], " + "wr_sync_meta [%u/%u/%u], wr_async_data [%u/%u/%u], " + "wr_async_node [%u/%u/%u], wr_async_meta [%u/%u/%u]", + show_dev(__entry->dev), + __entry->d_rd_peak, __entry->d_rd_avg, __entry->d_rd_cnt, + __entry->n_rd_peak, __entry->n_rd_avg, __entry->n_rd_cnt, + __entry->m_rd_peak, __entry->m_rd_avg, __entry->m_rd_cnt, + __entry->d_wr_s_peak, __entry->d_wr_s_avg, __entry->d_wr_s_cnt, + __entry->n_wr_s_peak, __entry->n_wr_s_avg, __entry->n_wr_s_cnt, + __entry->m_wr_s_peak, __entry->m_wr_s_avg, __entry->m_wr_s_cnt, + __entry->d_wr_as_peak, __entry->d_wr_as_avg, __entry->d_wr_as_cnt, + __entry->n_wr_as_peak, __entry->n_wr_as_avg, __entry->n_wr_as_cnt, + __entry->m_wr_as_peak, __entry->m_wr_as_avg, __entry->m_wr_as_cnt) +); +#endif + TRACE_EVENT(f2fs_bmap, TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock), diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h index 796053e162d2..c47b63db124e 100644 --- a/include/trace/events/filemap.h +++ b/include/trace/events/filemap.h @@ -36,7 +36,7 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache, __entry->s_dev = page->mapping->host->i_rdev; ), - TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu", + TP_printk("dev %d:%d ino %lx page=%p pfn=0x%lx ofs=%lu", MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino, 
pfn_to_page(__entry->pfn), diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h index d16fe6ed78a2..446392f5ba83 100644 --- a/include/trace/events/fscache.h +++ b/include/trace/events/fscache.h @@ -160,37 +160,27 @@ fscache_cookie_traces; TRACE_EVENT(fscache_cookie, - TP_PROTO(struct fscache_cookie *cookie, - enum fscache_cookie_trace where, - int usage), + TP_PROTO(unsigned int cookie_debug_id, + int ref, + enum fscache_cookie_trace where), - TP_ARGS(cookie, where, usage), + TP_ARGS(cookie_debug_id, ref, where), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_cookie *, parent ) + __field(unsigned int, cookie ) __field(enum fscache_cookie_trace, where ) - __field(int, usage ) - __field(int, n_children ) - __field(int, n_active ) - __field(u8, flags ) + __field(int, ref ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->parent = cookie->parent; + __entry->cookie = cookie_debug_id; __entry->where = where; - __entry->usage = usage; - __entry->n_children = atomic_read(&cookie->n_children); - __entry->n_active = atomic_read(&cookie->n_active); - __entry->flags = cookie->flags; + __entry->ref = ref; ), - TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x", + TP_printk("%s c=%08x r=%d", __print_symbolic(__entry->where, fscache_cookie_traces), - __entry->cookie, __entry->usage, - __entry->parent, __entry->n_children, __entry->n_active, - __entry->flags) + __entry->cookie, __entry->ref) ); TRACE_EVENT(fscache_netfs, @@ -199,17 +189,17 @@ TRACE_EVENT(fscache_netfs, TP_ARGS(netfs), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) __array(char, name, 8 ) ), TP_fast_assign( - __entry->cookie = netfs->primary_index; + __entry->cookie = netfs->primary_index->debug_id; strncpy(__entry->name, netfs->name, 8); __entry->name[7] = 0; ), - TP_printk("c=%p n=%s", + TP_printk("c=%08x n=%s", __entry->cookie, __entry->name) ); @@ -219,26 +209,26 @@ TRACE_EVENT(fscache_acquire, TP_ARGS(cookie), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_cookie *, parent ) + __field(unsigned int, cookie ) + __field(unsigned int, parent ) __array(char, name, 8 ) - __field(int, p_usage ) + __field(int, p_ref ) __field(int, p_n_children ) __field(u8, p_flags ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->parent = cookie->parent; - __entry->p_usage = atomic_read(&cookie->parent->usage); + __entry->cookie = cookie->debug_id; + __entry->parent = cookie->parent->debug_id; + __entry->p_ref = refcount_read(&cookie->parent->ref); __entry->p_n_children = atomic_read(&cookie->parent->n_children); __entry->p_flags = cookie->parent->flags; memcpy(__entry->name, cookie->def->name, 8); __entry->name[7] = 0; ), - TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s", - __entry->cookie, __entry->parent, __entry->p_usage, + TP_printk("c=%08x p=%08x pr=%d pc=%d pf=%02x n=%s", + __entry->cookie, __entry->parent, __entry->p_ref, __entry->p_n_children, __entry->p_flags, __entry->name) ); @@ -248,9 +238,9 @@ TRACE_EVENT(fscache_relinquish, TP_ARGS(cookie, retire), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_cookie *, parent ) - __field(int, usage ) + __field(unsigned int, cookie ) + __field(unsigned int, parent ) + __field(int, ref ) __field(int, n_children ) __field(int, n_active ) __field(u8, flags ) @@ -258,17 +248,17 @@ TRACE_EVENT(fscache_relinquish, ), TP_fast_assign( - __entry->cookie = cookie; - __entry->parent = cookie->parent; - 
__entry->usage = atomic_read(&cookie->usage); + __entry->cookie = cookie->debug_id; + __entry->parent = cookie->parent->debug_id; + __entry->ref = refcount_read(&cookie->ref); __entry->n_children = atomic_read(&cookie->n_children); __entry->n_active = atomic_read(&cookie->n_active); __entry->flags = cookie->flags; __entry->retire = retire; ), - TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u", - __entry->cookie, __entry->usage, + TP_printk("c=%08x r=%d p=%08x Nc=%d Na=%d f=%02x r=%u", + __entry->cookie, __entry->ref, __entry->parent, __entry->n_children, __entry->n_active, __entry->flags, __entry->retire) ); @@ -279,23 +269,23 @@ TRACE_EVENT(fscache_enable, TP_ARGS(cookie), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(int, usage ) + __field(unsigned int, cookie ) + __field(int, ref ) __field(int, n_children ) __field(int, n_active ) __field(u8, flags ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->usage = atomic_read(&cookie->usage); + __entry->cookie = cookie->debug_id; + __entry->ref = refcount_read(&cookie->ref); __entry->n_children = atomic_read(&cookie->n_children); __entry->n_active = atomic_read(&cookie->n_active); __entry->flags = cookie->flags; ), - TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x", - __entry->cookie, __entry->usage, + TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x", + __entry->cookie, __entry->ref, __entry->n_children, __entry->n_active, __entry->flags) ); @@ -305,23 +295,23 @@ TRACE_EVENT(fscache_disable, TP_ARGS(cookie), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(int, usage ) + __field(unsigned int, cookie ) + __field(int, ref ) __field(int, n_children ) __field(int, n_active ) __field(u8, flags ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->usage = atomic_read(&cookie->usage); + __entry->cookie = cookie->debug_id; + __entry->ref = refcount_read(&cookie->ref); __entry->n_children = atomic_read(&cookie->n_children); __entry->n_active = atomic_read(&cookie->n_active); __entry->flags = cookie->flags; ), - TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x", - __entry->cookie, __entry->usage, + TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x", + __entry->cookie, __entry->ref, __entry->n_children, __entry->n_active, __entry->flags) ); @@ -333,8 +323,8 @@ TRACE_EVENT(fscache_osm, TP_ARGS(object, state, wait, oob, event_num), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_object *, object ) + __field(unsigned int, cookie ) + __field(unsigned int, object ) __array(char, state, 8 ) __field(bool, wait ) __field(bool, oob ) @@ -342,15 +332,15 @@ TRACE_EVENT(fscache_osm, ), TP_fast_assign( - __entry->cookie = object->cookie; - __entry->object = object; + __entry->cookie = object->cookie->debug_id; + __entry->object = object->debug_id; __entry->wait = wait; __entry->oob = oob; __entry->event_num = event_num; memcpy(__entry->state, state->short_name, 8); ), - TP_printk("c=%p o=%p %s %s%sev=%d", + TP_printk("c=%08x o=%08d %s %s%sev=%d", __entry->cookie, __entry->object, __entry->state, @@ -370,18 +360,18 @@ TRACE_EVENT(fscache_page, TP_ARGS(cookie, page, why), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) __field(pgoff_t, page ) __field(enum fscache_page_trace, why ) ), TP_fast_assign( - __entry->cookie = cookie; + __entry->cookie = cookie->debug_id; __entry->page = page->index; __entry->why = why; ), - TP_printk("c=%p %s pg=%lx", + TP_printk("c=%08x %s pg=%lx", __entry->cookie, __print_symbolic(__entry->why, fscache_page_traces), 
__entry->page) @@ -394,20 +384,20 @@ TRACE_EVENT(fscache_check_page, TP_ARGS(cookie, page, val, n), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) __field(void *, page ) __field(void *, val ) __field(int, n ) ), TP_fast_assign( - __entry->cookie = cookie; + __entry->cookie = cookie->debug_id; __entry->page = page; __entry->val = val; __entry->n = n; ), - TP_printk("c=%p pg=%p val=%p n=%d", + TP_printk("c=%08x pg=%p val=%p n=%d", __entry->cookie, __entry->page, __entry->val, __entry->n) ); @@ -417,14 +407,14 @@ TRACE_EVENT(fscache_wake_cookie, TP_ARGS(cookie), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) ), TP_fast_assign( - __entry->cookie = cookie; + __entry->cookie = cookie->debug_id; ), - TP_printk("c=%p", __entry->cookie) + TP_printk("c=%08x", __entry->cookie) ); TRACE_EVENT(fscache_op, @@ -434,18 +424,18 @@ TRACE_EVENT(fscache_op, TP_ARGS(cookie, op, why), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_operation *, op ) + __field(unsigned int, cookie ) + __field(unsigned int, op ) __field(enum fscache_op_trace, why ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->op = op; + __entry->cookie = cookie ? cookie->debug_id : 0; + __entry->op = op->debug_id; __entry->why = why; ), - TP_printk("c=%p op=%p %s", + TP_printk("c=%08x op=%08x %s", __entry->cookie, __entry->op, __print_symbolic(__entry->why, fscache_op_traces)) ); @@ -457,20 +447,20 @@ TRACE_EVENT(fscache_page_op, TP_ARGS(cookie, page, op, what), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) + __field(unsigned int, op ) __field(pgoff_t, page ) - __field(struct fscache_operation *, op ) __field(enum fscache_page_op_trace, what ) ), TP_fast_assign( - __entry->cookie = cookie; + __entry->cookie = cookie->debug_id; __entry->page = page ? page->index : 0; - __entry->op = op; + __entry->op = op->debug_id; __entry->what = what; ), - TP_printk("c=%p %s pg=%lx op=%p", + TP_printk("c=%08x %s pg=%lx op=%08x", __entry->cookie, __print_symbolic(__entry->what, fscache_page_op_traces), __entry->page, __entry->op) @@ -483,20 +473,20 @@ TRACE_EVENT(fscache_wrote_page, TP_ARGS(cookie, page, op, ret), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) + __field(unsigned int, cookie ) + __field(unsigned int, op ) __field(pgoff_t, page ) - __field(struct fscache_operation *, op ) __field(int, ret ) ), TP_fast_assign( - __entry->cookie = cookie; + __entry->cookie = cookie->debug_id; __entry->page = page->index; - __entry->op = op; + __entry->op = op->debug_id; __entry->ret = ret; ), - TP_printk("c=%p pg=%lx op=%p ret=%d", + TP_printk("c=%08x pg=%lx op=%08x ret=%d", __entry->cookie, __entry->page, __entry->op, __entry->ret) ); @@ -507,22 +497,22 @@ TRACE_EVENT(fscache_gang_lookup, TP_ARGS(cookie, op, results, n, store_limit), TP_STRUCT__entry( - __field(struct fscache_cookie *, cookie ) - __field(struct fscache_operation *, op ) + __field(unsigned int, cookie ) + __field(unsigned int, op ) __field(pgoff_t, results0 ) __field(int, n ) __field(pgoff_t, store_limit ) ), TP_fast_assign( - __entry->cookie = cookie; - __entry->op = op; + __entry->cookie = cookie->debug_id; + __entry->op = op->debug_id; __entry->results0 = results[0] ? 
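
Several of these events decode a stored integer with __print_symbolic(), which keeps the ring-buffer record compact (one integer) and defers the name lookup until the trace is read. A reduced illustration with invented enum values (the real tables are the fscache_*_traces macros referenced above):

        TP_printk("%s pg=%lx",
                  __print_symbolic(__entry->why,
                                   { FSCACHE_PAGE_EXAMPLE_READ,  "read"  },
                                   { FSCACHE_PAGE_EXAMPLE_WRITE, "write" }),
                  __entry->page)
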
((struct page *)results[0])->index : (pgoff_t)-1; __entry->n = n; __entry->store_limit = store_limit; ), - TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx", + TP_printk("c=%08x op=%08x r0=%lx n=%d sl=%lx", __entry->cookie, __entry->op, __entry->results0, __entry->n, __entry->store_limit) ); diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h index d233f2916584..e5c1ca6d16ee 100644 --- a/include/trace/events/intel_iommu.h +++ b/include/trace/events/intel_iommu.h @@ -15,6 +15,8 @@ #include <linux/tracepoint.h> #include <linux/intel-iommu.h> +#define MSG_MAX 256 + TRACE_EVENT(qi_submit, TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3), @@ -51,6 +53,41 @@ TRACE_EVENT(qi_submit, __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3 ) ); + +TRACE_EVENT(prq_report, + TP_PROTO(struct intel_iommu *iommu, struct device *dev, + u64 dw0, u64 dw1, u64 dw2, u64 dw3, + unsigned long seq), + + TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq), + + TP_STRUCT__entry( + __field(u64, dw0) + __field(u64, dw1) + __field(u64, dw2) + __field(u64, dw3) + __field(unsigned long, seq) + __string(iommu, iommu->name) + __string(dev, dev_name(dev)) + __dynamic_array(char, buff, MSG_MAX) + ), + + TP_fast_assign( + __entry->dw0 = dw0; + __entry->dw1 = dw1; + __entry->dw2 = dw2; + __entry->dw3 = dw3; + __entry->seq = seq; + __assign_str(iommu, iommu->name); + __assign_str(dev, dev_name(dev)); + ), + + TP_printk("%s/%s seq# %ld: %s", + __get_str(iommu), __get_str(dev), __entry->seq, + decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0, + __entry->dw1, __entry->dw2, __entry->dw3) + ) +); #endif /* _TRACE_INTEL_IOMMU_H */ /* This part must be outside protection */ diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index abb8b24744fd..0dd30de00e5b 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -12,11 +12,11 @@ struct io_wq_work; /** * io_uring_create - called after a new io_uring context was prepared * - * @fd: corresponding file descriptor - * @ctx: pointer to a ring context structure + * @fd: corresponding file descriptor + * @ctx: pointer to a ring context structure * @sq_entries: actual SQ size * @cq_entries: actual CQ size - * @flags: SQ ring flags, provided to io_uring_setup(2) + * @flags: SQ ring flags, provided to io_uring_setup(2) * * Allows to trace io_uring creation and provide pointer to a context, that can * be used later to find correlated events. @@ -52,12 +52,12 @@ TRACE_EVENT(io_uring_create, * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * - * @ctx: pointer to a ring context structure - * @opcode: describes which operation to perform + * @ctx: pointer to a ring context structure + * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers * @cq_ev_fd: whether eventfs registered or not - * @ret: return code + * @ret: return code * * Allows to trace fixed files/buffers/eventfds, that could be registered to * avoid an overhead of getting references to them for every operation. 
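
In the intel_iommu prq_report event above, note the __dynamic_array(char, buff, MSG_MAX) trick: it reserves per-record scratch space that is filled only at output time, when TP_printk passes __get_str(buff) to decode_prq_descriptor(). A plausible shape for such a decoder, purely illustrative (the real one is supplied by the intel-iommu driver and decodes the page-request fields):

        static inline char *decode_prq_descriptor_sketch(char *str, size_t size,
                                                         u64 dw0, u64 dw1, u64 dw2, u64 dw3)
        {
                /* Format into the caller's scratch buffer and hand it back so
                 * the result can be embedded directly in the TP_printk output. */
                snprintf(str, size, "dw0=%llx dw1=%llx dw2=%llx dw3=%llx",
                         dw0, dw1, dw2, dw3);
                return str;
        }
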
This @@ -142,16 +142,16 @@ TRACE_EVENT(io_uring_queue_async_work, TP_ARGS(ctx, rw, req, work, flags), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, rw ) - __field( void *, req ) + __field( void *, ctx ) + __field( int, rw ) + __field( void *, req ) __field( struct io_wq_work *, work ) __field( unsigned int, flags ) ), TP_fast_assign( __entry->ctx = ctx; - __entry->rw = rw; + __entry->rw = rw; __entry->req = req; __entry->work = work; __entry->flags = flags; @@ -196,10 +196,10 @@ TRACE_EVENT(io_uring_defer, /** * io_uring_link - called before the io_uring request added into link_list of - * another request + * another request * - * @ctx: pointer to a ring context structure - * @req: pointer to a linked request + * @ctx: pointer to a ring context structure + * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * * Allows to track linked requests, to understand dependencies between requests @@ -212,8 +212,8 @@ TRACE_EVENT(io_uring_link, TP_ARGS(ctx, req, target_req), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) + __field( void *, ctx ) + __field( void *, req ) __field( void *, target_req ) ), @@ -244,7 +244,7 @@ TRACE_EVENT(io_uring_cqring_wait, TP_ARGS(ctx, min_events), TP_STRUCT__entry ( - __field( void *, ctx ) + __field( void *, ctx ) __field( int, min_events ) ), @@ -272,7 +272,7 @@ TRACE_EVENT(io_uring_fail_link, TP_ARGS(req, link), TP_STRUCT__entry ( - __field( void *, req ) + __field( void *, req ) __field( void *, link ) ), @@ -295,14 +295,14 @@ TRACE_EVENT(io_uring_fail_link, */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, u64 user_data, long res, unsigned cflags), + TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags), TP_ARGS(ctx, user_data, res, cflags), TP_STRUCT__entry ( __field( void *, ctx ) __field( u64, user_data ) - __field( long, res ) + __field( int, res ) __field( unsigned, cflags ) ), @@ -313,18 +313,19 @@ TRACE_EVENT(io_uring_complete, __entry->cflags = cflags; ), - TP_printk("ring %p, user_data 0x%llx, result %ld, cflags %x", + TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x", __entry->ctx, (unsigned long long)__entry->user_data, __entry->res, __entry->cflags) ); - /** * io_uring_submit_sqe - called before submitting one SQE * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request * @opcode: opcode of request * @user_data: user data associated with the request + * @flags request flags * @force_nonblock: whether a context blocking or not * @sq_thread: true if sq_thread has submitted this SQE * @@ -333,41 +334,60 @@ TRACE_EVENT(io_uring_complete, */ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock, - bool sq_thread), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags, + bool force_nonblock, bool sq_thread), - TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread), + TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) + __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; + __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; ), - TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d", - __entry->ctx, __entry->opcode, 
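
The expanded io_uring_submit_sqe event here carries the request pointer and flags alongside the opcode and user_data, so one event suffices to correlate an SQE with later per-request events. A sketch of a matching call site (local variable names assumed; the trace_* wrapper itself is generated from the TRACE_EVENT definition):

        trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
                                  req->flags, force_nonblock, sq_thread);
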
- (unsigned long long) __entry->user_data, - __entry->force_nonblock, __entry->sq_thread) + TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, " + "non block %d, sq_thread %d", __entry->ctx, __entry->req, + __entry->opcode, (unsigned long long)__entry->user_data, + __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); +/* + * io_uring_poll_arm - called after arming a poll wait if successful + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * @mask: request poll events mask + * @events: registered events of interest + * + * Allows to track which fds are waiting for and what are the events of + * interest. + */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, + int mask, int events), - TP_ARGS(ctx, opcode, user_data, mask, events), + TP_ARGS(ctx, req, opcode, user_data, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) __field( int, mask ) @@ -376,16 +396,17 @@ TRACE_EVENT(io_uring_poll_arm, TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; __entry->mask = mask; __entry->events = events; ), - TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask, __entry->events) + TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask, __entry->events) ); TRACE_EVENT(io_uring_poll_wake, @@ -440,27 +461,40 @@ TRACE_EVENT(io_uring_task_add, __entry->mask) ); +/* + * io_uring_task_run - called when task_work_run() executes the poll events + * notification callbacks + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * + * Allows to track when notified poll events are processed + */ TRACE_EVENT(io_uring_task_run, - TP_PROTO(void *ctx, u8 opcode, u64 user_data), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data), - TP_ARGS(ctx, opcode, user_data), + TP_ARGS(ctx, req, opcode, user_data), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; ), - TP_printk("ring %p, op %d, data 0x%llx", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data) + TP_printk("ring %p, req %p, op %d, data 0x%llx", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data) ); #endif /* _TRACE_IO_URING_H */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index d16a32867f3a..a4dfe005983d 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -394,6 +394,107 @@ TRACE_EVENT(jbd2_lock_buffer_stall, __entry->stall_ms) ); +DECLARE_EVENT_CLASS(jbd2_journal_shrink, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, + unsigned long count), + + TP_ARGS(journal, nr_to_scan, count), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, nr_to_scan) + __field(unsigned long, count) + ), + + TP_fast_assign( + 
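
The jbd2_journal_shrink class being added here is completed by DEFINE_EVENT stanzas just below: one DECLARE_EVENT_CLASS supplies the record layout and print logic, and each further event of the same shape costs only a prototype. A hypothetical third event would be this small (name invented for illustration):

        DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_example,
                TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count),
                TP_ARGS(journal, nr_to_scan, count)
        );
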
__entry->dev = journal->j_fs_dev->bd_dev; + __entry->nr_to_scan = nr_to_scan; + __entry->count = count; + ), + + TP_printk("dev %d,%d nr_to_scan %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_to_scan, __entry->count) +); + +DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), + + TP_ARGS(journal, nr_to_scan, count) +); + +DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), + + TP_ARGS(journal, nr_to_scan, count) +); + +TRACE_EVENT(jbd2_shrink_scan_exit, + + TP_PROTO(journal_t *journal, unsigned long nr_to_scan, + unsigned long nr_shrunk, unsigned long count), + + TP_ARGS(journal, nr_to_scan, nr_shrunk, count), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, nr_to_scan) + __field(unsigned long, nr_shrunk) + __field(unsigned long, count) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->nr_to_scan = nr_to_scan; + __entry->nr_shrunk = nr_shrunk; + __entry->count = count; + ), + + TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_to_scan, __entry->nr_shrunk, + __entry->count) +); + +TRACE_EVENT(jbd2_shrink_checkpoint_list, + + TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, + unsigned long nr_freed, unsigned long nr_scanned, + tid_t next_tid), + + TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, + nr_scanned, next_tid), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(tid_t, first_tid) + __field(tid_t, tid) + __field(tid_t, last_tid) + __field(unsigned long, nr_freed) + __field(unsigned long, nr_scanned) + __field(tid_t, next_tid) + ), + + TP_fast_assign( + __entry->dev = journal->j_fs_dev->bd_dev; + __entry->first_tid = first_tid; + __entry->tid = tid; + __entry->last_tid = last_tid; + __entry->nr_freed = nr_freed; + __entry->nr_scanned = nr_scanned; + __entry->next_tid = next_tid; + ), + + TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " + "scanned %lu next transaction %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->first_tid, __entry->tid, __entry->last_tid, + __entry->nr_freed, __entry->nr_scanned, __entry->next_tid) +); + #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 829a75692cc0..ddc8c944f417 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -173,7 +173,7 @@ TRACE_EVENT(mm_page_free, __entry->order = order; ), - TP_printk("page=%p pfn=%lu order=%d", + TP_printk("page=%p pfn=0x%lx order=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order) @@ -193,7 +193,7 @@ TRACE_EVENT(mm_page_free_batched, __entry->pfn = page_to_pfn(page); ), - TP_printk("page=%p pfn=%lu order=0", + TP_printk("page=%p pfn=0x%lx order=0", pfn_to_page(__entry->pfn), __entry->pfn) ); @@ -219,7 +219,7 @@ TRACE_EVENT(mm_page_alloc, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s", __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, __entry->pfn != -1UL ? 
__entry->pfn : 0, __entry->order, @@ -245,7 +245,7 @@ DECLARE_EVENT_CLASS(mm_page, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", + TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d", __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, __entry->pfn != -1UL ? __entry->pfn : 0, __entry->order, @@ -278,7 +278,7 @@ TRACE_EVENT(mm_page_pcpu_drain, __entry->migratetype = migratetype; ), - TP_printk("page=%p pfn=%lu order=%d migratetype=%d", + TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order, __entry->migratetype) ); @@ -312,7 +312,7 @@ TRACE_EVENT(mm_page_alloc_extfrag, get_pageblock_migratetype(page)); ), - TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", + TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->alloc_order, diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h index f9802562edf6..491098a0d8ed 100644 --- a/include/trace/events/kyber.h +++ b/include/trace/events/kyber.h @@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency, ), TP_fast_assign( - __entry->dev = disk_devt(queue_to_disk(q)); + __entry->dev = disk_devt(q->disk); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); strlcpy(__entry->type, type, sizeof(__entry->type)); __entry->percentile = percentile; @@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust, ), TP_fast_assign( - __entry->dev = disk_devt(queue_to_disk(q)); + __entry->dev = disk_devt(q->disk); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); __entry->depth = depth; ), @@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled, ), TP_fast_assign( - __entry->dev = disk_devt(queue_to_disk(q)); + __entry->dev = disk_devt(q->disk); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); ), diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 9fb2a3bbcdfb..779f3fad9ecd 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -21,7 +21,8 @@ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ EM( MR_NUMA_MISPLACED, "numa_misplaced") \ EM( MR_CONTIG_RANGE, "contig_range") \ - EMe(MR_LONGTERM_PIN, "longterm_pin") + EM( MR_LONGTERM_PIN, "longterm_pin") \ + EMe(MR_DEMOTION, "demotion") /* * First define the enums in the above macros to be exported to userspace diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 629c7a0eaff2..116ed4d5d0f8 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -48,7 +48,9 @@ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ #define show_gfp_flags(flags) \ (flags) ? 
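
The GFP-flag additions here extend the {mask, name} table consumed by __print_flags(), which joins the names of all set bits with "|" at output time; adding the __GFP_ZEROTAGS and __GFP_SKIP_KASAN_POISON entries is all the decode side needs for the new bits to render. Typical use of the resulting helper, as it appears in events elsewhere in this tree:

        TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
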
__print_flags(flags, "|", \ @@ -73,7 +75,7 @@ #define IF_HAVE_PG_HWPOISON(flag,string) #endif -#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) #define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string} #else #define IF_HAVE_PG_IDLE(flag,string) @@ -85,6 +87,12 @@ #define IF_HAVE_PG_ARCH_2(flag,string) #endif +#ifdef CONFIG_KASAN_HW_TAGS +#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) +#endif + #define __def_pageflag_names \ {1UL << PG_locked, "locked" }, \ {1UL << PG_waiters, "waiters" }, \ @@ -112,7 +120,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ IF_HAVE_PG_IDLE(PG_young, "young" ) \ IF_HAVE_PG_IDLE(PG_idle, "idle" ) \ -IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) +IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \ +IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") #define show_page_flags(flags) \ (flags) ? __print_flags(flags, "|", \ @@ -156,7 +165,6 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) {VM_UFFD_MISSING, "uffd_missing" }, \ IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ - {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ {VM_LOCKED, "locked" }, \ {VM_IO, "io" }, \ diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h index 775a46d0b0f0..6bf43176f14c 100644 --- a/include/trace/events/mptcp.h +++ b/include/trace/events/mptcp.h @@ -73,6 +73,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __field(u64, data_seq) __field(u32, subflow_seq) __field(u16, data_len) + __field(u16, csum) __field(u8, use_map) __field(u8, dsn64) __field(u8, data_fin) @@ -82,6 +83,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __field(u8, frozen) __field(u8, reset_transient) __field(u8, reset_reason) + __field(u8, csum_reqd) ), TP_fast_assign( @@ -89,6 +91,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __entry->data_seq = mpext->data_seq; __entry->subflow_seq = mpext->subflow_seq; __entry->data_len = mpext->data_len; + __entry->csum = (__force u16)mpext->csum; __entry->use_map = mpext->use_map; __entry->dsn64 = mpext->dsn64; __entry->data_fin = mpext->data_fin; @@ -98,16 +101,18 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __entry->frozen = mpext->frozen; __entry->reset_transient = mpext->reset_transient; __entry->reset_reason = mpext->reset_reason; + __entry->csum_reqd = mpext->csum_reqd; ), - TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u", + TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u", __entry->data_ack, __entry->data_seq, __entry->subflow_seq, __entry->data_len, - __entry->use_map, __entry->dsn64, - __entry->data_fin, __entry->use_ack, - __entry->ack64, __entry->mpc_map, - __entry->frozen, __entry->reset_transient, - __entry->reset_reason) + __entry->csum, __entry->use_map, + __entry->dsn64, __entry->data_fin, + __entry->use_ack, __entry->ack64, + __entry->mpc_map, __entry->frozen, + __entry->reset_transient, __entry->reset_reason, + __entry->csum_reqd) ); DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status, diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 2399073c3afc..78c448c6ab4c 100644 --- a/include/trace/events/net.h +++ 
b/include/trace/events/net.h @@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template, __assign_str(name, skb->dev->name); ), - TP_printk("dev=%s skbaddr=%p len=%u", + TP_printk("dev=%s skbaddr=%px len=%u", __get_str(name), __entry->skbaddr, __entry->len) ) diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index de1c64635e42..4d470bffd9f1 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -139,7 +139,7 @@ TRACE_EVENT(netfs_read, TP_fast_assign( __entry->rreq = rreq->debug_id; - __entry->cookie = rreq->cookie_debug_id; + __entry->cookie = rreq->cache_resources.debug_id; __entry->start = start; __entry->len = len; __entry->what = what; diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h new file mode 100644 index 000000000000..82f741ec0f57 --- /dev/null +++ b/include/trace/events/osnoise.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM osnoise + +#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _OSNOISE_TRACE_H + +#include <linux/tracepoint.h> +TRACE_EVENT(thread_noise, + + TP_PROTO(struct task_struct *t, u64 start, u64 duration), + + TP_ARGS(t, start, duration), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN) + __field( u64, start ) + __field( u64, duration) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, t->comm, TASK_COMM_LEN); + __entry->pid = t->pid; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%8s:%d start %llu.%09u duration %llu ns", + __entry->comm, + __entry->pid, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(softirq_noise, + + TP_PROTO(int vector, u64 start, u64 duration), + + TP_ARGS(vector, start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __field( int, vector ) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%8s:%d start %llu.%09u duration %llu ns", + show_softirq_name(__entry->vector), + __entry->vector, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(irq_noise, + + TP_PROTO(int vector, const char *desc, u64 start, u64 duration), + + TP_ARGS(vector, desc, start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __string( desc, desc ) + __field( int, vector ) + + ), + + TP_fast_assign( + __assign_str(desc, desc); + __entry->vector = vector; + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("%s:%d start %llu.%09u duration %llu ns", + __get_str(desc), + __entry->vector, + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(nmi_noise, + + TP_PROTO(u64 start, u64 duration), + + TP_ARGS(start, duration), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + ), + + TP_fast_assign( + __entry->start = start; + __entry->duration = duration; + ), + + TP_printk("start %llu.%09u duration %llu ns", + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration) +); + +TRACE_EVENT(sample_threshold, + + TP_PROTO(u64 start, u64 duration, u64 interference), + + TP_ARGS(start, duration, interference), + + TP_STRUCT__entry( + __field( u64, start ) + __field( u64, duration) + __field( u64, interference) + ), + + 
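
The osnoise events above print timestamps as %llu.%09u pairs via __print_ns_to_secs() and __print_ns_without_secs(), whose do_div()-based definitions are added to trace/trace_events.h near the end of this diff. What that split evaluates to, in plain C:

        u64 ns = 5123456789ULL;
        u64 secs = ns;
        u32 rem = do_div(secs, NSEC_PER_SEC);   /* secs becomes 5, rem 123456789 */
        /* rendered as "start 5.123456789 ..." */
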
TP_fast_assign( + __entry->start = start; + __entry->duration = duration; + __entry->interference = interference; + ), + + TP_printk("start %llu.%09u duration %llu ns interference %llu", + __print_ns_to_secs(__entry->start), + __print_ns_without_secs(__entry->start), + __entry->duration, + __entry->interference) +); + +#endif /* _TRACE_OSNOISE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index ad0aa7f31675..ca534501158b 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -60,7 +60,7 @@ TRACE_EVENT(page_pool_state_release, __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p pfn=%lu release=%u", + TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u", __entry->pool, __entry->page, __entry->pfn, __entry->release) ); @@ -85,7 +85,7 @@ TRACE_EVENT(page_pool_state_hold, __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p pfn=%lu hold=%u", + TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u", __entry->pool, __entry->page, __entry->pfn, __entry->hold) ); diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h index 5d2ea93956ce..8a99c1cd417b 100644 --- a/include/trace/events/page_ref.h +++ b/include/trace/events/page_ref.h @@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template, TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d", __entry->pfn, - show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + show_page_flags(__entry->flags & PAGEFLAGS_MASK), __entry->count, __entry->mapcount, __entry->mapping, __entry->mt, __entry->val) @@ -88,7 +88,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d", __entry->pfn, - show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + show_page_flags(__entry->flags & PAGEFLAGS_MASK), __entry->count, __entry->mapcount, __entry->mapping, __entry->mt, __entry->val, __entry->ret) diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h index e1735fe7c76a..1d28431e85bd 100644 --- a/include/trace/events/pagemap.h +++ b/include/trace/events/pagemap.h @@ -46,7 +46,7 @@ TRACE_EVENT(mm_lru_insertion, ), /* Flag format is based on page-types.c formatting for pagemap */ - TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s", + TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s", __entry->page, __entry->pfn, __entry->lru, @@ -75,7 +75,7 @@ TRACE_EVENT(mm_lru_activate, ), /* Flag format is based on page-types.c formatting for pagemap */ - TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn) + TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn) ); diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 330d32d84485..59c945b66f9c 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -41,11 +41,39 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state = txq->state; ), - TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", + TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px", __entry->ifindex, __entry->handle, __entry->parent, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); +TRACE_EVENT(qdisc_enqueue, + + TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb), + + TP_ARGS(qdisc, txq, skb), + + TP_STRUCT__entry( + __field(struct 
Qdisc *, qdisc) + __field(const struct netdev_queue *, txq) + __field(void *, skbaddr) + __field(int, ifindex) + __field(u32, handle) + __field(u32, parent) + ), + + TP_fast_assign( + __entry->qdisc = qdisc; + __entry->txq = txq; + __entry->skbaddr = skb; + __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; + __entry->handle = qdisc->handle; + __entry->parent = qdisc->parent; + ), + + TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px", + __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr) +); + TRACE_EVENT(qdisc_reset, TP_PROTO(struct Qdisc *q), diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 6768b64bc738..670e41783edd 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock, * "WakeNot": Don't wake rcuo kthread. * "WakeNotPoll": Don't wake rcuo kthread because it is polling. * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge. + * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended. * "WokeEmpty": rcuo CB kthread woke to find empty list. */ TRACE_EVENT_RCU(rcu_nocb_wake, diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index ffdbe6f85da8..b2a2672e6632 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class, TP_fast_assign( __entry->cred = gc; __entry->service = gc->gc_service; - __assign_str(principal, gc->gc_principal) + __assign_str(principal, gc->gc_principal); ), TP_printk("cred=%p service=%s principal='%s'", @@ -535,7 +535,7 @@ TRACE_EVENT(rpcgss_upcall_msg, ), TP_fast_assign( - __assign_str(msg, buf) + __assign_str(msg, buf); ), TP_printk("msg='%s'", __get_str(msg)) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index bd55908c1bef..de4195499592 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -793,6 +793,39 @@ TRACE_EVENT(xprtrdma_post_send, ) ); +TRACE_EVENT(xprtrdma_post_send_err, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + const struct rpcrdma_req *req, + int rc + ), + + TP_ARGS(r_xprt, req, rc), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(int, rc) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + const struct rpcrdma_ep *ep = r_xprt->rx_ep; + + __entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client ? 
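
The __assign_str() call sites in these rpcgss/sunrpc hunks gain explicit semicolons. The macro has historically expanded to a strcpy() statement carrying its own trailing semicolon (its expansion is visible at the end of this diff), which is why the bare form compiled; terminating the call sites themselves clears the way for the macro to drop that embedded semicolon later:

        __assign_str(msg, buf)          /* old: leaned on the ';' inside the macro */
        __assign_str(msg, buf);         /* new: a normal statement at the call site */
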
+ rqst->rq_task->tk_client->cl_clid : -1; + __entry->rc = rc; + ), + + TP_printk("task:%u@%u cq.id=%u rc=%d", + __entry->task_id, __entry->client_id, + __entry->cq_id, __entry->rc + ) +); + TRACE_EVENT(xprtrdma_post_recv, TP_PROTO( const struct rpcrdma_rep *rep @@ -818,16 +851,14 @@ TRACE_EVENT(xprtrdma_post_recv, TRACE_EVENT(xprtrdma_post_recvs, TP_PROTO( const struct rpcrdma_xprt *r_xprt, - unsigned int count, - int status + unsigned int count ), - TP_ARGS(r_xprt, count, status), + TP_ARGS(r_xprt, count), TP_STRUCT__entry( __field(u32, cq_id) __field(unsigned int, count) - __field(int, status) __field(int, posted) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) @@ -838,15 +869,44 @@ TRACE_EVENT(xprtrdma_post_recvs, __entry->cq_id = ep->re_attr.recv_cq->res.id; __entry->count = count; - __entry->status = status; __entry->posted = ep->re_receive_count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)", + TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active", + __get_str(addr), __get_str(port), __entry->cq_id, + __entry->count, __entry->posted + ) +); + +TRACE_EVENT(xprtrdma_post_recvs_err, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + int status + ), + + TP_ARGS(r_xprt, status), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, status) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + const struct rpcrdma_ep *ep = r_xprt->rx_ep; + + __entry->cq_id = ep->re_attr.recv_cq->res.id; + __entry->status = status; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s cq.id=%d rc=%d", __get_str(addr), __get_str(port), __entry->cq_id, - __entry->count, __entry->posted, __entry->status + __entry->status ) ); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 1eca2305ca42..94640482cfe7 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) - __field( int, success ) __field( int, target_cpu ) ), @@ -156,7 +155,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ - __entry->success = 1; /* rudiment, kill when possible */ __entry->target_cpu = task_cpu(p); ), diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index f624969a4f14..370ade0d4093 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -124,50 +124,6 @@ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST)) -#define scsi_driverbyte_name(result) { result, #result } -#define show_driverbyte_name(val) \ - __print_symbolic(val, \ - scsi_driverbyte_name(DRIVER_OK), \ - scsi_driverbyte_name(DRIVER_BUSY), \ - scsi_driverbyte_name(DRIVER_SOFT), \ - scsi_driverbyte_name(DRIVER_MEDIA), \ - scsi_driverbyte_name(DRIVER_ERROR), \ - scsi_driverbyte_name(DRIVER_INVALID), \ - scsi_driverbyte_name(DRIVER_TIMEOUT), \ - scsi_driverbyte_name(DRIVER_HARD), \ - scsi_driverbyte_name(DRIVER_SENSE)) - -#define scsi_msgbyte_name(result) { result, #result } -#define show_msgbyte_name(val) \ - __print_symbolic(val, \ - scsi_msgbyte_name(COMMAND_COMPLETE), \ - scsi_msgbyte_name(EXTENDED_MESSAGE), \ - 
scsi_msgbyte_name(SAVE_POINTERS), \ - scsi_msgbyte_name(RESTORE_POINTERS), \ - scsi_msgbyte_name(DISCONNECT), \ - scsi_msgbyte_name(INITIATOR_ERROR), \ - scsi_msgbyte_name(ABORT_TASK_SET), \ - scsi_msgbyte_name(MESSAGE_REJECT), \ - scsi_msgbyte_name(NOP), \ - scsi_msgbyte_name(MSG_PARITY_ERROR), \ - scsi_msgbyte_name(LINKED_CMD_COMPLETE), \ - scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \ - scsi_msgbyte_name(TARGET_RESET), \ - scsi_msgbyte_name(ABORT_TASK), \ - scsi_msgbyte_name(CLEAR_TASK_SET), \ - scsi_msgbyte_name(INITIATE_RECOVERY), \ - scsi_msgbyte_name(RELEASE_RECOVERY), \ - scsi_msgbyte_name(CLEAR_ACA), \ - scsi_msgbyte_name(LOGICAL_UNIT_RESET), \ - scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \ - scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \ - scsi_msgbyte_name(ORDERED_QUEUE_TAG), \ - scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \ - scsi_msgbyte_name(ACA), \ - scsi_msgbyte_name(QAS_REQUEST), \ - scsi_msgbyte_name(BUS_DEVICE_RESET), \ - scsi_msgbyte_name(ABORT)) - #define scsi_statusbyte_name(result) { result, #result } #define show_statusbyte_name(val) \ __print_symbolic(val, \ @@ -327,9 +283,9 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), - show_driverbyte_name(((__entry->result) >> 24) & 0xff), + "DRIVER_OK", show_hostbyte_name(((__entry->result) >> 16) & 0xff), - show_msgbyte_name(((__entry->result) >> 8) & 0xff), + "COMMAND_COMPLETE", show_statusbyte_name(__entry->result & 0xff)) ); diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index a966d4b5ab37..12c315782766 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -201,6 +201,66 @@ TRACE_EVENT(inet_sock_set_state, show_tcp_state_name(__entry->newstate)) ); +TRACE_EVENT(inet_sk_error_report, + + TP_PROTO(const struct sock *sk), + + TP_ARGS(sk), + + TP_STRUCT__entry( + __field(int, error) + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + __field(__u16, protocol) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __array(__u8, saddr_v6, 16) + __array(__u8, daddr_v6, 16) + ), + + TP_fast_assign( + struct inet_sock *inet = inet_sk(sk); + struct in6_addr *pin6; + __be32 *p32; + + __entry->error = sk->sk_err; + __entry->family = sk->sk_family; + __entry->protocol = sk->sk_protocol; + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + + p32 = (__be32 *) __entry->saddr; + *p32 = inet->inet_saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = inet->inet_daddr; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + pin6 = (struct in6_addr *)__entry->saddr_v6; + *pin6 = sk->sk_v6_rcv_saddr; + pin6 = (struct in6_addr *)__entry->daddr_v6; + *pin6 = sk->sk_v6_daddr; + } else +#endif + { + pin6 = (struct in6_addr *)__entry->saddr_v6; + ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); + pin6 = (struct in6_addr *)__entry->daddr_v6; + ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); + } + ), + + TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d", + show_family_name(__entry->family), + show_inet_protocol_name(__entry->protocol), + __entry->sport, __entry->dport, + __entry->saddr, __entry->daddr, + __entry->saddr_v6, __entry->daddr_v6, + __entry->error) +); + #endif /* _TRACE_SOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h index 0dd9171d2ad8..c0d9844befd7 
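
The inet_sk_error_report event above stores both address families in sockaddr_in6-sized byte arrays, mapping IPv4 endpoints into the v6 columns with ipv6_addr_set_v4mapped() so a single %pI6c formatter works for either family. The helper's effect in isolation:

        struct in6_addr a;

        ipv6_addr_set_v4mapped(htonl(0xc0a80001), &a); /* a == ::ffff:192.168.0.1 */
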
100644 --- a/include/trace/events/spi.h +++ b/include/trace/events/spi.h @@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy, ); +TRACE_EVENT(spi_setup, + TP_PROTO(struct spi_device *spi, int status), + TP_ARGS(spi, status), + + TP_STRUCT__entry( + __field(int, bus_num) + __field(int, chip_select) + __field(unsigned long, mode) + __field(unsigned int, bits_per_word) + __field(unsigned int, max_speed_hz) + __field(int, status) + ), + + TP_fast_assign( + __entry->bus_num = spi->controller->bus_num; + __entry->chip_select = spi->chip_select; + __entry->mode = spi->mode; + __entry->bits_per_word = spi->bits_per_word; + __entry->max_speed_hz = spi->max_speed_hz; + __entry->status = status; + ), + + TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d", + __entry->bus_num, __entry->chip_select, + (__entry->mode & SPI_MODE_X_MASK), + (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "", + (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "", + (__entry->mode & SPI_3WIRE) ? "3wire, " : "", + (__entry->mode & SPI_LOOP) ? "loopback, " : "", + __entry->bits_per_word, __entry->max_speed_hz, + __entry->status) +); + +TRACE_EVENT(spi_set_cs, + TP_PROTO(struct spi_device *spi, bool enable), + TP_ARGS(spi, enable), + + TP_STRUCT__entry( + __field(int, bus_num) + __field(int, chip_select) + __field(unsigned long, mode) + __field(bool, enable) + ), + + TP_fast_assign( + __entry->bus_num = spi->controller->bus_num; + __entry->chip_select = spi->chip_select; + __entry->mode = spi->mode; + __entry->enable = enable; + ), + + TP_printk("spi%d.%d %s%s", + __entry->bus_num, __entry->chip_select, + __entry->enable ? "activate" : "deactivate", + (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "") +); + DECLARE_EVENT_CLASS(spi_message, TP_PROTO(struct spi_message *msg), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index d02e01a27b69..2d04eb96d418 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -154,8 +154,8 @@ TRACE_EVENT(rpc_clnt_new, __entry->client_id = clnt->cl_clid; __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); - __assign_str(program, program) - __assign_str(server, server) + __assign_str(program, program); + __assign_str(server, server); ), TP_printk("client=%u peer=[%s]:%s program=%s server=%s", @@ -180,8 +180,8 @@ TRACE_EVENT(rpc_clnt_new_err, TP_fast_assign( __entry->error = error; - __assign_str(program, program) - __assign_str(server, server) + __assign_str(program, program); + __assign_str(server, server); ), TP_printk("program=%s server=%s error=%d", @@ -284,8 +284,8 @@ TRACE_EVENT(rpc_request, __entry->client_id = task->tk_client->cl_clid; __entry->version = task->tk_client->cl_vers; __entry->async = RPC_IS_ASYNC(task); - __assign_str(progname, task->tk_client->cl_program->name) - __assign_str(procname, rpc_proc_name(task)) + __assign_str(progname, task->tk_client->cl_program->name); + __assign_str(procname, rpc_proc_name(task)); ), TP_printk("task:%u@%u %sv%d %s (%ssync)", @@ -295,25 +295,11 @@ TRACE_EVENT(rpc_request, ) ); -TRACE_DEFINE_ENUM(RPC_TASK_ASYNC); -TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER); -TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS); -TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN); -TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS); -TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC); -TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN); -TRACE_DEFINE_ENUM(RPC_TASK_SOFT); -TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN); -TRACE_DEFINE_ENUM(RPC_TASK_SENT); -TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT); 
-TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT); -TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT); -TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF); - #define rpc_show_task_flags(flags) \ __print_flags(flags, "|", \ { RPC_TASK_ASYNC, "ASYNC" }, \ { RPC_TASK_SWAPPER, "SWAPPER" }, \ + { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \ @@ -327,14 +313,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF); { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \ { RPC_TASK_CRED_NOREF, "CRED_NOREF" }) -TRACE_DEFINE_ENUM(RPC_TASK_RUNNING); -TRACE_DEFINE_ENUM(RPC_TASK_QUEUED); -TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE); -TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT); -TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV); -TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT); -TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED); - #define rpc_show_runstate(flags) \ __print_flags(flags, "|", \ { (1UL << RPC_TASK_RUNNING), "RUNNING" }, \ @@ -494,10 +472,10 @@ DECLARE_EVENT_CLASS(rpc_reply_event, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); - __assign_str(progname, task->tk_client->cl_program->name) + __assign_str(progname, task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procname, rpc_proc_name(task)) - __assign_str(servername, task->tk_xprt->servername) + __assign_str(procname, rpc_proc_name(task)); + __assign_str(servername, task->tk_xprt->servername); ), TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s", @@ -622,8 +600,8 @@ TRACE_EVENT(rpc_stats_latency, __entry->task_id = task->tk_pid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; - __assign_str(progname, task->tk_client->cl_program->name) - __assign_str(procname, rpc_proc_name(task)) + __assign_str(progname, task->tk_client->cl_program->name); + __assign_str(procname, rpc_proc_name(task)); __entry->backlog = ktime_to_us(backlog); __entry->rtt = ktime_to_us(rtt); __entry->execute = ktime_to_us(execute); @@ -669,15 +647,15 @@ TRACE_EVENT(rpc_xdr_overflow, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name); } else { __entry->task_id = 0; __entry->client_id = 0; - __assign_str(progname, "unknown") + __assign_str(progname, "unknown"); __entry->version = 0; - __assign_str(procedure, "unknown") + __assign_str(procedure, "unknown"); } __entry->requested = requested; __entry->end = xdr->end; @@ -735,9 +713,9 @@ TRACE_EVENT(rpc_xdr_alignment, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name); __entry->offset = offset; __entry->copied = copied; @@ -945,17 +923,6 @@ TRACE_EVENT(rpc_socket_nospace, ) ); -TRACE_DEFINE_ENUM(XPRT_LOCKED); -TRACE_DEFINE_ENUM(XPRT_CONNECTED); -TRACE_DEFINE_ENUM(XPRT_CONNECTING); -TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT); -TRACE_DEFINE_ENUM(XPRT_BOUND); -TRACE_DEFINE_ENUM(XPRT_BINDING); -TRACE_DEFINE_ENUM(XPRT_CLOSING); -TRACE_DEFINE_ENUM(XPRT_CONGESTED); 
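
The TRACE_DEFINE_ENUM() lines dropped across these sunrpc hunks appear to have been unnecessary: TRACE_DEFINE_ENUM() exists to teach the exported event format about true enum values, which the preprocessor cannot substitute, but the RPC_TASK_* and XPRT_* symbols are plain #defines that already appear as literals in the format string. Contrast (the second line is kept in this very file, for a real enum):

        #define RPC_TASK_ASYNC 0x0001   /* macro: no TRACE_DEFINE_ENUM needed */
        TRACE_DEFINE_ENUM(SVC_COMPLETE); /* enum value: mapping still required */
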
-TRACE_DEFINE_ENUM(XPRT_CWND_WAIT); -TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE); - #define rpc_show_xprt_state(x) \ __print_flags(x, "|", \ { (1UL << XPRT_LOCKED), "LOCKED"}, \ @@ -965,6 +932,8 @@ TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE); { (1UL << XPRT_BOUND), "BOUND"}, \ { (1UL << XPRT_BINDING), "BINDING"}, \ { (1UL << XPRT_CLOSING), "CLOSING"}, \ + { (1UL << XPRT_OFFLINE), "OFFLINE"}, \ + { (1UL << XPRT_REMOVE), "REMOVE"}, \ { (1UL << XPRT_CONGESTED), "CONGESTED"}, \ { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \ { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"}) @@ -1092,10 +1061,10 @@ TRACE_EVENT(xprt_retransmit, __field(u32, xid) __field(int, ntrans) __field(int, version) + __field(unsigned long, timeout) __string(progname, rqst->rq_task->tk_client->cl_program->name) - __string(procedure, - rqst->rq_task->tk_msg.rpc_proc->p_name) + __string(procname, rpc_proc_name(rqst->rq_task)) ), TP_fast_assign( @@ -1106,17 +1075,19 @@ TRACE_EVENT(xprt_retransmit, task->tk_client->cl_clid : -1; __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->ntrans = rqst->rq_ntrans; + __entry->timeout = task->tk_timeout; __assign_str(progname, - task->tk_client->cl_program->name) + task->tk_client->cl_program->name); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + __assign_str(procname, rpc_proc_name(task)); ), TP_printk( - "task:%u@%u xid=0x%08x %sv%d %s ntrans=%d", + "task:%u@%u xid=0x%08x %sv%d %s ntrans=%d timeout=%lu", __entry->task_id, __entry->client_id, __entry->xid, - __get_str(progname), __entry->version, __get_str(procedure), - __entry->ntrans) + __get_str(progname), __entry->version, __get_str(procname), + __entry->ntrans, __entry->timeout + ) ); TRACE_EVENT(xprt_ping, @@ -1568,8 +1539,7 @@ DEFINE_SVCXDRBUF_EVENT(sendto); svc_rqst_flag(SPLICE_OK) \ svc_rqst_flag(VICTIM) \ svc_rqst_flag(BUSY) \ - svc_rqst_flag(DATA) \ - svc_rqst_flag_end(AUTHERR) + svc_rqst_flag_end(DATA) #undef svc_rqst_flag #undef svc_rqst_flag_end @@ -1611,9 +1581,9 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE); { SVC_COMPLETE, "SVC_COMPLETE" }) TRACE_EVENT(svc_authenticate, - TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat), + TP_PROTO(const struct svc_rqst *rqst, int auth_res), - TP_ARGS(rqst, auth_res, auth_stat), + TP_ARGS(rqst, auth_res), TP_STRUCT__entry( __field(u32, xid) @@ -1624,7 +1594,7 @@ TRACE_EVENT(svc_authenticate, TP_fast_assign( __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->svc_status = auth_res; - __entry->auth_stat = be32_to_cpu(auth_stat); + __entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat); ), TP_printk("xid=0x%08x auth_res=%s auth_stat=%s", @@ -1642,7 +1612,7 @@ TRACE_EVENT(svc_process, __field(u32, vers) __field(u32, proc) __string(service, name) - __string(procedure, rqst->rq_procinfo->pc_name) + __string(procedure, svc_proc_name(rqst)) __string(addr, rqst->rq_xprt ? rqst->rq_xprt->xpt_remotebuf : "(null)") ), @@ -1652,7 +1622,7 @@ TRACE_EVENT(svc_process, __entry->vers = rqst->rq_vers; __entry->proc = rqst->rq_proc; __assign_str(service, name); - __assign_str(procedure, rqst->rq_procinfo->pc_name); + __assign_str(procedure, svc_proc_name(rqst)); __assign_str(addr, rqst->rq_xprt ? 
rqst->rq_xprt->xpt_remotebuf : "(null)"); ), @@ -1842,7 +1812,7 @@ TRACE_EVENT(svc_xprt_accept, TP_fast_assign( __assign_str(addr, xprt->xpt_remotebuf); - __assign_str(protocol, xprt->xpt_class->xcl_name) + __assign_str(protocol, xprt->xpt_class->xcl_name); __assign_str(service, service); ), @@ -1918,7 +1888,7 @@ TRACE_EVENT(svc_stats_latency, TP_STRUCT__entry( __field(u32, xid) __field(unsigned long, execute) - __string(procedure, rqst->rq_procinfo->pc_name) + __string(procedure, svc_proc_name(rqst)) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), @@ -1926,7 +1896,7 @@ TRACE_EVENT(svc_stats_latency, __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->execute = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_stime)); - __assign_str(procedure, rqst->rq_procinfo->pc_name); + __assign_str(procedure, svc_proc_name(rqst)); __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ba94857eea11..521059d8dc0a 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -295,6 +295,82 @@ TRACE_EVENT(tcp_probe, __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie) ); +#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \ + do { \ + const struct tcphdr *th = (const struct tcphdr *)skb->data; \ + struct sockaddr_in *v4 = (void *)__entry->saddr; \ + \ + v4->sin_family = AF_INET; \ + v4->sin_port = th->source; \ + v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \ + v4 = (void *)__entry->daddr; \ + v4->sin_family = AF_INET; \ + v4->sin_port = th->dest; \ + v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) + +#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ + do { \ + const struct iphdr *iph = ip_hdr(skb); \ + \ + if (iph->version == 6) { \ + const struct tcphdr *th = (const struct tcphdr *)skb->data; \ + struct sockaddr_in6 *v6 = (void *)__entry->saddr; \ + \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = th->source; \ + v6->sin6_addr = ipv6_hdr(skb)->saddr; \ + v6 = (void *)__entry->daddr; \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = th->dest; \ + v6->sin6_addr = ipv6_hdr(skb)->daddr; \ + } else \ + TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \ + } while (0) + +#else + +#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ + TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) + +#endif + +/* + * tcp event with only skb + */ +DECLARE_EVENT_CLASS(tcp_event_skb, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field(const void *, skbaddr) + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS_SKB(__entry, skb); + ), + + TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr) +); + +DEFINE_EVENT(tcp_event_skb, tcp_bad_csum, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 1cb6f1afba0e..599739ee7b20 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -246,6 +246,26 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init, int dev_state, int link_state), TP_ARGS(dev_name, err, usecs, dev_state, link_state)); +DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + 
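
tcp_bad_csum above is defined from the tcp_event_skb class, so the datapath only needs a single call wherever a bad checksum is accounted. A hypothetical call site (the guard and its placement are invented for illustration; the real hooks sit in the TCP checksum-failure paths):

        if (tcp_checksum_complete(skb))         /* assumed placement */
                trace_tcp_bad_csum(skb);
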
TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + TRACE_EVENT(ufshcd_command, TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, unsigned int tag, u32 doorbell, int transfer_len, u32 intr, diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 2070df64958e..88faf2400ec2 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -330,7 +330,7 @@ TRACE_EVENT(mm_vmscan_writepage, page_is_file_lru(page)); ), - TP_printk("page=%p pfn=%lu flags=%s", + TP_printk("page=%p pfn=0x%lx flags=%s", pfn_to_page(__entry->pfn), __entry->pfn, show_reclaim_flags(__entry->reclaim_flags)) @@ -423,47 +423,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active, show_reclaim_flags(__entry->reclaim_flags)) ); -TRACE_EVENT(mm_vmscan_inactive_list_is_low, - - TP_PROTO(int nid, int reclaim_idx, - unsigned long total_inactive, unsigned long inactive, - unsigned long total_active, unsigned long active, - unsigned long ratio, int file), - - TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file), - - TP_STRUCT__entry( - __field(int, nid) - __field(int, reclaim_idx) - __field(unsigned long, total_inactive) - __field(unsigned long, inactive) - __field(unsigned long, total_active) - __field(unsigned long, active) - __field(unsigned long, ratio) - __field(int, reclaim_flags) - ), - - TP_fast_assign( - __entry->nid = nid; - __entry->reclaim_idx = reclaim_idx; - __entry->total_inactive = total_inactive; - __entry->inactive = inactive; - __entry->total_active = total_active; - __entry->active = active; - __entry->ratio = ratio; - __entry->reclaim_flags = trace_reclaim_flags(file) & - RECLAIM_WB_LRU; - ), - - TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s", - __entry->nid, - __entry->reclaim_idx, - __entry->total_inactive, __entry->inactive, - __entry->total_active, __entry->active, - __entry->ratio, - show_reclaim_flags(__entry->reclaim_flags)) -); - TRACE_EVENT(mm_vmscan_node_reclaim_begin, TP_PROTO(int nid, int order, gfp_t gfp_flags), diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h index 6782213778be..d0b3f0ea9ba1 100644 --- a/include/trace/events/vsock_virtio_transport_common.h +++ b/include/trace/events/vsock_virtio_transport_common.h @@ -9,9 +9,12 @@ #include <linux/tracepoint.h> TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM); +TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET); #define show_type(val) \ - __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }) + __print_symbolic(val, \ + { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \ + { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" }) TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 1efa463c4979..840d1ba84cf5 100644 --- 
a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -36,7 +36,8 @@ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \ - EMe(WB_REASON_FORKER_THREAD, "forker_thread") + EM( WB_REASON_FORKER_THREAD, "forker_thread") \ + EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush") WB_WORK_REASON diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index fcad3645a70b..c40fc97f9417 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -110,7 +110,11 @@ DECLARE_EVENT_CLASS(xdp_redirect_template, u32 ifindex = 0, map_index = index; if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex; + /* Just leave to_ifindex to 0 if do broadcast redirect, + * as tgt will be NULL. + */ + if (tgt) + ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex; } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { ifindex = index; map_index = 0; diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 8268bf747d6f..08810a463880 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -102,6 +102,9 @@ TRACE_MAKE_SYSTEM_STR(); #undef __string #define __string(item, src) __dynamic_array(char, item, -1) +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) @@ -197,6 +200,9 @@ TRACE_MAKE_SYSTEM_STR(); #undef __string #define __string(item, src) __dynamic_array(char, item, -1) +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) @@ -358,6 +364,21 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_hex_dump_seq(p, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) +#undef __print_ns_to_secs +#define __print_ns_to_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + do_div(____val, NSEC_PER_SEC); \ + ____val; \ + }) + +#undef __print_ns_without_secs +#define __print_ns_without_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + (u32) do_div(____val, NSEC_PER_SEC); \ + }) + #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace enum print_line_t \ @@ -444,6 +465,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) @@ -492,6 +516,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ #define __string(item, src) __dynamic_array(char, item, \ strlen((src) ? (const char *)(src) : "(null)") + 1) +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1) + /* * __bitmask_size_in_bytes_raw is the number of bytes needed to hold * num_possible_cpus(). @@ -655,10 +682,20 @@ static inline notrace int trace_event_get_offsets_##call( \ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + #undef __assign_str #define __assign_str(dst, src) \ strcpy(__get_str(dst), (src) ? 
(const char *)(src) : "(null)"); +#undef __assign_str_len +#define __assign_str_len(dst, src, len) \ + do { \ + memcpy(__get_str(dst), (src), (len)); \ + __get_str(dst)[len] = '\0'; \ + } while(0) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) @@ -736,6 +773,16 @@ static inline void ftrace_test_probe_##call(void) \ #undef __print_array #undef __print_hex_dump +/* + * The below is not executed in the kernel. It is only what is + * displayed in the print format for userspace to parse. + */ +#undef __print_ns_to_secs +#define __print_ns_to_secs(val) (val) / 1000000000UL + +#undef __print_ns_without_secs +#define __print_ns_without_secs(val) (val) % 1000000000UL + #undef TP_printk #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index f94f65d429be..1567a3294c3d 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -72,6 +72,9 @@ #define MADV_COLD 20 /* deactivate these pages */ #define MADV_PAGEOUT 21 /* reclaim these pages */ +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 5a3c221f4c9d..3ba180f550d7 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -29,6 +29,11 @@ typedef union sigval { #define __ARCH_SI_ATTRIBUTES #endif +/* + * Be careful when extending this union. On 32bit siginfo_t is 32bit + * aligned. Which means that a 64bit field or any other field that + * would increase the alignment of siginfo_t will break the ABI. 
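The ABI hazard described in this comment is easy to demonstrate from userspace. Below is a minimal, self-contained illustration (not kernel code; the union names are invented for the example): adding any 64-bit member to a union can raise the union's alignment, which would shift every field laid out after it inside siginfo_t.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for __sifields before and after a hypothetical 64-bit addition. */
union fields_now   { int si_pid; uint32_t si_addr32; };
union fields_grown { int si_pid; uint32_t si_addr32; uint64_t si_big; };

int main(void)
{
	/* On many 32-bit ABIs (e.g. ARM EABI) this prints "4 8": the grown
	 * union no longer has the alignment the existing 32-bit siginfo_t
	 * layout relies on, which is exactly the break the comment warns
	 * about. */
	printf("%zu %zu\n", (size_t)_Alignof(union fields_now),
	       (size_t)_Alignof(union fields_grown));
	return 0;
}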
+ */ union __sifields { /* kill() */ struct { diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 4dcd13d097a9..1f0a2b4864e4 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -122,6 +122,10 @@ #define SO_PREFER_BUSY_POLL 69 #define SO_BUSY_POLL_BUDGET 70 +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 6de5a7fc066b..1c5fb86d455a 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise) #define __NR_remap_file_pages 234 __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) #define __NR_mbind 235 -__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind) +__SYSCALL(__NR_mbind, sys_mbind) #define __NR_get_mempolicy 236 -__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy) +__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) #define __NR_set_mempolicy 237 -__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy) +__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) #define __NR_migrate_pages 238 -__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages) +__SYSCALL(__NR_migrate_pages, sys_migrate_pages) #define __NR_move_pages 239 -__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages) +__SYSCALL(__NR_move_pages, sys_move_pages) #endif #define __NR_rt_tgsigqueueinfo 240 @@ -863,8 +863,8 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise) __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) #define __NR_mount_setattr 442 __SYSCALL(__NR_mount_setattr, sys_mount_setattr) -#define __NR_quotactl_path 443 -__SYSCALL(__NR_quotactl_path, sys_quotactl_path) +#define __NR_quotactl_fd 443 +__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd) #define __NR_landlock_create_ruleset 444 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) @@ -873,8 +873,15 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#ifdef __ARCH_WANT_MEMFD_SECRET +#define __NR_memfd_secret 447 +__SYSCALL(__NR_memfd_secret, sys_memfd_secret) +#endif +#define __NR_process_mrelease 448 +__SYSCALL(__NR_process_mrelease, sys_process_mrelease) + #undef __NR_syscalls -#define __NR_syscalls 447 +#define __NR_syscalls 449 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 728566542f8a..0cbd1540aeac 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -116,8 +116,6 @@ extern "C" { #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) /* Flag that the memory should be in VRAM and cleared */ #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) -/* Flag that create shadow bo(GTT) while allocating vram bo */ -#define AMDGPU_GEM_CREATE_SHADOW (1 << 4) /* Flag that allocating the BO should use linear VRAM */ #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) /* Flag that BO is always valid in this VM */ @@ -138,6 +136,10 @@ extern "C" { * accessing it with various hw blocks */ #define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10) +/* Flag that BO will be used only in preemptible context, which does + * not require GTT memory accounting + */ +#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11) struct 
drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -755,6 +757,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VBIOS_SIZE 0x1 /* Subquery id: Query vbios image */ #define AMDGPU_INFO_VBIOS_IMAGE 0x2 + /* Subquery id: Query vbios info */ + #define AMDGPU_INFO_VBIOS_INFO 0x3 /* Query UVD handles */ #define AMDGPU_INFO_NUM_HANDLES 0x1C /* Query sensor related information */ @@ -948,6 +952,15 @@ struct drm_amdgpu_info_firmware { __u32 feature; }; +struct drm_amdgpu_info_vbios { + __u8 name[64]; + __u8 vbios_pn[64]; + __u32 version; + __u32 pad; + __u8 vbios_ver_str[32]; + __u8 date[32]; +}; + #define AMDGPU_VRAM_TYPE_UNKNOWN 0 #define AMDGPU_VRAM_TYPE_GDDR1 1 #define AMDGPU_VRAM_TYPE_DDR2 2 @@ -1121,6 +1134,7 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_RV 142 /* Raven */ #define AMDGPU_FAMILY_NV 143 /* Navi10 */ #define AMDGPU_FAMILY_VGH 144 /* Van Gogh */ +#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 67b94bc3c885..3b810b53ba8b 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -635,8 +635,8 @@ struct drm_gem_open { /** * DRM_CAP_VBLANK_HIGH_CRTC * - * If set to 1, the kernel supports specifying a CRTC index in the high bits of - * &drm_wait_vblank_request.type. + * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>` + * in the high bits of &drm_wait_vblank_request.type. * * Starting kernel version 2.6.39, this capability is always set to 1. */ @@ -777,9 +777,12 @@ struct drm_get_cap { /** * DRM_CLIENT_CAP_STEREO_3D * - * if set to 1, the DRM core will expose the stereo 3D capabilities of the + * If set to 1, the DRM core will expose the stereo 3D capabilities of the * monitor by advertising the supported 3D layouts in the flags of struct - * drm_mode_modeinfo. + * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 3.13. */ #define DRM_CLIENT_CAP_STEREO_3D 1 @@ -788,6 +791,9 @@ struct drm_get_cap { * * If set to 1, the DRM core will expose all planes (overlay, primary, and * cursor) to userspace. + * + * This capability has been introduced in kernel version 3.15. Starting from + * kernel version 3.17, this capability is always supported for all drivers. */ #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 @@ -797,6 +803,13 @@ struct drm_get_cap { * If set to 1, the DRM core will expose atomic properties to userspace. This * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and * &DRM_CLIENT_CAP_ASPECT_RATIO. + * + * If the driver doesn't support atomic mode-setting, enabling this capability + * will fail with -EOPNOTSUPP. + * + * This capability has been introduced in kernel version 4.0. Starting from + * kernel version 4.2, this capability is always supported for atomic-capable + * drivers. */ #define DRM_CLIENT_CAP_ATOMIC 3 @@ -804,6 +817,10 @@ struct drm_get_cap { * DRM_CLIENT_CAP_ASPECT_RATIO * * If set to 1, the DRM core will provide aspect ratio information in modes. + * See ``DRM_MODE_FLAG_PIC_AR_*``. + * + * This capability is always supported for all drivers starting from kernel + * version 4.18. */ #define DRM_CLIENT_CAP_ASPECT_RATIO 4 @@ -811,8 +828,11 @@ struct drm_get_cap { * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS * * If set to 1, the DRM core will expose special connectors to be used for - * writing back to memory the scene setup in the commit. 
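As a concrete illustration of the dependency spelled out in this capability's documentation, here is a hedged userspace sketch of enabling writeback connectors with DRM_IOCTL_SET_CLIENT_CAP from this same header (function names are invented; fd is an open DRM device node; error handling trimmed):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int set_client_cap(int fd, __u64 cap, __u64 value)
{
	struct drm_set_client_cap req = { .capability = cap, .value = value };

	return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &req);
}

int enable_writeback(int fd)
{
	/* ATOMIC must be enabled first; it fails with -EOPNOTSUPP on
	 * non-atomic drivers, in which case writeback is unavailable too. */
	if (set_client_cap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		return -1;
	return set_client_cap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
}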
Depends on client - * also supporting DRM_CLIENT_CAP_ATOMIC + * writing back to memory the scene setup in the commit. The client must enable + * &DRM_CLIENT_CAP_ATOMIC first. + * + * This capability is always supported for atomic-capable drivers starting from + * kernel version 4.19. */ #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 @@ -1030,6 +1050,16 @@ extern "C" { #define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) +/** + * DRM_IOCTL_MODE_RMFB - Remove a framebuffer. + * + * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL + * argument is a framebuffer object ID. + * + * Warning: removing a framebuffer currently in-use on an enabled plane will + * disable that plane. The CRTC the plane is linked to may also be disabled + * (depending on driver capabilities). + */ #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index f76de49c768f..9f4bb4a6f358 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -168,6 +168,13 @@ extern "C" { #define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ #define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ +/* 64 bpp RGB */ +#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */ + /* * Floating point 64bpp RGB * IEEE 754-2008 binary16 half-precision float @@ -893,9 +900,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) /* * The top 4 bits (out of the 56 bits alloted for specifying vendor specific - * modifiers) denote the category for modifiers. Currently we have only two - * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen - * different categories. + * modifiers) denote the category for modifiers. Currently we have three + * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of + * sixteen different categories. */ #define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \ fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL)) @@ -1011,6 +1018,109 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AFBC_FORMAT_MOD_USM (1ULL << 12) /* + * Arm Fixed-Rate Compression (AFRC) modifiers + * + * AFRC is a proprietary fixed rate image compression protocol and format, + * designed to provide guaranteed bandwidth and memory footprint + * reductions in graphics and media use-cases. + * + * AFRC buffers consist of one or more planes, with the same components + * and meaning as an uncompressed buffer using the same pixel format. + * + * Within each plane, the pixel/luma/chroma values are grouped into + * "coding unit" blocks which are individually compressed to a + * fixed size (in bytes). 
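The "fixed rate" property just described makes buffer sizing simple arithmetic; a hedged illustration (helper name invented): since every coding unit in a plane compresses to the same byte count, the payload of a plane is the unit count times the unit size.

#include <stdint.h>

/* Illustrative only: payload bytes of one AFRC plane, given its dimensions
 * measured in coding units and the configured coding unit size. */
static uint64_t afrc_plane_payload(uint32_t cu_cols, uint32_t cu_rows,
				   uint32_t cu_bytes)
{
	return (uint64_t)cu_cols * cu_rows * cu_bytes;
}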
All coding units within a given plane of a buffer + * store the same number of values, and have the same compressed size. + * + * The coding unit size is configurable, allowing different rates of compression. + * + * The start of each AFRC buffer plane must be aligned to an alignment granule which + * depends on the coding unit size. + * + * Coding Unit Size Plane Alignment + * ---------------- --------------- + * 16 bytes 1024 bytes + * 24 bytes 512 bytes + * 32 bytes 2048 bytes + * + * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned + * to a multiple of the paging tile dimensions. + * The dimensions of each paging tile depend on whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. + * + * Layout Paging Tile Width Paging Tile Height + * ------ ----------------- ------------------ + * SCAN 16 coding units 4 coding units + * ROT 8 coding units 8 coding units + * + * The dimensions of each coding unit depend on the number of components + * in the compressed plane and whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. + * + * Number of Components in Plane Layout Coding Unit Width Coding Unit Height + * ----------------------------- --------- ----------------- ------------------ + * 1 SCAN 16 samples 4 samples + * Example: 16x4 luma samples in a 'Y' plane + * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 1 ROT 8 samples 8 samples + * Example: 8x8 luma samples in a 'Y' plane + * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 2 DONT CARE 8 samples 4 samples + * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 3 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer without alpha + * ----------------------------- --------- ----------------- ------------------ + * 4 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer with alpha + */ + +#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02 + +#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode) + +/* + * AFRC coding unit size modifier. + * + * Indicates the number of bytes used to store each compressed coding unit for + * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance + * is the same for both Cb and Cr, which may be stored in separate planes. + * + * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store + * each compressed coding unit in the first plane of the buffer. For RGBA buffers + * this is the only plane, while for semi-planar and fully-planar YUV buffers, + * this corresponds to the luma plane. + * + * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store + * each compressed coding unit in the second and third planes in the buffer. + * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s). + * + * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified + * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero. + * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and + * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified. 
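Putting the rules above together, a sketch of how userspace might compose a modifier for a single-plane, scanline-layout buffer with 16-byte coding units, using the macros defined just below (P12 is left zero, as required for single-plane formats):

#include <stdint.h>
#include <drm/drm_fourcc.h>

static uint64_t afrc_scan_cu16_modifier(void)
{
	return DRM_FORMAT_MOD_ARM_AFRC(
		AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) |
		AFRC_FORMAT_MOD_LAYOUT_SCAN);
}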
+ */ +#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf +#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL) + +#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size) +#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4) + +/* + * AFRC scanline memory layout. + * + * Indicates if the buffer uses the scanline-optimised layout + * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout. + * The memory layout is the same for all planes. + */ +#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8) + +/* * Arm 16x16 Block U-Interleaved modifier * * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index a5e76aa06ad5..90c55383f1ee 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -312,16 +312,48 @@ struct drm_mode_set_plane { __u32 src_w; }; +/** + * struct drm_mode_get_plane - Get plane metadata. + * + * Userspace can perform a GETPLANE ioctl to retrieve information about a + * plane. + * + * To retrieve the number of formats supported, set @count_format_types to zero + * and call the ioctl. @count_format_types will be updated with the value. + * + * To retrieve these formats, allocate an array with the memory needed to store + * @count_format_types formats. Point @format_type_ptr to this array and call + * the ioctl again (with @count_format_types still set to the value returned in + * the first ioctl call). + */ struct drm_mode_get_plane { + /** + * @plane_id: Object ID of the plane whose information should be + * retrieved. Set by caller. + */ __u32 plane_id; + /** @crtc_id: Object ID of the current CRTC. */ __u32 crtc_id; + /** @fb_id: Object ID of the current fb. */ __u32 fb_id; + /** + * @possible_crtcs: Bitmask of CRTC's compatible with the plane. CRTC's + * are created and they receive an index, which corresponds to their + * position in the bitmask. Bit N corresponds to + * :ref:`CRTC index<crtc_index>` N. + */ __u32 possible_crtcs; + /** @gamma_size: Never used. */ __u32 gamma_size; + /** @count_format_types: Number of formats. */ __u32 count_format_types; + /** + * @format_type_ptr: Pointer to ``__u32`` array of formats that are + * supported by the plane. These formats do not require modifiers. + */ __u64 format_type_ptr; }; @@ -413,9 +445,10 @@ enum drm_mode_subconnector { * * **Force-probing a connector** * - * If the @count_modes field is set to zero, the kernel will perform a forced - * probe on the connector to refresh the connector status, modes and EDID. - * A forced-probe can be slow, might cause flickering and the ioctl will block. + * If the @count_modes field is set to zero and the DRM client is the current + * DRM master, the kernel will perform a forced probe on the connector to + * refresh the connector status, modes and EDID. A forced-probe can be slow, + * might cause flickering and the ioctl will block. * * User-space needs to force-probe connectors to ensure their metadata is * up-to-date at startup and after receiving a hot-plug event. User-space @@ -508,22 +541,74 @@ struct drm_mode_get_connector { */ #define DRM_MODE_PROP_ATOMIC 0x80000000 +/** + * struct drm_mode_property_enum - Description for an enum/bitfield entry. + * @value: numeric value for this enum entry. + * @name: symbolic name for this enum entry. + * + * See struct drm_property_enum for details. 
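The two-call GETPLANE pattern documented earlier in this hunk looks roughly like the sketch below in userspace (helper name invented; allocation failure and the count==0 case are trimmed for brevity):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int plane_formats(int fd, uint32_t plane_id, uint32_t **fmts, uint32_t *count)
{
	struct drm_mode_get_plane gp = { .plane_id = plane_id };

	/* Pass 1: count_format_types is zero, the kernel fills it in. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &gp))
		return -1;

	*fmts = calloc(gp.count_format_types, sizeof(**fmts));
	gp.format_type_ptr = (uintptr_t)*fmts;

	/* Pass 2: same count as returned, now with a buffer to fill. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANE, &gp))
		return -1;
	*count = gp.count_format_types;
	return 0;
}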
+ */ struct drm_mode_property_enum { __u64 value; char name[DRM_PROP_NAME_LEN]; }; +/** + * struct drm_mode_get_property - Get property metadata. + * + * User-space can perform a GETPROPERTY ioctl to retrieve information about a + * property. The same property may be attached to multiple objects, see + * "Modeset Base Object Abstraction". + * + * The meaning of the @values_ptr field changes depending on the property type. + * See &drm_property.flags for more details. + * + * The @enum_blob_ptr and @count_enum_blobs fields are only meaningful when the + * property has the type &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK. For + * backwards compatibility, the kernel will always set @count_enum_blobs to + * zero when the property has the type &DRM_MODE_PROP_BLOB. User-space must + * ignore these two fields if the property has a different type. + * + * User-space is expected to retrieve values and enums by performing this ioctl + * at least twice: the first time to retrieve the number of elements, the + * second time to retrieve the elements themselves. + * + * To retrieve the number of elements, set @count_values and @count_enum_blobs + * to zero, then call the ioctl. @count_values will be updated with the number + * of elements. If the property has the type &DRM_MODE_PROP_ENUM or + * &DRM_MODE_PROP_BITMASK, @count_enum_blobs will be updated as well. + * + * To retrieve the elements themselves, allocate an array for @values_ptr and + * set @count_values to its capacity. If the property has the type + * &DRM_MODE_PROP_ENUM or &DRM_MODE_PROP_BITMASK, allocate an array for + * @enum_blob_ptr and set @count_enum_blobs to its capacity. Calling the ioctl + * again will fill the arrays. + */ struct drm_mode_get_property { - __u64 values_ptr; /* values and blob lengths */ - __u64 enum_blob_ptr; /* enum and blob id ptrs */ + /** @values_ptr: Pointer to a ``__u64`` array. */ + __u64 values_ptr; + /** @enum_blob_ptr: Pointer to a struct drm_mode_property_enum array. */ + __u64 enum_blob_ptr; + /** + * @prop_id: Object ID of the property which should be retrieved. Set + * by the caller. + */ __u32 prop_id; + /** + * @flags: ``DRM_MODE_PROP_*`` bitfield. See &drm_property.flags for + * a definition of the flags. + */ __u32 flags; + /** + * @name: Symbolic property name. User-space should use this field to + * recognize properties. + */ char name[DRM_PROP_NAME_LEN]; + /** @count_values: Number of elements in @values_ptr. */ __u32 count_values; - /* This is only used to count enum values, not blobs. The _blobs is - * simply because of a historical reason, i.e. backwards compat. */ + /** @count_enum_blobs: Number of elements in @enum_blob_ptr. 
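And the matching two-pass GETPROPERTY flow described above, sketched for an enum/bitmask property (helper name invented; allocation checks trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int property_values(int fd, uint32_t prop_id)
{
	struct drm_mode_get_property p = { .prop_id = prop_id };
	struct drm_mode_property_enum *enums;
	uint64_t *values;

	/* Pass 1: both counts are zero, the kernel reports the sizes. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &p))
		return -1;

	values = calloc(p.count_values, sizeof(*values));
	enums = calloc(p.count_enum_blobs, sizeof(*enums));
	p.values_ptr = (uintptr_t)values;
	p.enum_blob_ptr = (uintptr_t)enums;

	/* Pass 2: the kernel fills both arrays. */
	return ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &p);
}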
*/ __u32 count_enum_blobs; }; diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 09d0df8b71c5..af024d90453d 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -74,6 +74,9 @@ struct drm_etnaviv_timespec { #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 #define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a #define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b +#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c +#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d +#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e #define ETNA_MAX_PIPES 4 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index ddc47bbf48b6..bde5860b3686 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -62,8 +62,8 @@ extern "C" { #define I915_ERROR_UEVENT "ERROR" #define I915_RESET_UEVENT "RESET" -/* - * i915_user_extension: Base class for defining a chain of extensions +/** + * struct i915_user_extension - Base class for defining a chain of extensions * * Many interfaces need to grow over time. In most cases we can simply * extend the struct and have userspace pass in more data. Another option, @@ -76,12 +76,58 @@ extern "C" { * increasing complexity, and for large parts of that interface to be * entirely optional. The downside is more pointer chasing; chasing across * the __user boundary with pointers encapsulated inside u64. + * + * Example chaining: + * + * .. code-block:: C + * + * struct i915_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct i915_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct i915_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; + * + * Typically the struct i915_user_extension would be embedded in some uAPI + * struct, and in this case we would feed it the head of the chain(i.e ext1), + * which would then apply all of the above extensions. + * */ struct i915_user_extension { + /** + * @next_extension: + * + * Pointer to the next struct i915_user_extension, or zero if the end. + */ __u64 next_extension; + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct i915_user_extension. + */ __u32 name; - __u32 flags; /* All undefined bits must be zero. */ - __u32 rsvd[4]; /* Reserved for future use; must be zero. */ + /** + * @flags: MBZ + * + * All undefined bits must be zero. + */ + __u32 flags; + /** + * @rsvd: MBZ + * + * Reserved for future use; must be zero. 
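Beyond the chaining example given in the comment above, the bookkeeping can be reduced to a small helper; a sketch (helper name invented; assumes *ext was zero-initialized so @rsvd stays MBZ, and the caller must keep every extension alive across the ioctl):

#include <stdint.h>
#include <drm/i915_drm.h>

static void i915_ext_prepend(struct i915_user_extension *ext, uint32_t name,
			     uint64_t *chain)
{
	ext->name = name;
	ext->flags = 0;			/* MBZ */
	ext->next_extension = *chain;	/* old head becomes our tail */
	*chain = (uintptr_t)ext;	/* we are the new head */
}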
+ */ + __u32 rsvd[4]; }; /* @@ -360,6 +406,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_QUERY 0x39 #define DRM_I915_GEM_VM_CREATE 0x3a #define DRM_I915_GEM_VM_DESTROY 0x3b +#define DRM_I915_GEM_CREATE_EXT 0x3c /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -392,6 +439,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) +#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext) #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) @@ -524,6 +572,15 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) #define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) +/* + * Indicates the 2k user priority levels are statically mapped into 3 buckets as + * follows: + * + * -1k to -1 Low priority + * 0 Normal priority + * 1 to 1k Highest priority + */ +#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5) #define I915_PARAM_HUC_STATUS 42 @@ -626,6 +683,9 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55 +/* Query if the kernel supports the I915_USERPTR_PROBE flag. */ +#define I915_PARAM_HAS_USERPTR_PROBE 56 + /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -801,45 +861,113 @@ struct drm_i915_gem_mmap_gtt { __u64 offset; }; +/** + * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object. + * + * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl, + * and is used to retrieve the fake offset to mmap an object specified by &handle. + * + * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+. + * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave + * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`. + */ struct drm_i915_gem_mmap_offset { - /** Handle for the object being mapped. */ + /** @handle: Handle for the object being mapped. */ __u32 handle; + /** @pad: Must be zero */ __u32 pad; /** - * Fake offset to use for subsequent mmap call + * @offset: The fake offset to use for subsequent mmap call * * This is a fixed-size type for 32/64 compatibility. */ __u64 offset; /** - * Flags for extended behaviour. + * @flags: Flags for extended behaviour. + * + * It is mandatory that one of the `MMAP_OFFSET` types + * should be included: * - * It is mandatory that one of the MMAP_OFFSET types - * (GTT, WC, WB, UC, etc) should be included. + * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined) + * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching. + * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching. + * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching. + * + * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid + * type. 
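For discrete parts the note above reduces the choice to I915_MMAP_OFFSET_FIXED; a hedged userspace sketch of the fake-offset dance (helper name invented, error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

void *i915_mmap_fixed(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_FIXED,
	};

	/* Ask the kernel for the fake offset for this object... */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;
	/* ...then mmap the DRM fd itself at that offset. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}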
On devices without local memory, this caching mode is invalid. + * + * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will + * be used, depending on the object placement on creation. WB will be used + * when the object can only exist in system memory, WC otherwise. */ __u64 flags; -#define I915_MMAP_OFFSET_GTT 0 -#define I915_MMAP_OFFSET_WC 1 -#define I915_MMAP_OFFSET_WB 2 -#define I915_MMAP_OFFSET_UC 3 - /* - * Zero-terminated chain of extensions. +#define I915_MMAP_OFFSET_GTT 0 +#define I915_MMAP_OFFSET_WC 1 +#define I915_MMAP_OFFSET_WB 2 +#define I915_MMAP_OFFSET_UC 3 +#define I915_MMAP_OFFSET_FIXED 4 + + /** + * @extensions: Zero-terminated chain of extensions. * * No current extensions defined; mbz. */ __u64 extensions; }; +/** + * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in + * preparation for accessing the pages via some CPU domain. + * + * Specifying a new write or read domain will flush the object out of the + * previous domain(if required), before then updating the objects domain + * tracking with the new domain. + * + * Note this might involve waiting for the object first if it is still active on + * the GPU. + * + * Supported values for @read_domains and @write_domain: + * + * - I915_GEM_DOMAIN_WC: Uncached write-combined domain + * - I915_GEM_DOMAIN_CPU: CPU cache domain + * - I915_GEM_DOMAIN_GTT: Mappable aperture domain + * + * All other domains are rejected. + * + * Note that for discrete, starting from DG1, this is no longer supported, and + * is instead rejected. On such platforms the CPU domain is effectively static, + * where we also only support a single &drm_i915_gem_mmap_offset cache mode, + * which can't be set explicitly and instead depends on the object placements, + * as per the below. + * + * Implicit caching rules, starting from DG1: + * + * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) + * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and + * mapped as write-combined only. + * + * - Everything else is always allocated and mapped as write-back, with the + * guarantee that everything is also coherent with the GPU. + * + * Note that this is likely to change in the future again, where we might need + * more flexibility on future devices, so making this all explicit as part of a + * new &drm_i915_gem_create_ext extension is probable. + */ struct drm_i915_gem_set_domain { - /** Handle for the object */ + /** @handle: Handle for the object. */ __u32 handle; - /** New read domains */ + /** @read_domains: New read domains. */ __u32 read_domains; - /** New write domain */ + /** + * @write_domain: New write domain. + * + * Note that having something in the write domain implies it's in the + * read domain, and only that read domain. + */ __u32 write_domain; }; @@ -1054,12 +1182,12 @@ struct drm_i915_gem_exec_fence { __u32 flags; }; -/** +/* * See drm_i915_gem_execbuffer_ext_timeline_fences. */ #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 -/** +/* * This structure describes an array of drm_syncobj and associated points for * timeline variants of drm_syncobj. It is invalid to append this structure to * the execbuf if I915_EXEC_FENCE_ARRAY is set. @@ -1300,12 +1428,11 @@ struct drm_i915_gem_busy { * reading from the object simultaneously. * * The value of each engine class is the same as specified in the - * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e. + * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e. 
* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. - * reported as active itself. Some hardware may have parallel - * execution engines, e.g. multiple media engines, which are - * mapped to the same class identifier and so are not separately - * reported for busyness. + * Some hardware may have parallel execution engines, e.g. multiple + * media engines, which are mapped to the same class identifier and so + * are not separately reported for busyness. * * Caveat emptor: * Only the boolean result of this query is reliable; that is whether @@ -1316,43 +1443,79 @@ struct drm_i915_gem_busy { }; /** - * I915_CACHING_NONE - * - * GPU access is not coherent with cpu caches. Default for machines without an - * LLC. - */ -#define I915_CACHING_NONE 0 -/** - * I915_CACHING_CACHED - * - * GPU access is coherent with cpu caches and furthermore the data is cached in - * last-level caches shared between cpu cores and the gpu GT. Default on - * machines with HAS_LLC. + * struct drm_i915_gem_caching - Set or get the caching for given object + * handle. + * + * Allow userspace to control the GTT caching bits for a given object when the + * object is later mapped through the ppGTT(or GGTT on older platforms lacking + * ppGTT support, or if the object is used for scanout). Note that this might + * require unbinding the object from the GTT first, if its current caching value + * doesn't match. + * + * Note that this all changes on discrete platforms, starting from DG1, the + * set/get caching is no longer supported, and is now rejected. Instead the CPU + * caching attributes(WB vs WC) will become an immutable creation time property + * for the object, along with the GTT caching level. For now we don't expose any + * new uAPI for this, instead on DG1 this is all implicit, although this largely + * shouldn't matter since DG1 is coherent by default(without any way of + * controlling it). + * + * Implicit caching rules, starting from DG1: + * + * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) + * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and + * mapped as write-combined only. + * + * - Everything else is always allocated and mapped as write-back, with the + * guarantee that everything is also coherent with the GPU. + * + * Note that this is likely to change in the future again, where we might need + * more flexibility on future devices, so making this all explicit as part of a + * new &drm_i915_gem_create_ext extension is probable. + * + * Side note: Part of the reason for this is that changing the at-allocation-time CPU + * caching attributes for the pages might be required(and is expensive) if we + * need to then CPU map the pages later with different caching attributes. This + * inconsistent caching behaviour, while supported on x86, is not universally + * supported on other architectures. So for simplicity we opt for setting + * everything at creation time, whilst also making it immutable, on discrete + * platforms. */ -#define I915_CACHING_CACHED 1 -/** - * I915_CACHING_DISPLAY - * - * Special GPU caching mode which is coherent with the scanout engines. - * Transparently falls back to I915_CACHING_NONE on platforms where no special - * cache mode (like write-through or gfdt flushing) is available. The kernel - * automatically sets this mode when using a buffer as a scanout target. - * Userspace can manually set this mode to avoid a costly stall and clflush in - * the hotpath of drawing the first frame. 
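Given the rejection behaviour described above for DG1 and later, a defensive userspace sketch would treat SET_CACHING as best-effort (helper name invented):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_try_set_caching(int fd, uint32_t handle, uint32_t level)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = level,	/* e.g. I915_CACHING_CACHED */
	};

	/* Best effort: on discrete platforms this is expected to fail and
	 * the object's caching is immutable, decided at creation time. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}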
- */ -#define I915_CACHING_DISPLAY 2 - struct drm_i915_gem_caching { /** - * Handle of the buffer to set/get the caching level of. */ + * @handle: Handle of the buffer to set/get the caching level. + */ __u32 handle; /** - * Cacheing level to apply or return value + * @caching: The GTT caching level to apply or possible return value. + * + * The supported @caching values: + * + * I915_CACHING_NONE: * - * bits0-15 are for generic caching control (i.e. the above defined - * values). bits16-31 are reserved for platform-specific variations - * (e.g. l3$ caching on gen7). */ + * GPU access is not coherent with CPU caches. Default for machines + * without an LLC. This means manual flushing might be needed, if we + * want GPU access to be coherent. + * + * I915_CACHING_CACHED: + * + * GPU access is coherent with CPU caches and furthermore the data is + * cached in last-level caches shared between CPU cores and the GPU GT. + * + * I915_CACHING_DISPLAY: + * + * Special GPU caching mode which is coherent with the scanout engines. + * Transparently falls back to I915_CACHING_NONE on platforms where no + * special cache mode (like write-through or gfdt flushing) is + * available. The kernel automatically sets this mode when using a + * buffer as a scanout target. Userspace can manually set this mode to + * avoid a costly stall and clflush in the hotpath of drawing the first + * frame. + */ +#define I915_CACHING_NONE 0 +#define I915_CACHING_CACHED 1 +#define I915_CACHING_DISPLAY 2 __u32 caching; }; @@ -1591,6 +1754,10 @@ struct drm_i915_gem_context_param { __u32 size; __u64 param; #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 +/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance + * someone somewhere has attempted to use it, never re-use this context + * param number. + */ #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 @@ -1675,24 +1842,8 @@ struct drm_i915_gem_context_param { */ #define I915_CONTEXT_PARAM_PERSISTENCE 0xb -/* - * I915_CONTEXT_PARAM_RINGSIZE: - * - * Sets the size of the CS ringbuffer to use for logical ring contexts. This - * applies a limit of how many batches can be queued to HW before the caller - * is blocked due to lack of space for more commands. - * - * Only reliably possible to be set prior to first use, i.e. during - * construction. At any later point, the current execution must be flushed as - * the ring can only be changed while the context is idle. Note, the ringsize - * can be specified as a constructor property, see - * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required. - * - * Only applies to the current set of engine and lost when those engines - * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES). - * - * Must be between 4 - 512 KiB, in intervals of page size [4 KiB]. - * Default is 16 KiB. +/* This API has been removed. On the off chance someone somewhere has + * attempted to use it, never re-use this context param number. 
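For contrast with the retired parameters above, here is a sketch of driving one of the still-supported ones (persistence, 0xb above) through the SETPARAM ioctl (helper name invented):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_set_persistence(int fd, uint32_t ctx_id, int enable)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PERSISTENCE,
		.value = enable ? 1 : 0,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}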
*/ #define I915_CONTEXT_PARAM_RINGSIZE 0xc /* Must be kept compact -- no holes and well documented */ @@ -1700,7 +1851,7 @@ struct drm_i915_gem_context_param { __u64 value; }; -/** +/* * Context SSEU programming * * It may be necessary for either functional or performance reason to configure @@ -1759,6 +1910,69 @@ struct drm_i915_gem_context_param_sseu { __u32 rsvd; }; +/** + * DOC: Virtual Engine uAPI + * + * Virtual engine is a concept where userspace is able to configure a set of + * physical engines, submit a batch buffer, and let the driver execute it on any + * engine from the set as it sees fit. + * + * This is primarily useful on parts which have multiple instances of a same + * class engine, like for example GT3+ Skylake parts with their two VCS engines. + * + * For instance userspace can enumerate all engines of a certain class using the + * previously described `Engine Discovery uAPI`_. After that userspace can + * create a GEM context with a placeholder slot for the virtual engine (using + * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class + * and instance respectively) and finally using the + * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in + * the same reserved slot. + * + * Example of creating a virtual engine and submitting a batch buffer to it: + * + * .. code-block:: C + * + * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = { + * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE, + * .engine_index = 0, // Place this virtual engine into engine map slot 0 + * .num_siblings = 2, + * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 }, + * { I915_ENGINE_CLASS_VIDEO, 1 }, }, + * }; + * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = { + * .engines = { { I915_ENGINE_CLASS_INVALID, + * I915_ENGINE_CLASS_INVALID_NONE } }, + * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension + * }; + * struct drm_i915_gem_context_create_ext_setparam p_engines = { + * .base = { + * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, + * }, + * .param = { + * .param = I915_CONTEXT_PARAM_ENGINES, + * .value = to_user_pointer(&engines), + * .size = sizeof(engines), + * }, + * }; + * struct drm_i915_gem_context_create_ext create = { + * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, + * .extensions = to_user_pointer(&p_engines); + * }; + * + * ctx_id = gem_context_create_ext(drm_fd, &create); + * + * // Now we have created a GEM context with its engine map containing a + * // single virtual engine. Submissions to this slot can go either to + * // vcs0 or vcs1, depending on the load balancing algorithm used inside + * // the driver. The load balancing is dynamic from one batch buffer to + * // another and transparent to userspace. + * + * ... + * execbuf.rsvd1 = ctx_id; + * execbuf.flags = 0; // Submits to index 0 which is the virtual engine + * gem_execbuf(drm_fd, &execbuf); + */ + /* * i915_context_engines_load_balance: * @@ -1835,6 +2049,61 @@ struct i915_context_engines_bond { struct i915_engine_class_instance engines[N__]; \ } __attribute__((packed)) name__ +/** + * DOC: Context Engine Map uAPI + * + * Context engine map is a new way of addressing engines when submitting batch- + * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT` + * inside the flags field of `struct drm_i915_gem_execbuffer2`. + * + * To use it created GEM contexts need to be configured with a list of engines + * the user is intending to submit to. 
This is accomplished using the + * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct + * i915_context_param_engines`. + * + * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the + * configured map. + * + * Example of creating such context and submitting against it: + * + * .. code-block:: C + * + * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = { + * .engines = { { I915_ENGINE_CLASS_RENDER, 0 }, + * { I915_ENGINE_CLASS_COPY, 0 } } + * }; + * struct drm_i915_gem_context_create_ext_setparam p_engines = { + * .base = { + * .name = I915_CONTEXT_CREATE_EXT_SETPARAM, + * }, + * .param = { + * .param = I915_CONTEXT_PARAM_ENGINES, + * .value = to_user_pointer(&engines), + * .size = sizeof(engines), + * }, + * }; + * struct drm_i915_gem_context_create_ext create = { + * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS, + * .extensions = to_user_pointer(&p_engines); + * }; + * + * ctx_id = gem_context_create_ext(drm_fd, &create); + * + * // We have now created a GEM context with two engines in the map: + * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines + * // will not be accessible from this context. + * + * ... + * execbuf.rsvd1 = ctx_id; + * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context + * gem_execbuf(drm_fd, &execbuf); + * + * ... + * execbuf.rsvd1 = ctx_id; + * execbuf.flags = 1; // Submits to index 0, which is bcs0 for this context + * gem_execbuf(drm_fd, &execbuf); + */ + struct i915_context_param_engines { __u64 extensions; /* linked chain of extension blocks, 0 terminates */ #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ @@ -1853,20 +2122,10 @@ struct drm_i915_gem_context_create_ext_setparam { struct drm_i915_gem_context_param param; }; -struct drm_i915_gem_context_create_ext_clone { +/* This API has been removed. On the off chance someone somewhere has + * attempted to use it, never re-use this extension number. + */ #define I915_CONTEXT_CREATE_EXT_CLONE 1 - struct i915_user_extension base; - __u32 clone_id; - __u32 flags; -#define I915_CONTEXT_CLONE_ENGINES (1u << 0) -#define I915_CONTEXT_CLONE_FLAGS (1u << 1) -#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2) -#define I915_CONTEXT_CLONE_SSEU (1u << 3) -#define I915_CONTEXT_CLONE_TIMELINE (1u << 4) -#define I915_CONTEXT_CLONE_VM (1u << 5) -#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1) - __u64 rsvd; -}; struct drm_i915_gem_context_destroy { __u32 ctx_id; @@ -1938,14 +2197,69 @@ struct drm_i915_reset_stats { __u32 pad; }; +/** + * struct drm_i915_gem_userptr - Create GEM object from user allocated memory. + * + * Userptr objects have several restrictions on what ioctls can be used with the + * object handle. + */ struct drm_i915_gem_userptr { + /** + * @user_ptr: The pointer to the allocated memory. + * + * Needs to be aligned to PAGE_SIZE. + */ __u64 user_ptr; + + /** + * @user_size: + * + * The size in bytes for the allocated memory. This will also become the + * object size. + * + * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE, + * or larger. + */ __u64 user_size; + + /** + * @flags: + * + * Supported flags: + * + * I915_USERPTR_READ_ONLY: + * + * Mark the object as readonly, this also means GPU access can only be + * readonly. This is only supported on HW which supports readonly access + * through the GTT. If the HW can't support readonly access, an error is + * returned. 
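A minimal sketch of creating a userptr object under the restrictions listed above (helper name invented; both the pointer and the size must be PAGE_SIZE aligned, error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_wrap_userptr(int fd, void *ptr, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,	/* PAGE_SIZE aligned */
		.user_size = size,		/* PAGE_SIZE aligned */
		.flags = 0,			/* writable GPU access */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -1;
	*handle = arg.handle;			/* nonzero on success */
	return 0;
}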
+ * + * I915_USERPTR_PROBE: + * + * Probe the provided @user_ptr range and validate that the @user_ptr is + * indeed pointing to normal memory and that the range is also valid. + * For example if some garbage address is given to the kernel, then this + * should complain. + * + * Returns -EFAULT if the probe failed. + * + * Note that this doesn't populate the backing pages, and also doesn't + * guarantee that the object will remain valid when the object is + * eventually used. + * + * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE + * returns a non-zero value. + * + * I915_USERPTR_UNSYNCHRONIZED: + * + * NOT USED. Setting this flag will result in an error. + */ __u32 flags; #define I915_USERPTR_READ_ONLY 0x1 +#define I915_USERPTR_PROBE 0x2 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000 /** - * Returned handle for the object. + * @handle: Returned handle for the object. * * Object handles are nonzero. */ @@ -2067,7 +2381,7 @@ struct drm_i915_perf_open_param { __u64 properties_ptr; }; -/** +/* * Enable data capture for a stream that was either opened in a disabled state * via I915_PERF_FLAG_DISABLED or was later disabled via * I915_PERF_IOCTL_DISABLE. @@ -2081,7 +2395,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) -/** +/* * Disable data capture for a stream. * * It is an error to try and read a stream that is disabled. @@ -2090,7 +2404,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) -/** +/* * Change metrics_set captured by a stream. * * If the stream is bound to a specific context, the configuration change @@ -2103,7 +2417,7 @@ struct drm_i915_perf_open_param { */ #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) -/** +/* * Common to all i915 perf records */ struct drm_i915_perf_record_header { @@ -2151,7 +2465,7 @@ enum drm_i915_perf_record_type { DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -/** +/* * Structure to upload perf dynamic configuration into the kernel. */ struct drm_i915_perf_oa_config { @@ -2172,53 +2486,95 @@ struct drm_i915_perf_oa_config { __u64 flex_regs_ptr; }; +/** + * struct drm_i915_query_item - An individual query for the kernel to process. + * + * The behaviour is determined by the @query_id. Note that exactly what + * @data_ptr is also depends on the specific @query_id. + */ struct drm_i915_query_item { + /** @query_id: The id for this query */ __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 #define DRM_I915_QUERY_ENGINE_INFO 2 #define DRM_I915_QUERY_PERF_CONFIG 3 +#define DRM_I915_QUERY_MEMORY_REGIONS 4 /* Must be kept compact -- no holes and well documented */ - /* + /** + * @length: + * * When set to zero by userspace, this is filled with the size of the - * data to be written at the data_ptr pointer. The kernel sets this + * data to be written at the @data_ptr pointer. The kernel sets this * value to a negative value to signal an error on a particular query * item. */ __s32 length; - /* + /** + * @flags: + * * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. 
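The perf ENABLE/DISABLE pair documented above takes no argument; toggling a stream that was opened with I915_PERF_FLAG_DISABLED is a single ioctl on the stream fd, roughly (helper name invented):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_perf_set_enabled(int stream_fd, int enable)
{
	return ioctl(stream_fd, enable ? I915_PERF_IOCTL_ENABLE
				       : I915_PERF_IOCTL_DISABLE);
}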
* * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the - * following : - * - DRM_I915_QUERY_PERF_CONFIG_LIST - * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID - * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID + * following: + * + * - DRM_I915_QUERY_PERF_CONFIG_LIST + * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID + * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID */ __u32 flags; #define DRM_I915_QUERY_PERF_CONFIG_LIST 1 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 - /* - * Data will be written at the location pointed by data_ptr when the - * value of length matches the length of the data to be written by the + /** + * @data_ptr: + * + * Data will be written at the location pointed by @data_ptr when the + * value of @length matches the length of the data to be written by the * kernel. */ __u64 data_ptr; }; +/** + * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the + * kernel to fill out. + * + * Note that this is generally a two step process for each struct + * drm_i915_query_item in the array: + * + * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct + * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The + * kernel will then fill in the size, in bytes, which tells userspace how + * memory it needs to allocate for the blob(say for an array of properties). + * + * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the + * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that + * the &drm_i915_query_item.length should still be the same as what the + * kernel previously set. At this point the kernel can fill in the blob. + * + * Note that for some query items it can make sense for userspace to just pass + * in a buffer/blob equal to or larger than the required size. In this case only + * a single ioctl call is needed. For some smaller query items this can work + * quite well. + * + */ struct drm_i915_query { + /** @num_items: The number of elements in the @items_ptr array */ __u32 num_items; - /* - * Unused for now. Must be cleared to zero. + /** + * @flags: Unused for now. Must be cleared to zero. */ __u32 flags; - /* - * This points to an array of num_items drm_i915_query_item structures. + /** + * @items_ptr: + * + * Pointer to an array of struct drm_i915_query_item. The number of + * array elements is @num_items. */ __u64 items_ptr; }; @@ -2287,26 +2643,96 @@ struct drm_i915_query_topology_info { }; /** + * DOC: Engine Discovery uAPI + * + * Engine discovery uAPI is a way of enumerating physical engines present in a + * GPU associated with an open i915 DRM file descriptor. This supersedes the old + * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like + * `I915_PARAM_HAS_BLT`. + * + * The need for this interface came starting with Icelake and newer GPUs, which + * started to establish a pattern of having multiple engines of a same class, + * where not all instances were always completely functionally equivalent. + * + * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the + * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id. + * + * Example for getting the list of engines: + * + * .. 
code-block:: C + * + * struct drm_i915_query_engine_info *info; + * struct drm_i915_query_item item = { + * .query_id = DRM_I915_QUERY_ENGINE_INFO, + * }; + * struct drm_i915_query query = { + * .num_items = 1, + * .items_ptr = (uintptr_t)&item, + * }; + * int err, i; + * + * // First query the size of the blob we need; this needs to be large + * // enough to hold our array of engines. The kernel will fill out the + * // item.length for us, which is the number of bytes we need. + * // + * // Alternatively a large buffer can be allocated straight away, enabling + * // querying in one pass, in which case item.length should contain the + * // length of the provided buffer. + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * info = calloc(1, item.length); + * // Now that we allocated the required number of bytes, we call the ioctl + * // again, this time with the data_ptr pointing to our newly allocated + * // blob, which the kernel can then populate with info on all engines. + * item.data_ptr = (uintptr_t)info; + * + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * // We can now access each engine in the array + * for (i = 0; i < info->num_engines; i++) { + * struct drm_i915_engine_info einfo = info->engines[i]; + * u16 class = einfo.engine.engine_class; + * u16 instance = einfo.engine.engine_instance; + * .... + * } + * + * free(info); + * + * Each of the enumerated engines, apart from being defined by its class and + * instance (see `struct i915_engine_class_instance`), can also have flags and + * capabilities defined as documented in i915_drm.h. + * + * For instance, video engines which support HEVC encoding will have the + * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set. + * + * Engine discovery only fully comes into its own when combined with the new way + * of addressing engines when submitting batch buffers using contexts with + * engine maps configured. + */ + +/** * struct drm_i915_engine_info * * Describes one engine and its capabilities as known to the driver. */ struct drm_i915_engine_info { - /** Engine class and instance. */ + /** @engine: Engine class and instance. */ struct i915_engine_class_instance engine; - /** Reserved field. */ + /** @rsvd0: Reserved field. */ __u32 rsvd0; - /** Engine flags. */ + /** @flags: Engine flags. */ __u64 flags; - /** Capabilities of this engine. */ + /** @capabilities: Capabilities of this engine. */ __u64 capabilities; #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) - /** Reserved fields. */ + /** @rsvd1: Reserved fields. */ __u64 rsvd1[4]; }; @@ -2317,13 +2743,13 @@ struct drm_i915_engine_info { * an array of struct drm_i915_engine_info structures. */ struct drm_i915_query_engine_info { - /** Number of struct drm_i915_engine_info structs following. */ + /** @num_engines: Number of struct drm_i915_engine_info structs following. */ __u32 num_engines; - /** MBZ */ + /** @rsvd: MBZ */ __u32 rsvd[3]; - /** Marker for drm_i915_engine_info structures. */ + /** @engines: Marker for drm_i915_engine_info structures. 
*/ struct drm_i915_engine_info engines[]; }; @@ -2377,6 +2803,241 @@ struct drm_i915_query_perf_config { __u8 data[]; }; +/** + * enum drm_i915_gem_memory_class - Supported memory classes + */ +enum drm_i915_gem_memory_class { + /** @I915_MEMORY_CLASS_SYSTEM: System memory */ + I915_MEMORY_CLASS_SYSTEM = 0, + /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */ + I915_MEMORY_CLASS_DEVICE, +}; + +/** + * struct drm_i915_gem_memory_class_instance - Identify particular memory region + */ +struct drm_i915_gem_memory_class_instance { + /** @memory_class: See enum drm_i915_gem_memory_class */ + __u16 memory_class; + + /** @memory_instance: Which instance */ + __u16 memory_instance; +}; + +/** + * struct drm_i915_memory_region_info - Describes one region as known to the + * driver. + * + * Note that we reserve some stuff here for potential future work. As an example + * we might want to expose the capabilities for a given region, which could + * include things like whether the region is CPU mappable/accessible, what the + * supported mapping types are, etc. + * + * Note that to extend struct drm_i915_memory_region_info and struct + * drm_i915_query_memory_regions in the future the plan is to do the following: + * + * .. code-block:: C + * + * struct drm_i915_memory_region_info { + * struct drm_i915_gem_memory_class_instance region; + * union { + * __u32 rsvd0; + * __u32 new_thing1; + * }; + * ... + * union { + * __u64 rsvd1[8]; + * struct { + * __u64 new_thing2; + * __u64 new_thing3; + * ... + * }; + * }; + * }; + * + * With this, things should remain source compatible between versions for + * userspace, even as we add new fields. + * + * Note this is using both struct drm_i915_query_item and struct drm_i915_query. + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS + * at &drm_i915_query_item.query_id. + */ +struct drm_i915_memory_region_info { + /** @region: The class:instance pair encoding */ + struct drm_i915_gem_memory_class_instance region; + + /** @rsvd0: MBZ */ + __u32 rsvd0; + + /** @probed_size: Memory probed by the driver (-1 = unknown) */ + __u64 probed_size; + + /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */ + __u64 unallocated_size; + + /** @rsvd1: MBZ */ + __u64 rsvd1[8]; +}; + +/** + * struct drm_i915_query_memory_regions + * + * The region info query enumerates all regions known to the driver by filling + * in an array of struct drm_i915_memory_region_info structures. + * + * Example for getting the list of supported regions: + * + * .. code-block:: C + * + * struct drm_i915_query_memory_regions *info; + * struct drm_i915_query_item item = { + * .query_id = DRM_I915_QUERY_MEMORY_REGIONS, + * }; + * struct drm_i915_query query = { + * .num_items = 1, + * .items_ptr = (uintptr_t)&item, + * }; + * int err, i; + * + * // First query the size of the blob we need; this needs to be large + * // enough to hold our array of regions. The kernel will fill out the + * // item.length for us, which is the number of bytes we need. + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... + * + * info = calloc(1, item.length); + * // Now that we allocated the required number of bytes, we call the ioctl + * // again, this time with the data_ptr pointing to our newly allocated + * // blob, which the kernel can then populate with all the region info. + * item.data_ptr = (uintptr_t)info; + * + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); + * if (err) ... 
+ * + * // We can now access each region in the array + * for (i = 0; i < info->num_regions; i++) { + * struct drm_i915_memory_region_info mr = info->regions[i]; + * u16 class = mr.region.memory_class; + * u16 instance = mr.region.memory_instance; + * + * .... + * } + * + * free(info); + */ +struct drm_i915_query_memory_regions { + /** @num_regions: Number of supported regions */ + __u32 num_regions; + + /** @rsvd: MBZ */ + __u32 rsvd[3]; + + /** @regions: Info about each supported region */ + struct drm_i915_memory_region_info regions[]; +}; + +/** + * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added + * extension support using struct i915_user_extension. + * + * Note that in the future we want to have our buffer flags here, at least for + * the stuff that is immutable. Previously we would have two ioctls, one to + * create the object with gem_create, and another to apply various parameters; + * however, this creates some ambiguity for the params which are considered + * immutable. Also in general we're phasing out the various SET/GET ioctls. + */ +struct drm_i915_gem_create_ext { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + * + * Note that for some devices we might have further minimum + * page-size restrictions (larger than 4K), like for device local-memory. + * However in general the final size here should always reflect any + * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS + * extension to place the object in device local-memory. + */ + __u64 size; + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + /** @flags: MBZ */ + __u32 flags; + /** + * @extensions: The chain of extensions to apply to this object. + * + * This will be useful in the future when we need to support several + * different extensions, and we need to apply more than one when + * creating the object. See struct i915_user_extension. + * + * If we don't supply any extensions then we get the same old gem_create + * behaviour. + * + * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see + * struct drm_i915_gem_create_ext_memory_regions. + */ +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 + __u64 extensions; +}; + +/** + * struct drm_i915_gem_create_ext_memory_regions - The + * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. + * + * Set the object with the desired set of placements/regions in priority + * order. Each entry must be unique and supported by the device. + * + * This is provided as an array of struct drm_i915_gem_memory_class_instance, or + * an equivalent layout of class:instance pair encodings. See struct + * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to + * query the supported regions for a device. + * + * As an example, on discrete devices, if we wish to set the placement as + * device local-memory we can do something like: + * + * .. code-block:: C + * + * struct drm_i915_gem_memory_class_instance region_lmem = { + * .memory_class = I915_MEMORY_CLASS_DEVICE, + * .memory_instance = 0, + * }; + * struct drm_i915_gem_create_ext_memory_regions regions = { + * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS }, + * .regions = (uintptr_t)&region_lmem, + * .num_regions = 1, + * }; + * struct drm_i915_gem_create_ext create_ext = { + * .size = 16 * PAGE_SIZE, + * .extensions = (uintptr_t)&regions, + * }; + * + * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext); + * if (err) ... 
+ * + * At which point we get the object handle in &drm_i915_gem_create_ext.handle, + * along with the final object size in &drm_i915_gem_create_ext.size, which + * should account for any rounding up, if required. + */ +struct drm_i915_gem_create_ext_memory_regions { + /** @base: Extension link. See struct i915_user_extension. */ + struct i915_user_extension base; + + /** @pad: MBZ */ + __u32 pad; + /** @num_regions: Number of elements in the @regions array. */ + __u32 num_regions; + /** + * @regions: The regions/placements array. + * + * An array of struct drm_i915_gem_memory_class_instance. + */ + __u64 regions; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 5596d7c37f9e..6b8fffc28a50 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -73,11 +73,19 @@ struct drm_msm_timespec { #define MSM_PARAM_MAX_FREQ 0x04 #define MSM_PARAM_TIMESTAMP 0x05 #define MSM_PARAM_GMEM_BASE 0x06 -#define MSM_PARAM_NR_RINGS 0x07 +#define MSM_PARAM_PRIORITIES 0x07 /* The # of priority levels */ #define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */ #define MSM_PARAM_FAULTS 0x09 #define MSM_PARAM_SUSPENDS 0x0a +/* For backwards compat. The original support for preemption was based on + * a single ring per priority level, so the # of priority levels equaled the + * # of rings. With drm/scheduler providing additional levels of priority, + * the number of priorities is greater than the # of rings. The param is + * renamed to better reflect this. + */ +#define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES + struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ __u32 param; /* in, MSM_PARAM_x */ @@ -94,13 +102,12 @@ struct drm_msm_param { /* cache modes */ #define MSM_BO_CACHED 0x00010000 #define MSM_BO_WC 0x00020000 -#define MSM_BO_UNCACHED 0x00040000 +#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */ +#define MSM_BO_CACHED_COHERENT 0x080000 #define MSM_BO_FLAGS (MSM_BO_SCANOUT | \ MSM_BO_GPU_READONLY | \ - MSM_BO_CACHED | \ - MSM_BO_WC | \ - MSM_BO_UNCACHED) + MSM_BO_CACHE_MASK) struct drm_msm_gem_new { __u64 size; /* in */ @@ -305,6 +312,10 @@ struct drm_msm_gem_madvise { #define MSM_SUBMITQUEUE_FLAGS (0) +/* + * The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1; + * a lower numeric value is higher priority. + */ struct drm_msm_submitqueue { __u32 flags; /* in, MSM_SUBMITQUEUE_x */ __u32 prio; /* in, Priority level */ diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index ec19db1eead8..061e700dd06c 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -171,6 +171,7 @@ enum drm_panfrost_param { DRM_PANFROST_PARAM_JS_FEATURES15, DRM_PANFROST_PARAM_NR_CORE_GROUPS, DRM_PANFROST_PARAM_THREAD_TLS_ALLOC, + DRM_PANFROST_PARAM_AFBC_FEATURES, }; struct drm_panfrost_get_param { diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h index c4df3c3668b3..94cfc306d50a 100644 --- a/include/uapi/drm/tegra_drm.h +++ b/include/uapi/drm/tegra_drm.h @@ -1,24 +1,5 @@ -/* - * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ +/* SPDX-License-Identifier: MIT */ +/* Copyright (c) 2012-2020 NVIDIA Corporation */ #ifndef _UAPI_TEGRA_DRM_H_ #define _UAPI_TEGRA_DRM_H_ @@ -29,6 +10,8 @@ extern "C" { #endif +/* Tegra DRM legacy UAPI. Only enabled with STAGING */ + #define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) @@ -649,8 +632,8 @@ struct drm_tegra_gem_get_flags { #define DRM_TEGRA_SYNCPT_READ 0x02 #define DRM_TEGRA_SYNCPT_INCR 0x03 #define DRM_TEGRA_SYNCPT_WAIT 0x04 -#define DRM_TEGRA_OPEN_CHANNEL 0x05 -#define DRM_TEGRA_CLOSE_CHANNEL 0x06 +#define DRM_TEGRA_OPEN_CHANNEL 0x05 +#define DRM_TEGRA_CLOSE_CHANNEL 0x06 #define DRM_TEGRA_GET_SYNCPT 0x07 #define DRM_TEGRA_SUBMIT 0x08 #define DRM_TEGRA_GET_SYNCPT_BASE 0x09 @@ -674,6 +657,402 @@ struct drm_tegra_gem_get_flags { #define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags) #define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags) +/* New Tegra DRM UAPI */ + +/* + * Reported by the driver in the `capabilities` field. + * + * DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT: If set, the engine is cache coherent + * with regard to the system memory. + */ +#define DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT (1 << 0) + +struct drm_tegra_channel_open { + /** + * @host1x_class: [in] + * + * Host1x class of the engine that will be programmed using this + * channel. + */ + __u32 host1x_class; + + /** + * @flags: [in] + * + * Flags. + */ + __u32 flags; + + /** + * @context: [out] + * + * Opaque identifier corresponding to the opened channel. + */ + __u32 context; + + /** + * @version: [out] + * + * Version of the engine hardware. This can be used by userspace + * to determine how the engine needs to be programmed. + */ + __u32 version; + + /** + * @capabilities: [out] + * + * Flags describing the hardware capabilities. + */ + __u32 capabilities; + __u32 padding; +}; + +struct drm_tegra_channel_close { + /** + * @context: [in] + * + * Identifier of the channel to close. + */ + __u32 context; + __u32 padding; +}; + +/* + * Mapping flags that can be used to influence how the mapping is created. 
+ * + * DRM_TEGRA_CHANNEL_MAP_READ: create mapping that allows HW read access + * DRM_TEGRA_CHANNEL_MAP_WRITE: create mapping that allows HW write access + */ +#define DRM_TEGRA_CHANNEL_MAP_READ (1 << 0) +#define DRM_TEGRA_CHANNEL_MAP_WRITE (1 << 1) +#define DRM_TEGRA_CHANNEL_MAP_READ_WRITE (DRM_TEGRA_CHANNEL_MAP_READ | \ + DRM_TEGRA_CHANNEL_MAP_WRITE) + +struct drm_tegra_channel_map { + /** + * @context: [in] + * + * Identifier of the channel to make the memory available to. + */ + __u32 context; + + /** + * @handle: [in] + * + * GEM handle of the memory to map. + */ + __u32 handle; + + /** + * @flags: [in] + * + * Flags. + */ + __u32 flags; + + /** + * @mapping: [out] + * + * Identifier corresponding to the mapping, to be used for + * relocations or unmapping later. + */ + __u32 mapping; +}; + +struct drm_tegra_channel_unmap { + /** + * @context: [in] + * + * Channel identifier of the channel to unmap memory from. + */ + __u32 context; + + /** + * @mapping: [in] + * + * Mapping identifier of the memory mapping to unmap. + */ + __u32 mapping; +}; + +/* Submission */ + +/** + * Specify that bit 39 of the patched-in address should be set to switch + * swizzling between Tegra and non-Tegra sector layout on systems that store + * surfaces in system memory in non-Tegra sector layout. + */ +#define DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT (1 << 0) + +struct drm_tegra_submit_buf { + /** + * @mapping: [in] + * + * Identifier of the mapping to use in the submission. + */ + __u32 mapping; + + /** + * @flags: [in] + * + * Flags. + */ + __u32 flags; + + /** + * Information for relocation patching. + */ + struct { + /** + * @target_offset: [in] + * + * Offset from the start of the mapping of the data whose + * address is to be patched into the gather. + */ + __u64 target_offset; + + /** + * @gather_offset_words: [in] + * + * Offset in words from the start of the gather data to + * where the address should be patched in. + */ + __u32 gather_offset_words; + + /** + * @shift: [in] + * + * Number of bits the address should be shifted right before + * patching in. + */ + __u32 shift; + } reloc; +}; + +/** + * Execute `words` words of Host1x opcodes specified in the `gather_data_ptr` + * buffer. Each GATHER_UPTR command uses successive words from the buffer. + */ +#define DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR 0 +/** + * Wait for a syncpoint to reach a value before continuing with further + * commands. + */ +#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT 1 +/** + * Wait for a syncpoint to reach a value before continuing with further + * commands. The threshold is calculated relative to the start of the job. + */ +#define DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE 2 + +struct drm_tegra_submit_cmd_gather_uptr { + __u32 words; + __u32 reserved[3]; +}; + +struct drm_tegra_submit_cmd_wait_syncpt { + __u32 id; + __u32 value; + __u32 reserved[2]; +}; + +struct drm_tegra_submit_cmd { + /** + * @type: [in] + * + * Command type to execute. One of the DRM_TEGRA_SUBMIT_CMD* + * defines. + */ + __u32 type; + + /** + * @flags: [in] + * + * Flags. + */ + __u32 flags; + + union { + struct drm_tegra_submit_cmd_gather_uptr gather_uptr; + struct drm_tegra_submit_cmd_wait_syncpt wait_syncpt; + __u32 reserved[4]; + }; +}; + +struct drm_tegra_submit_syncpt { + /** + * @id: [in] + * + * ID of the syncpoint that the job will increment. + */ + __u32 id; + + /** + * @flags: [in] + * + * Flags. + */ + __u32 flags; + + /** + * @increments: [in] + * + * Number of times the job will increment this syncpoint. 
+ */ + __u32 increments; + + /** + * @value: [out] + * + * Value the syncpoint will have once the job has completed all + * its specified syncpoint increments. + * + * Note that the kernel may increment the syncpoint before or after + * the job. These increments are not reflected in this field. + * + * If the job hangs or times out, not all of the increments may + * get executed. + */ + __u32 value; +}; + +struct drm_tegra_channel_submit { + /** + * @context: [in] + * + * Identifier of the channel to submit this job to. + */ + __u32 context; + + /** + * @num_bufs: [in] + * + * Number of elements in the `bufs_ptr` array. + */ + __u32 num_bufs; + + /** + * @num_cmds: [in] + * + * Number of elements in the `cmds_ptr` array. + */ + __u32 num_cmds; + + /** + * @gather_data_words: [in] + * + * Number of 32-bit words in the `gather_data_ptr` array. + */ + __u32 gather_data_words; + + /** + * @bufs_ptr: [in] + * + * Pointer to an array of drm_tegra_submit_buf structures. + */ + __u64 bufs_ptr; + + /** + * @cmds_ptr: [in] + * + * Pointer to an array of drm_tegra_submit_cmd structures. + */ + __u64 cmds_ptr; + + /** + * @gather_data_ptr: [in] + * + * Pointer to an array of Host1x opcodes to be used by GATHER_UPTR + * commands. + */ + __u64 gather_data_ptr; + + /** + * @syncobj_in: [in] + * + * Handle for a DRM syncobj that will be waited on before submission. + * Ignored if zero. + */ + __u32 syncobj_in; + + /** + * @syncobj_out: [in] + * + * Handle for a DRM syncobj that will have its fence replaced with + * the job's completion fence. Ignored if zero. + */ + __u32 syncobj_out; + + /** + * @syncpt: [in,out] + * + * Information about the syncpoint the job will increment. + */ + struct drm_tegra_submit_syncpt syncpt; +}; + +struct drm_tegra_syncpoint_allocate { + /** + * @id: [out] + * + * ID of allocated syncpoint. + */ + __u32 id; + __u32 padding; +}; + +struct drm_tegra_syncpoint_free { + /** + * @id: [in] + * + * ID of syncpoint to free. + */ + __u32 id; + __u32 padding; +}; + +struct drm_tegra_syncpoint_wait { + /** + * @timeout_ns: [in] + * + * Absolute timestamp at which the wait will time out. + */ + __s64 timeout_ns; + + /** + * @id: [in] + * + * ID of syncpoint to wait on. + */ + __u32 id; + + /** + * @threshold: [in] + * + * Threshold to wait for. + */ + __u32 threshold; + + /** + * @value: [out] + * + * Value of the syncpoint upon wait completion. 
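+ *
+ * As an illustrative sketch only (the `submit` variable and `abs_timeout_ns`
+ * below are assumed application state, not part of this header), a wait for
+ * a job queued with DRM_IOCTL_TEGRA_CHANNEL_SUBMIT could look like:
+ *
+ *	struct drm_tegra_syncpoint_wait wait = {
+ *		.timeout_ns = abs_timeout_ns,
+ *		.id = submit.syncpt.id,
+ *		.threshold = submit.syncpt.value,
+ *	};
+ *
+ *	err = ioctl(fd, DRM_IOCTL_TEGRA_SYNCPOINT_WAIT, &wait);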
+ */ + __u32 value; + + __u32 padding; +}; + +#define DRM_IOCTL_TEGRA_CHANNEL_OPEN DRM_IOWR(DRM_COMMAND_BASE + 0x10, struct drm_tegra_channel_open) +#define DRM_IOCTL_TEGRA_CHANNEL_CLOSE DRM_IOWR(DRM_COMMAND_BASE + 0x11, struct drm_tegra_channel_close) +#define DRM_IOCTL_TEGRA_CHANNEL_MAP DRM_IOWR(DRM_COMMAND_BASE + 0x12, struct drm_tegra_channel_map) +#define DRM_IOCTL_TEGRA_CHANNEL_UNMAP DRM_IOWR(DRM_COMMAND_BASE + 0x13, struct drm_tegra_channel_unmap) +#define DRM_IOCTL_TEGRA_CHANNEL_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + 0x14, struct drm_tegra_channel_submit) + +#define DRM_IOCTL_TEGRA_SYNCPOINT_ALLOCATE DRM_IOWR(DRM_COMMAND_BASE + 0x20, struct drm_tegra_syncpoint_allocate) +#define DRM_IOCTL_TEGRA_SYNCPOINT_FREE DRM_IOWR(DRM_COMMAND_BASE + 0x21, struct drm_tegra_syncpoint_free) +#define DRM_IOCTL_TEGRA_SYNCPOINT_WAIT DRM_IOWR(DRM_COMMAND_BASE + 0x22, struct drm_tegra_syncpoint_wait) + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 1ce746e228d9..4104f22fb3d3 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -38,6 +38,9 @@ extern "C" { #define DRM_V3D_GET_BO_OFFSET 0x05 #define DRM_V3D_SUBMIT_TFU 0x06 #define DRM_V3D_SUBMIT_CSD 0x07 +#define DRM_V3D_PERFMON_CREATE 0x08 +#define DRM_V3D_PERFMON_DESTROY 0x09 +#define DRM_V3D_PERFMON_GET_VALUES 0x0a #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -47,6 +50,12 @@ extern "C" { #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) #define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd) +#define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \ + struct drm_v3d_perfmon_create) +#define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \ + struct drm_v3d_perfmon_destroy) +#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ + struct drm_v3d_perfmon_get_values) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 @@ -127,6 +136,11 @@ struct drm_v3d_submit_cl { __u32 bo_handle_count; __u32 flags; + + /* ID of the perfmon to attach to this job. 0 means no perfmon. */ + __u32 perfmon_id; + + __u32 pad; }; /** @@ -195,6 +209,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_TFU, DRM_V3D_PARAM_SUPPORTS_CSD, DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, + DRM_V3D_PARAM_SUPPORTS_PERFMON, }; struct drm_v3d_get_param { @@ -258,6 +273,127 @@ struct drm_v3d_submit_csd { __u32 in_sync; /* Sync object to signal when the CSD job is done. */ __u32 out_sync; + + /* ID of the perfmon to attach to this job. 0 means no perfmon. 
*/ + __u32 perfmon_id; +}; + +enum { + V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, + V3D_PERFCNT_FEP_VALID_PRIMS, + V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS, + V3D_PERFCNT_FEP_VALID_QUADS, + V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL, + V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL, + V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS, + V3D_PERFCNT_TLB_QUADS_ZERO_COV, + V3D_PERFCNT_TLB_QUADS_NONZERO_COV, + V3D_PERFCNT_TLB_QUADS_WRITTEN, + V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD, + V3D_PERFCNT_PTB_PRIM_CLIP, + V3D_PERFCNT_PTB_PRIM_REV, + V3D_PERFCNT_QPU_IDLE_CYCLES, + V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER, + V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG, + V3D_PERFCNT_QPU_CYCLES_VALID_INSTR, + V3D_PERFCNT_QPU_CYCLES_TMU_STALL, + V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL, + V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL, + V3D_PERFCNT_QPU_IC_HIT, + V3D_PERFCNT_QPU_IC_MISS, + V3D_PERFCNT_QPU_UC_HIT, + V3D_PERFCNT_QPU_UC_MISS, + V3D_PERFCNT_TMU_TCACHE_ACCESS, + V3D_PERFCNT_TMU_TCACHE_MISS, + V3D_PERFCNT_VPM_VDW_STALL, + V3D_PERFCNT_VPM_VCD_STALL, + V3D_PERFCNT_BIN_ACTIVE, + V3D_PERFCNT_RDR_ACTIVE, + V3D_PERFCNT_L2T_HITS, + V3D_PERFCNT_L2T_MISSES, + V3D_PERFCNT_CYCLE_COUNT, + V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER, + V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT, + V3D_PERFCNT_PTB_PRIMS_BINNED, + V3D_PERFCNT_AXI_WRITES_WATCH_0, + V3D_PERFCNT_AXI_READS_WATCH_0, + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0, + V3D_PERFCNT_AXI_READ_STALLS_WATCH_0, + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0, + V3D_PERFCNT_AXI_READ_BYTES_WATCH_0, + V3D_PERFCNT_AXI_WRITES_WATCH_1, + V3D_PERFCNT_AXI_READS_WATCH_1, + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1, + V3D_PERFCNT_AXI_READ_STALLS_WATCH_1, + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1, + V3D_PERFCNT_AXI_READ_BYTES_WATCH_1, + V3D_PERFCNT_TLB_PARTIAL_QUADS, + V3D_PERFCNT_TMU_CONFIG_ACCESSES, + V3D_PERFCNT_L2T_NO_ID_STALL, + V3D_PERFCNT_L2T_COM_QUE_STALL, + V3D_PERFCNT_L2T_TMU_WRITES, + V3D_PERFCNT_TMU_ACTIVE_CYCLES, + V3D_PERFCNT_TMU_STALLED_CYCLES, + V3D_PERFCNT_CLE_ACTIVE, + V3D_PERFCNT_L2T_TMU_READS, + V3D_PERFCNT_L2T_CLE_READS, + V3D_PERFCNT_L2T_VCD_READS, + V3D_PERFCNT_L2T_TMUCFG_READS, + V3D_PERFCNT_L2T_SLC0_READS, + V3D_PERFCNT_L2T_SLC1_READS, + V3D_PERFCNT_L2T_SLC2_READS, + V3D_PERFCNT_L2T_TMU_W_MISSES, + V3D_PERFCNT_L2T_TMU_R_MISSES, + V3D_PERFCNT_L2T_CLE_MISSES, + V3D_PERFCNT_L2T_VCD_MISSES, + V3D_PERFCNT_L2T_TMUCFG_MISSES, + V3D_PERFCNT_L2T_SLC0_MISSES, + V3D_PERFCNT_L2T_SLC1_MISSES, + V3D_PERFCNT_L2T_SLC2_MISSES, + V3D_PERFCNT_CORE_MEM_WRITES, + V3D_PERFCNT_L2T_MEM_WRITES, + V3D_PERFCNT_PTB_MEM_WRITES, + V3D_PERFCNT_TLB_MEM_WRITES, + V3D_PERFCNT_CORE_MEM_READS, + V3D_PERFCNT_L2T_MEM_READS, + V3D_PERFCNT_PTB_MEM_READS, + V3D_PERFCNT_PSE_MEM_READS, + V3D_PERFCNT_TLB_MEM_READS, + V3D_PERFCNT_GMP_MEM_READS, + V3D_PERFCNT_PTB_W_MEM_WORDS, + V3D_PERFCNT_TLB_W_MEM_WORDS, + V3D_PERFCNT_PSE_R_MEM_WORDS, + V3D_PERFCNT_TLB_R_MEM_WORDS, + V3D_PERFCNT_TMU_MRU_HITS, + V3D_PERFCNT_COMPUTE_ACTIVE, + V3D_PERFCNT_NUM, +}; + +#define DRM_V3D_MAX_PERF_COUNTERS 32 + +struct drm_v3d_perfmon_create { + __u32 id; + __u32 ncounters; + __u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; +}; + +struct drm_v3d_perfmon_destroy { + __u32 id; +}; + +/* + * Returns the values of the performance counters tracked by this + * perfmon (as an array of ncounters u64 values). + * + * No implicit synchronization is performed, so the user has to + * guarantee that any jobs using this perfmon have already been + * completed (probably by blocking on the seqno returned by the + * last exec that used the perfmon). 
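+ *
+ * A rough usage sketch (error handling omitted; `fd` and `values` are
+ * illustrative names, not part of this header): create a perfmon,
+ * reference it from jobs via their perfmon_id field, then read the
+ * counters back once those jobs have completed:
+ *
+ *	struct drm_v3d_perfmon_create create = {
+ *		.ncounters = 2,
+ *		.counters = { V3D_PERFCNT_CYCLE_COUNT,
+ *			      V3D_PERFCNT_QPU_CYCLES_VALID_INSTR },
+ *	};
+ *	__u64 values[2];
+ *	struct drm_v3d_perfmon_get_values get = {
+ *		.values_ptr = (uintptr_t)values,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create);
+ *	// submit jobs with perfmon_id = create.id, then wait for completion
+ *	get.id = create.id;
+ *	ioctl(fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get);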
+ */ +struct drm_v3d_perfmon_get_values { + __u32 id; + __u32 pad; + __u64 values_ptr; }; #if defined(__cplusplus) diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 02e917507479..9078775feb51 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -72,6 +72,9 @@ extern "C" { #define DRM_VMW_GB_SURFACE_CREATE_EXT 27 #define DRM_VMW_GB_SURFACE_REF_EXT 28 #define DRM_VMW_MSG 29 +#define DRM_VMW_MKSSTAT_RESET 30 +#define DRM_VMW_MKSSTAT_ADD 31 +#define DRM_VMW_MKSSTAT_REMOVE 32 /*************************************************************************/ /** @@ -1236,6 +1239,44 @@ struct drm_vmw_msg_arg { __u32 receive_len; }; +/** + * struct drm_vmw_mksstat_add_arg + * + * @stat: Pointer to user-space stat-counters array, page-aligned. + * @info: Pointer to user-space counter-infos array, page-aligned. + * @strs: Pointer to user-space stat strings, page-aligned. + * @stat_len: Length in bytes of stat-counters array. + * @info_len: Length in bytes of counter-infos array. + * @strs_len: Length in bytes of the stat strings, terminators included. + * @description: Pointer to instance descriptor string; will be truncated + * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars. + * @id: Output identifier of the produced record; -1 if error. + * + * Argument to the DRM_VMW_MKSSTAT_ADD ioctl. + */ +struct drm_vmw_mksstat_add_arg { + __u64 stat; + __u64 info; + __u64 strs; + __u64 stat_len; + __u64 info_len; + __u64 strs_len; + __u64 description; + __u64 id; +}; + +/** + * struct drm_vmw_mksstat_remove_arg + * + * @id: Identifier of the record being disposed, originally obtained through + * DRM_VMW_MKSSTAT_ADD ioctl. + * + * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl. + */ +struct drm_vmw_mksstat_remove_arg { + __u64 id; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index cd2d8279a5e4..daa481729e9b 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -48,7 +48,7 @@ * 2500 - 2999 future user space (maybe integrity labels and related events) * * Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are - * exclusively user space. 1300-2099 is kernel --> user space + * exclusively user space. 1300-2099 is kernel --> user space * communication. 
*/ #define AUDIT_GET 1000 /* Get status */ @@ -78,7 +78,7 @@ #define AUDIT_LAST_USER_MSG 1199 #define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */ #define AUDIT_LAST_USER_MSG2 2999 - + #define AUDIT_DAEMON_START 1200 /* Daemon startup record */ #define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ #define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h index abe5f2b6581b..c7e502bf5a6f 100644 --- a/include/uapi/linux/auxvec.h +++ b/include/uapi/linux/auxvec.h @@ -33,5 +33,8 @@ #define AT_EXECFN 31 /* filename of program */ +#ifndef AT_MINSIGSTKSZ +#define AT_MINSIGSTKSZ 51 /* minimal stack size for signal delivery */ +#endif #endif /* _UAPI_LINUX_AUXVEC_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ec6d85a81744..791f31dd0abe 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -84,7 +84,7 @@ struct bpf_lpm_trie_key { struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ - __u32 attach_type; /* program attach type */ + __u32 attach_type; /* program attach type (enum bpf_attach_type) */ }; union bpf_iter_link_info { @@ -324,9 +324,6 @@ union bpf_iter_link_info { * **BPF_PROG_TYPE_SK_LOOKUP** * *data_in* and *data_out* must be NULL. * - * **BPF_PROG_TYPE_XDP** - * *ctx_in* and *ctx_out* must be NULL. - * * **BPF_PROG_TYPE_RAW_TRACEPOINT**, * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** * @@ -527,6 +524,15 @@ union bpf_iter_link_info { * Look up an element with the given *key* in the map referred to * by the file descriptor *fd*, and if found, delete the element. * + * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map + * types, the *flags* argument needs to be set to 0, but for other + * map types, it may be specified as: + * + * **BPF_F_LOCK** + * Look up and delete the value of a spin-locked map + * without returning the lock. This must be specified if + * the elements contain a spinlock. + * * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types * implement this command as a "pop" operation, deleting the top * element rather than one corresponding to *key*. @@ -536,6 +542,10 @@ union bpf_iter_link_info { * This command is only valid for the following map types: * * **BPF_MAP_TYPE_QUEUE** * * **BPF_MAP_TYPE_STACK** + * * **BPF_MAP_TYPE_HASH** + * * **BPF_MAP_TYPE_PERCPU_HASH** + * * **BPF_MAP_TYPE_LRU_HASH** + * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** * * Return * Returns zero on success. 
On error, -1 is returned and *errno* @@ -837,6 +847,7 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, @@ -937,6 +948,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, + BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ }; enum bpf_attach_type { @@ -979,6 +991,9 @@ enum bpf_attach_type { BPF_SK_LOOKUP, BPF_XDP, BPF_SK_SKB_VERDICT, + BPF_SK_REUSEPORT_SELECT, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, + BPF_PERF_EVENT, __MAX_BPF_ATTACH_TYPE }; @@ -992,6 +1007,7 @@ enum bpf_link_type { BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, MAX_BPF_LINK_TYPE, }; @@ -1097,8 +1113,8 @@ enum bpf_link_type { /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * - * insn[0].src_reg: BPF_PSEUDO_MAP_FD - * insn[0].imm: map fd + * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] + * insn[0].imm: map fd or fd_idx * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 @@ -1106,15 +1122,19 @@ enum bpf_link_type { * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 -/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE - * insn[0].imm: map fd +#define BPF_PSEUDO_MAP_IDX 5 + +/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE + * insn[0].imm: map fd or fd_idx * insn[1].imm: offset into value * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map[0]+offset * verifier type: PTR_TO_MAP_VALUE */ -#define BPF_PSEUDO_MAP_VALUE 2 +#define BPF_PSEUDO_MAP_VALUE 2 +#define BPF_PSEUDO_MAP_IDX_VALUE 6 + /* insn[0].src_reg: BPF_PSEUDO_BTF_ID * insn[0].imm: kernel btf id of VAR * insn[1].imm: 0 @@ -1314,6 +1334,8 @@ union bpf_attr { /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; + __u32 :32; /* pad */ + __aligned_u64 fd_array; /* array of FDs */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1426,6 +1448,13 @@ union bpf_attr { __aligned_u64 iter_info; /* extra bpf_iter_link_info */ __u32 iter_info_len; /* iter_info length */ }; + struct { + /* black box user-provided value passed through + * to the BPF program at execution time and + * accessible through the bpf_get_attach_cookie() BPF helper + */ + __u64 bpf_cookie; + } perf_event; }; } link_create; @@ -2534,8 +2563,12 @@ union bpf_attr { * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be * one of the XDP program return codes up to **XDP_TX**, as chosen - * by the caller. Any higher bits in the *flags* argument must be - * unset. + * by the caller. The higher bits of *flags* can be set to + * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. + * + * With BPF_F_BROADCAST the packet will be broadcast to all the + * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress + * interface will be excluded from the broadcast. * * See also **bpf_redirect**\ (), which only supports redirecting * to an ifindex, but doesn't require a map to do so. @@ -3222,7 +3255,7 @@ union bpf_attr { * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a - * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. + * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. * It checks that the selected socket matches the incoming * request in the socket buffer. 
* Return @@ -4735,6 +4768,115 @@ union bpf_attr { * be zero-terminated except when **str_size** is 0. * * Or **-EBUSY** if the per-CPU memory copy buffer is busy. + * + * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size) + * Description + * Execute the bpf syscall with the given arguments. + * Return + * A syscall result. + * + * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags) + * Description + * Find the BTF type with the given name and kind in vmlinux BTF or in a module's BTFs. + * Return + * Returns btf_id and btf_obj_fd in the lower and upper 32 bits. + * + * long bpf_sys_close(u32 fd) + * Description + * Execute the close syscall for the given FD. + * Return + * A syscall result. + * + * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) + * Description + * Initialize the timer. + * The first 4 bits of *flags* specify the clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * Return + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such a map in bpffs. When the map is unpinned or the file descriptor + * is closed, all timers in the map will be cancelled and freed. + * + * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) + * Description + * Configure the timer to call the *callback_fn* static function. + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such a map in bpffs. When the map is unpinned or the file descriptor + * is closed, all timers in the map will be cancelled and freed. + * + * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) + * Description + * Set the timer expiration *nsecs* nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() call is made. + * In such a case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside a map element, the map + * owns the timer. bpf_timer_set_callback() will increment the refcnt + * of the BPF program to make sure that the callback_fn code stays valid. + * When the user space reference to a map reaches zero, all timers + * in the map are cancelled and the corresponding programs' refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If the map is pinned in + * bpffs, the callback_fn can re-arm itself indefinitely. + * The bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if the key/value layout matches across maps. + * Every bpf_timer_set_callback() can have a different callback_fn. + * + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. 
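+ *
+ *		As a rough sketch of how the timer helpers above fit
+ *		together (the map, key and callback names are assumed
+ *		for illustration; they are not defined here)::
+ *
+ *			struct elem { struct bpf_timer t; };
+ *
+ *			struct elem *val = bpf_map_lookup_elem(&timer_map, &key);
+ *			if (val) {
+ *				bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
+ *				bpf_timer_set_callback(&val->t, timer_cb);
+ *				bpf_timer_start(&val->t, 1000000000, 0);
+ *			}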
+ * + * long bpf_timer_cancel(struct bpf_timer *timer) + * Description + * Cancel the timer and wait for callback_fn to finish if it was running. + * Return + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer, which would have led to a deadlock otherwise. + * + * u64 bpf_get_func_ip(void *ctx) + * Description + * Get the address of the traced function (for tracing and kprobe programs). + * Return + * Address of the traced function. + * + * u64 bpf_get_attach_cookie(void *ctx) + * Description + * Get the bpf_cookie value provided (optionally) during the program + * attachment. It might be different for each individual + * attachment, even if the BPF program itself is the same. + * Expects the BPF program context *ctx* as a first argument. + * + * Supported for the following program types: + * - kprobe/uprobe; + * - tracepoint; + * - perf_event. + * Return + * Value specified by the user at BPF link creation/attachment time + * or 0, if it was not specified. + * + * long bpf_task_pt_regs(struct task_struct *task) + * Description + * Get the struct pt_regs associated with **task**. + * Return + * A pointer to struct pt_regs. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -4903,6 +5045,16 @@ union bpf_attr { FN(check_mtu), \ FN(for_each_map_elem), \ FN(snprintf), \ + FN(sys_bpf), \ + FN(btf_find_by_name_kind), \ + FN(sys_close), \ + FN(timer_init), \ + FN(timer_set_callback), \ + FN(timer_start), \ + FN(timer_cancel), \ + FN(get_func_ip), \ + FN(get_attach_cookie), \ + FN(task_pt_regs), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5080,6 +5232,12 @@ enum { BPF_F_BPRM_SECUREEXEC = (1ULL << 0), }; +/* Flags for bpf_redirect_map helper */ +enum { + BPF_F_BROADCAST = (1ULL << 3), + BPF_F_EXCLUDE_INGRESS = (1ULL << 4), +}; + #define __bpf_md_ptr(type, name) \ union { \ type name; \ @@ -5364,6 +5522,20 @@ struct sk_reuseport_md { __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ + /* When reuse->migrating_sk is NULL, it is selecting a sk for the + * new incoming connection request (e.g. selecting a listen sk for + * the received SYN in the TCP case). reuse->sk is one of the sk + * in the reuseport group. The bpf prog can use reuse->sk to learn + * the local listening ip/port without looking into the skb. + * + * When reuse->migrating_sk is not NULL, reuse->sk is closed and + * reuse->migrating_sk is the socket that needs to be migrated + * to another listening socket. migrating_sk could be a fullsock + * sk that is fully established or a reqsk that is in the middle + * of the 3-way handshake. + */ + __bpf_md_ptr(struct bpf_sock *, sk); + __bpf_md_ptr(struct bpf_sock *, migrating_sk); }; #define BPF_TAG_SIZE 8 @@ -6009,6 +6181,11 @@ struct bpf_spin_lock { __u32 val; }; +struct bpf_timer { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. 
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 5df73001aad4..d7d3cfead056 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -154,7 +154,7 @@ struct btrfs_scrub_progress { __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ __u64 read_errors; /* # of read errors encountered (EIO) */ __u64 csum_errors; /* # of failed csum checks */ - __u64 verify_errors; /* # of occurences, where the metadata + __u64 verify_errors; /* # of occurrences, where the metadata * of a tree block did not match the * expected values, like generation or * logical */ @@ -174,7 +174,7 @@ struct btrfs_scrub_progress { __u64 last_physical; /* last physical address scrubbed. In * case a scrub was aborted, this can * be used to restart the scrub */ - __u64 unverified_errors; /* # of occurences where a read for a + __u64 unverified_errors; /* # of occurrences where a read for a * full (64k) bio failed, but the re- * check succeeded for each 4k piece. * Intermittent error. */ @@ -288,6 +288,7 @@ struct btrfs_ioctl_fs_info_args { * first mount when booting older kernel versions. */ #define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1) +#define BTRFS_FEATURE_COMPAT_RO_VERITY (1ULL << 2) #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 58d7cff9afb1..e1c4c732aaba 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -59,7 +59,7 @@ /* for storing balance parameters in the root tree */ #define BTRFS_BALANCE_OBJECTID -4ULL -/* orhpan objectid for tracking unlinked/truncated files */ +/* orphan objectid for tracking unlinked/truncated files */ #define BTRFS_ORPHAN_OBJECTID -5ULL /* does write ahead logging to speed up fsyncs */ @@ -118,6 +118,29 @@ #define BTRFS_INODE_REF_KEY 12 #define BTRFS_INODE_EXTREF_KEY 13 #define BTRFS_XATTR_ITEM_KEY 24 + +/* + * fs verity items are stored under two different key types on disk. + * The descriptor items: + * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ] + * + * At offset 0, we store a btrfs_verity_descriptor_item which tracks the size + * of the descriptor item and some extra data for encryption. + * Starting at offset 1, these hold the generic fs verity descriptor. The + * latter are opaque to btrfs, we just read and write them as a blob for the + * higher level verity code. The most common descriptor size is 256 bytes. + * + * The merkle tree items: + * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ] + * + * These also start at offset 0, and correspond to the merkle tree bytes. When + * fsverity asks for page 0 of the merkle tree, we pull up one page starting at + * offset 0 for this key type. These are also opaque to btrfs, we're blindly + * storing whatever fsverity sends down. + */ +#define BTRFS_VERITY_DESC_ITEM_KEY 36 +#define BTRFS_VERITY_MERKLE_ITEM_KEY 37 + #define BTRFS_ORPHAN_ITEM_KEY 48 /* reserve 2-15 close to the inode for later flexibility */ @@ -275,7 +298,7 @@ #define BTRFS_PERSISTENT_ITEM_KEY 249 /* - * Persistantly stores the device replace state in the device tree. + * Persistently stores the device replace state in the device tree. * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). 
*/ #define BTRFS_DEV_REPLACE_KEY 250 @@ -991,4 +1014,16 @@ struct btrfs_qgroup_limit_item { __le64 rsv_excl; } __attribute__ ((__packed__)); +struct btrfs_verity_descriptor_item { + /* Size of the verity descriptor in bytes */ + __le64 size; + /* + * When we implement support for fscrypt, we will need to encrypt the + * Merkle tree for encrypted verity files. These 128 bits are for the + * eventual storage of an fscrypt initialization vector. + */ + __le64 reserved[2]; + __u8 encryption; +} __attribute__ ((__packed__)); + #endif /* _BTRFS_CTREE_H_ */ diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index c7535352fef6..90801ada2bbe 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -123,8 +123,8 @@ struct can_frame { /* * defined bits for canfd_frame.flags * - * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to - * be set in the CAN frame bitstream on the wire. The EDL bit switch turns + * The use of struct canfd_frame implies the FD Frame (FDF) bit to + * be set in the CAN frame bitstream on the wire. The FDF bit switch turns * the CAN controller's bitstream processor into the CAN FD mode which creates * two new options within the CAN FD frame specification: * @@ -135,9 +135,18 @@ struct can_frame { * controller only the CANFD_BRS bit is relevant for real CAN controllers when * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make * sense for virtual CAN interfaces to test applications with echoed frames. + * + * The struct can_frame and struct canfd_frame intentionally share the same + * layout to be able to write CAN frame content into a CAN FD frame structure. + * When this is done, the former differentiation via CAN_MTU / CANFD_MTU gets + * lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of + * using struct canfd_frame for mixed CAN / CAN FD content (dual use). + * N.B. the Kernel APIs do NOT provide mixed CAN / CAN FD content inside of + * struct canfd_frame; therefore the CANFD_FDF flag is disregarded by Linux. 
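+ *
+ * An illustrative dual-use sketch (`cf` and `frame_is_fd` are assumed
+ * application variables, not part of this header): a classic frame can
+ * be copied into a struct canfd_frame unchanged, while frames that
+ * really carry CAN FD content are tagged by the application:
+ *
+ *	struct canfd_frame cfd = {0};
+ *
+ *	if (frame_is_fd)
+ *		cfd.flags |= CANFD_FDF;
+ *	else
+ *		memcpy(&cfd, &cf, sizeof(struct can_frame));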
*/ #define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ #define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ +#define CANFD_FDF 0x04 /* mark CAN FD for dual use of struct canfd_frame */ /** * struct canfd_frame - CAN flexible data rate frame structure diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h index df6e821075c1..38936460f668 100644 --- a/include/uapi/linux/can/j1939.h +++ b/include/uapi/linux/can/j1939.h @@ -78,11 +78,20 @@ enum { enum { J1939_NLA_PAD, J1939_NLA_BYTES_ACKED, + J1939_NLA_TOTAL_SIZE, + J1939_NLA_PGN, + J1939_NLA_SRC_NAME, + J1939_NLA_DEST_NAME, + J1939_NLA_SRC_ADDR, + J1939_NLA_DEST_ADDR, }; enum { J1939_EE_INFO_NONE, J1939_EE_INFO_TX_ABORT, + J1939_EE_INFO_RX_RTS, + J1939_EE_INFO_RX_DPO, + J1939_EE_INFO_RX_ABORT, }; struct j1939_filter { diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 2ddb4226cd23..463d1ba2232a 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -243,7 +243,6 @@ struct vfs_ns_cap_data { /* Allow examination and configuration of disk quotas */ /* Allow setting the domainname */ /* Allow setting the hostname */ -/* Allow calling bdflush() */ /* Allow mount() and umount(), setting up new smb connection */ /* Allow some autofs root ioctls */ /* Allow nfsservctl */ diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h index 37590027b604..c3baaea0b8ef 100644 --- a/include/uapi/linux/cec-funcs.h +++ b/include/uapi/linux/cec-funcs.h @@ -1665,7 +1665,7 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg, if (*audio_out_compensated == 3 && msg->len >= 7) *audio_out_delay = msg->msg[6]; else - *audio_out_delay = 0; + *audio_out_delay = 1; } static inline void cec_msg_request_current_latency(struct cec_msg *msg, diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index dc8879d179fd..de936f5e446d 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -642,7 +642,7 @@ struct cec_event { #define CEC_OP_REC_SEQ_WEDNESDAY 0x08 #define CEC_OP_REC_SEQ_THURSDAY 0x10 #define CEC_OP_REC_SEQ_FRIDAY 0x20 -#define CEC_OP_REC_SEQ_SATERDAY 0x40 +#define CEC_OP_REC_SEQ_SATURDAY 0x40 #define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 #define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 3155382dfc9b..8d206f27bb6d 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -29,6 +29,18 @@ ___C(GET_LSA, "Get Label Storage Area"), \ ___C(GET_HEALTH_INFO, "Get Health Info"), \ ___C(GET_LOG, "Get Log"), \ + ___C(SET_PARTITION_INFO, "Set Partition Information"), \ + ___C(SET_LSA, "Set Label Storage Area"), \ + ___C(GET_ALERT_CONFIG, "Get Alert Configuration"), \ + ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \ + ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \ + ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \ + ___C(GET_POISON, "Get Poison List"), \ + ___C(INJECT_POISON, "Inject Poison"), \ + ___C(CLEAR_POISON, "Clear Poison"), \ + ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ + ___C(SCAN_MEDIA, "Scan Media"), \ + ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a @@ -38,7 +50,7 @@ enum { CXL_CMDS }; #define ___C(a, b) { b } static const struct { const char *name; -} cxl_command_names[] = { CXL_CMDS }; +} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS }; /* * Here's how this 
actually breaks out: diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index f6008b2fa60f..32f53a0069d6 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -126,6 +126,11 @@ enum devlink_command { DEVLINK_CMD_HEALTH_REPORTER_TEST, + DEVLINK_CMD_RATE_GET, /* can dump */ + DEVLINK_CMD_RATE_SET, + DEVLINK_CMD_RATE_NEW, + DEVLINK_CMD_RATE_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -206,6 +211,11 @@ enum devlink_port_flavour { */ }; +enum devlink_rate_type { + DEVLINK_RATE_TYPE_LEAF, + DEVLINK_RATE_TYPE_NODE, +}; + enum devlink_param_cmode { DEVLINK_PARAM_CMODE_RUNTIME, DEVLINK_PARAM_CMODE_DRIVERINIT, @@ -534,6 +544,13 @@ enum devlink_attr { DEVLINK_ATTR_RELOAD_ACTION_STATS, /* nested */ DEVLINK_ATTR_PORT_PCI_SF_NUMBER, /* u32 */ + + DEVLINK_ATTR_RATE_TYPE, /* u16 */ + DEVLINK_ATTR_RATE_TX_SHARE, /* u64 */ + DEVLINK_ATTR_RATE_TX_MAX, /* u64 */ + DEVLINK_ATTR_RATE_NODE_NAME, /* string */ + DEVLINK_ATTR_RATE_PARENT_NODE_NAME, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index e5c6e458bdf7..c12ce30b52df 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -376,4 +376,10 @@ enum { */ #define DM_INTERNAL_SUSPEND_FLAG (1 << 18) /* Out */ +/* + * If set, returns, in the input buffer passed by userspace, the raw table + * information that would be measured by the IMA subsystem on device state + * change. + */ +#define DM_IMA_MEASUREMENT_FLAG (1 << 19) /* In */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index 7f30393b92c3..8e4a2ca0bcbf 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -22,8 +22,56 @@ #include <linux/types.h> -/* begin/end dma-buf functions used for userspace mmap. */ +/** + * struct dma_buf_sync - Synchronize with CPU access. + * + * When a DMA buffer is accessed from the CPU via mmap, it is not always + * possible to guarantee coherency between the CPU-visible map and underlying + * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket + * any CPU access to give the kernel the chance to shuffle memory around if + * needed. + * + * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC + * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the + * access is complete, the client should call DMA_BUF_IOCTL_SYNC with + * DMA_BUF_SYNC_END and the same read/write flags. + * + * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache + * coherency. It does not prevent other processes or devices from + * accessing the memory at the same time. If synchronization with a GPU or + * other device driver is required, it is the client's responsibility to + * wait for the buffer to be ready for reading or writing before calling this + * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that + * follow-up work is not submitted to the GPU or other device driver until + * after this ioctl has been called with DMA_BUF_SYNC_END. + * + * If the driver or API with which the client is interacting uses implicit + * synchronization, waiting for prior work to complete can be done via + * poll() on the DMA buffer file descriptor. 
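+ *
+ * To make the bracketing described above concrete, a CPU read access
+ * would look roughly like this (illustrative sketch; error handling
+ * omitted, and `dmabuf_fd` is an assumed mmapped dma-buf fd):
+ *
+ *	struct dma_buf_sync sync = {
+ *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
+ *	};
+ *
+ *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *	// ... read through the CPU mapping ...
+ *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
+ *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *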
If the driver or API requires + * explicit synchronization, the client may have to wait on a sync_file or + * other synchronization primitive outside the scope of the DMA buffer API. + */ struct dma_buf_sync { + /** + * @flags: Set of access flags + * + * DMA_BUF_SYNC_START: + * Indicates the start of a map access session. + * + * DMA_BUF_SYNC_END: + * Indicates the end of a map access session. + * + * DMA_BUF_SYNC_READ: + * Indicates that the mapped DMA buffer will be read by the + * client via the CPU map. + * + * DMA_BUF_SYNC_WRITE: + * Indicates that the mapped DMA buffer will be written by the + * client via the CPU map. + * + * DMA_BUF_SYNC_RW: + * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE. + */ __u64 flags; }; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index cfef6b08169a..b6db6590baf0 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -233,7 +233,7 @@ enum tunable_id { ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ /* * Add your fresh new tunable attribute above and remember to update - * tunable_strings[] in net/core/ethtool.c + * tunable_strings[] in net/ethtool/common.c */ __ETHTOOL_TUNABLE_COUNT, }; @@ -297,7 +297,7 @@ enum phy_tunable_id { ETHTOOL_PHY_EDPD, /* * Add your fresh new phy tunable attribute above and remember to update - * phy_tunable_strings[] in net/core/ethtool.c + * phy_tunable_strings[] in net/ethtool/common.c */ __ETHTOOL_PHY_TUNABLE_COUNT, }; @@ -639,6 +639,8 @@ enum ethtool_link_ext_substate_link_logical_mismatch { enum ethtool_link_ext_substate_bad_signal_integrity { ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS, }; /* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. 
*/ diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 825cfda1c5d5..5545f1ca9237 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -46,6 +46,7 @@ enum { ETHTOOL_MSG_FEC_SET, ETHTOOL_MSG_MODULE_EEPROM_GET, ETHTOOL_MSG_STATS_GET, + ETHTOOL_MSG_PHC_VCLOCKS_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -88,6 +89,7 @@ enum { ETHTOOL_MSG_FEC_NTF, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, ETHTOOL_MSG_STATS_GET_REPLY, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -375,6 +377,8 @@ enum { ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */ ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */ + ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, /* u8 */ + ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, /* u8 */ /* add new constants above here */ __ETHTOOL_A_COALESCE_CNT, @@ -440,6 +444,19 @@ enum { ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +/* PHC VCLOCKS */ + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC, + ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */ + ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */ + + /* add new constants above here */ + __ETHTOOL_A_PHC_VCLOCKS_CNT, + ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) +}; + /* CABLE TEST */ enum { @@ -675,7 +692,7 @@ enum { ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */ ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */ ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */ - ETHTOOL_A_MODULE_EEPROM_DATA, /* nested */ + ETHTOOL_A_MODULE_EEPROM_DATA, /* binary */ __ETHTOOL_A_MODULE_EEPROM_CNT, ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1) diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index fbf9c5c7dd59..64553df9d735 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -51,6 +51,7 @@ #define FAN_ENABLE_AUDIT 0x00000040 /* Flags to determine fanotify event format */ +#define FAN_REPORT_PIDFD 0x00000080 /* Report pidfd for event->pid */ #define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */ #define FAN_REPORT_FID 0x00000200 /* Report unique file id */ #define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */ @@ -123,6 +124,7 @@ struct fanotify_event_metadata { #define FAN_EVENT_INFO_TYPE_FID 1 #define FAN_EVENT_INFO_TYPE_DFID_NAME 2 #define FAN_EVENT_INFO_TYPE_DFID 3 +#define FAN_EVENT_INFO_TYPE_PIDFD 4 /* Variable length info record following event metadata */ struct fanotify_event_info_header { @@ -148,6 +150,15 @@ struct fanotify_event_info_fid { unsigned char handle[0]; }; +/* + * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD. + * It holds a pidfd for the pid that was responsible for generating an event. 
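
For groups created with fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_PIDFD, O_RDONLY), event consumption might look like the sketch below (the info-record struct itself follows next in the diff); process_pidfd() is a hypothetical callback, and headers new enough to carry these definitions are assumed:

    #include <fcntl.h>
    #include <sys/fanotify.h>
    #include <unistd.h>

    /* Drain one batch of events from a FAN_REPORT_PIDFD group and hand the
     * pidfd of each originating process to the callback. For simplicity this
     * assumes the pidfd record is the first info record after the metadata.
     */
    static void drain_events(int fan_fd, void (*process_pidfd)(int))
    {
        char buf[4096];
        ssize_t len = read(fan_fd, buf, sizeof(buf));
        struct fanotify_event_metadata *md =
            (struct fanotify_event_metadata *)buf;

        while (len > 0 && FAN_EVENT_OK(md, len)) {
            struct fanotify_event_info_pidfd *info =
                (struct fanotify_event_info_pidfd *)(md + 1);

            if (md->event_len > FAN_EVENT_METADATA_LEN &&
                info->hdr.info_type == FAN_EVENT_INFO_TYPE_PIDFD &&
                info->pidfd >= 0)    /* FAN_NOPIDFD / FAN_EPIDFD are negative */
                process_pidfd(info->pidfd);
            if (md->fd >= 0)
                close(md->fd);
            md = FAN_EVENT_NEXT(md, len);
        }
    }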
+ */ +struct fanotify_event_info_pidfd { + struct fanotify_event_info_header hdr; + __s32 pidfd; +}; + struct fanotify_response { __s32 fd; __u32 response; @@ -160,6 +171,8 @@ struct fanotify_response { /* No fd set in event */ #define FAN_NOFD -1 +#define FAN_NOPIDFD FAN_NOFD +#define FAN_EPIDFD -2 /* Helper functions to deal with fanotify_event_metadata buffers */ #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata)) diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 4c32e97dcdf0..bdf7b404b3e7 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -184,6 +184,7 @@ struct fsxattr { #define BLKSECDISCARD _IO(0x12,125) #define BLKROTATIONAL _IO(0x12,126) #define BLKZEROOUT _IO(0x12,127) +#define BLKGETDISKSEQ _IOR(0x12,128,__u64) /* * A jump here: 130-136 are reserved for zoned block devices * (see uapi/linux/blkzoned.h) diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 271ae90a9bb7..36ed092227fa 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -181,6 +181,9 @@ * - add FUSE_OPEN_KILL_SUIDGID * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT * - add FUSE_SETXATTR_ACL_KILL_SGID + * + * 7.34 + * - add FUSE_SYNCFS */ #ifndef _LINUX_FUSE_H @@ -216,7 +219,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 33 +#define FUSE_KERNEL_MINOR_VERSION 34 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -509,6 +512,7 @@ enum fuse_opcode { FUSE_COPY_FILE_RANGE = 47, FUSE_SETUPMAPPING = 48, FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -971,4 +975,8 @@ struct fuse_removemapping_one { #define FUSE_REMOVEMAPPING_MAX_ENTRY \ (PAGE_SIZE / sizeof(struct fuse_removemapping_one)) +struct fuse_syncfs_in { + uint64_t padding; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index a89eb0accd5e..235e5b2facaa 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -21,6 +21,7 @@ #define FUTEX_WAKE_BITSET 10 #define FUTEX_WAIT_REQUEUE_PI 11 #define FUTEX_CMP_REQUEUE_PI 12 +#define FUTEX_LOCK_PI2 13 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -32,6 +33,7 @@ #define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG) #define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG) #define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG) diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h index c1da8244c5e1..163c0998aec9 100644 --- a/include/uapi/linux/icmp.h +++ b/include/uapi/linux/icmp.h @@ -20,7 +20,6 @@ #include <linux/types.h> #include <asm/byteorder.h> -#include <linux/in.h> #include <linux/if.h> #include <linux/in6.h> @@ -154,7 +153,7 @@ struct icmp_ext_echo_iio { struct { struct icmp_ext_echo_ctype3_hdr ctype3_hdr; union { - struct in_addr ipv4_addr; + __be32 ipv4_addr; struct in6_addr ipv6_addr; } ip_addr; } addr; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index e33997b4d750..c750eac09fc9 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* 
SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */ /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #ifndef _USR_IDXD_H_ #define _USR_IDXD_H_ @@ -9,6 +9,30 @@ #include <stdint.h> #endif +/* Driver command error status */ +enum idxd_scmd_stat { + IDXD_SCMD_DEV_ENABLED = 0x80000010, + IDXD_SCMD_DEV_NOT_ENABLED = 0x80000020, + IDXD_SCMD_WQ_ENABLED = 0x80000021, + IDXD_SCMD_DEV_DMA_ERR = 0x80020000, + IDXD_SCMD_WQ_NO_GRP = 0x80030000, + IDXD_SCMD_WQ_NO_NAME = 0x80040000, + IDXD_SCMD_WQ_NO_SVM = 0x80050000, + IDXD_SCMD_WQ_NO_THRESH = 0x80060000, + IDXD_SCMD_WQ_PORTAL_ERR = 0x80070000, + IDXD_SCMD_WQ_RES_ALLOC_ERR = 0x80080000, + IDXD_SCMD_PERCPU_ERR = 0x80090000, + IDXD_SCMD_DMA_CHAN_ERR = 0x800a0000, + IDXD_SCMD_CDEV_ERR = 0x800b0000, + IDXD_SCMD_WQ_NO_SWQ_SUPPORT = 0x800c0000, + IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000, + IDXD_SCMD_WQ_NO_SIZE = 0x800e0000, + IDXD_SCMD_WQ_NO_PRIV = 0x800f0000, +}; + +#define IDXD_SCMD_SOFTERR_MASK 0x80000000 +#define IDXD_SCMD_SOFTERR_SHIFT 16 + /* Descriptor flags */ #define IDXD_OP_FLAG_FENCE 0x0001 #define IDXD_OP_FLAG_BOF 0x0002 diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h index c3cc5a9e5eaf..4783af9fe520 100644 --- a/include/uapi/linux/if_arp.h +++ b/include/uapi/linux/if_arp.h @@ -54,6 +54,7 @@ #define ARPHRD_X25 271 /* CCITT X.25 */ #define ARPHRD_HWX25 272 /* Boards with X.25 in firmware */ #define ARPHRD_CAN 280 /* Controller Area Network */ +#define ARPHRD_MCTP 290 #define ARPHRD_PPP 512 #define ARPHRD_CISCO 513 /* Cisco HDLC */ #define ARPHRD_HDLC ARPHRD_CISCO diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 13d59c51ef5b..2711c3522010 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -479,16 +479,22 @@ enum { /* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */ #define BRIDGE_VLANDB_DUMPF_STATS (1 << 0) /* Include stats in the dump */ +#define BRIDGE_VLANDB_DUMPF_GLOBAL (1 << 1) /* Dump global vlan options only */ /* Bridge vlan RTM attributes * [BRIDGE_VLANDB_ENTRY] = { * [BRIDGE_VLANDB_ENTRY_INFO] * ... * } + * [BRIDGE_VLANDB_GLOBAL_OPTIONS] = { + * [BRIDGE_VLANDB_GOPTS_ID] + * ... 
+ * } */ enum { BRIDGE_VLANDB_UNSPEC, BRIDGE_VLANDB_ENTRY, + BRIDGE_VLANDB_GLOBAL_OPTIONS, __BRIDGE_VLANDB_MAX, }; #define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1) @@ -500,6 +506,7 @@ enum { BRIDGE_VLANDB_ENTRY_STATE, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO, BRIDGE_VLANDB_ENTRY_STATS, + BRIDGE_VLANDB_ENTRY_MCAST_ROUTER, __BRIDGE_VLANDB_ENTRY_MAX, }; #define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1) @@ -538,6 +545,29 @@ enum { }; #define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1) +enum { + BRIDGE_VLANDB_GOPTS_UNSPEC, + BRIDGE_VLANDB_GOPTS_ID, + BRIDGE_VLANDB_GOPTS_RANGE, + BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING, + BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION, + BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION, + BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT, + BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT, + BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL, + BRIDGE_VLANDB_GOPTS_PAD, + BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL, + BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL, + BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL, + BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL, + BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL, + BRIDGE_VLANDB_GOPTS_MCAST_QUERIER, + BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS, + BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE, + __BRIDGE_VLANDB_GOPTS_MAX +}; +#define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1) + /* Bridge multicast database attributes * [MDBA_MDB] = { * [MDBA_MDB_ENTRY] = { @@ -627,6 +657,9 @@ enum { MDBA_ROUTER_PATTR_UNSPEC, MDBA_ROUTER_PATTR_TIMER, MDBA_ROUTER_PATTR_TYPE, + MDBA_ROUTER_PATTR_INET_TIMER, + MDBA_ROUTER_PATTR_INET6_TIMER, + MDBA_ROUTER_PATTR_VID, __MDBA_ROUTER_PATTR_MAX }; #define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1) @@ -718,12 +751,14 @@ struct br_mcast_stats { /* bridge boolean options * BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets + * BR_BOOLOPT_MCAST_VLAN_SNOOPING - control vlan multicast snooping * * IMPORTANT: if adding a new option do not forget to handle * it in br_boolopt_toggle/get and bridge sysfs */ enum br_boolopt_id { BR_BOOLOPT_NO_LL_LEARN, + BR_BOOLOPT_MCAST_VLAN_SNOOPING, BR_BOOLOPT_MAX }; @@ -736,4 +771,17 @@ struct br_boolopt_multi { __u32 optval; __u32 optmask; }; + +enum { + BRIDGE_QUERIER_UNSPEC, + BRIDGE_QUERIER_IP_ADDRESS, + BRIDGE_QUERIER_IP_PORT, + BRIDGE_QUERIER_IP_OTHER_TIMER, + BRIDGE_QUERIER_PAD, + BRIDGE_QUERIER_IPV6_ADDRESS, + BRIDGE_QUERIER_IPV6_PORT, + BRIDGE_QUERIER_IPV6_OTHER_TIMER, + __BRIDGE_QUERIER_MAX +}; +#define BRIDGE_QUERIER_MAX (__BRIDGE_QUERIER_MAX - 1) #endif /* _UAPI_LINUX_IF_BRIDGE_H */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index a0b637911d3c..5f589c7a8382 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -151,6 +151,9 @@ #define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and * aggregation protocol */ +#define ETH_P_MCTP 0x00FA /* Management component transport + * protocol packets + */ /* * This is an Ethernet frame header. 
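
The per-vlan multicast snooping toggle added to the bridge boolopt set above is flipped through the existing IFLA_BR_MULTI_BOOLOPT attribute; a sketch of building just the payload (the RTM_NEWLINK netlink plumbing around it is assumed rather than shown):

    #include <linux/if_bridge.h>

    /* Build a br_boolopt_multi that changes only the new
     * BR_BOOLOPT_MCAST_VLAN_SNOOPING option; optmask selects which options
     * to touch, optval carries their new values.
     */
    static struct br_boolopt_multi mcast_vlan_snooping(int enable)
    {
        struct br_boolopt_multi bm = {
            .optmask = 1 << BR_BOOLOPT_MCAST_VLAN_SNOOPING,
            .optval = enable ? 1 << BR_BOOLOPT_MCAST_VLAN_SNOOPING : 0,
        };

        return bm;
    }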
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cd5b382a4138..eebd3894fe89 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -341,6 +341,13 @@ enum { IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, IFLA_PROTO_DOWN_REASON, + + /* device (sysfs) name as parent, used instead + * of IFLA_LINK where there's no parent netdev + */ + IFLA_PARENT_DEV_NAME, + IFLA_PARENT_DEV_BUS_NAME, + __IFLA_MAX }; @@ -410,6 +417,7 @@ enum { IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ + IFLA_INET6_RA_MTU, /* mtu carried in the RA message */ __IFLA_INET6_MAX }; @@ -472,6 +480,7 @@ enum { IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, + IFLA_BR_MCAST_QUERIER_STATE, __IFLA_BR_MAX, }; @@ -848,6 +857,7 @@ enum { IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, IFLA_BOND_PEER_NOTIF_DELAY, + IFLA_BOND_AD_LACP_ACTIVE, __IFLA_BOND_MAX, }; @@ -1236,6 +1246,8 @@ enum { #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC, @@ -1251,4 +1263,14 @@ struct ifla_rmnet_flags { __u32 mask; }; +/* MCTP section */ + +enum { + IFLA_MCTP_UNSPEC, + IFLA_MCTP_NET, + __IFLA_MCTP_MAX, +}; + +#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 7d6687618d80..14168225cecd 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -188,11 +188,22 @@ struct ip_mreq_source { }; struct ip_msfilter { - __be32 imsf_multiaddr; - __be32 imsf_interface; - __u32 imsf_fmode; - __u32 imsf_numsrc; - __be32 imsf_slist[1]; + union { + struct { + __be32 imsf_multiaddr_aux; + __be32 imsf_interface_aux; + __u32 imsf_fmode_aux; + __u32 imsf_numsrc_aux; + __be32 imsf_slist[1]; + }; + struct { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist_flex[]; + }; + }; }; #define IP_MSFILTER_SIZE(numsrc) \ @@ -211,11 +222,22 @@ struct group_source_req { }; struct group_filter { - __u32 gf_interface; /* interface index */ - struct __kernel_sockaddr_storage gf_group; /* multicast address */ - __u32 gf_fmode; /* filter mode */ - __u32 gf_numsrc; /* number of sources */ - struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ + union { + struct { + __u32 gf_interface_aux; /* interface index */ + struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */ + __u32 gf_fmode_aux; /* filter mode */ + __u32 gf_numsrc_aux; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ + }; + struct { + __u32 gf_interface; /* interface index */ + struct __kernel_sockaddr_storage gf_group; /* multicast address */ + __u32 gf_fmode; /* filter mode */ + __u32 gf_numsrc; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */ + }; + }; }; #define GROUP_FILTER_SIZE(numsrc) \ @@ -289,6 +311,9 @@ struct sockaddr_in { /* Address indicating an error return. */ #define INADDR_NONE ((unsigned long int) 0xffffffff) +/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */ +#define INADDR_DUMMY ((unsigned long int) 0xc0000008) + /* Network number for local host loopback. 
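
The ip_msfilter/group_filter unions above preserve the old one-element arrays for source compatibility while adding true flexible-array members; new code would size and fill a filter through the *_flex names, roughly as in this sketch (interface index, group and filter mode are caller-supplied placeholders):

    #include <linux/in.h>
    #include <stdlib.h>

    /* Allocate a group_filter sized for nsrc sources; GROUP_FILTER_SIZE()
     * accounts for the fixed header plus the flexible source list.
     */
    static struct group_filter *alloc_group_filter(__u32 ifindex, __u32 nsrc)
    {
        struct group_filter *gf = calloc(1, GROUP_FILTER_SIZE(nsrc));

        if (!gf)
            return NULL;
        gf->gf_interface = ifindex;
        gf->gf_fmode = MCAST_INCLUDE;
        gf->gf_numsrc = nsrc;
        /* caller fills gf->gf_group and gf->gf_slist_flex[0..nsrc-1] */
        return gf;
    }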
*/ #define IN_LOOPBACKNET 127 diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 5ad396a57eb3..c4c53a9ab959 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -145,6 +145,7 @@ struct in6_flowlabel_req { #define IPV6_TLV_PADN 1 #define IPV6_TLV_ROUTERALERT 5 #define IPV6_TLV_CALIPSO 7 /* RFC 5570 */ +#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */ #define IPV6_TLV_JUMBO 194 #define IPV6_TLV_HAO 201 /* home address option */ diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index e1ae46683301..59ef35154e3d 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -44,23 +44,23 @@ struct io_uring_sqe { __u32 splice_flags; __u32 rename_flags; __u32 unlink_flags; + __u32 hardlink_flags; }; __u64 user_data; /* data to be passed back at completion time */ + /* pack this to avoid bogus arm OABI complaints */ union { - struct { - /* pack this to avoid bogus arm OABI complaints */ - union { - /* index into fixed buffers, if used */ - __u16 buf_index; - /* for grouped buffer selection */ - __u16 buf_group; - } __attribute__((packed)); - /* personality to use, if used */ - __u16 personality; - __s32 splice_fd_in; - }; - __u64 __pad2[3]; + /* index into fixed buffers, if used */ + __u16 buf_index; + /* for grouped buffer selection */ + __u16 buf_group; + } __attribute__((packed)); + /* personality to use, if used */ + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; }; + __u64 __pad2[2]; }; enum { @@ -137,6 +137,9 @@ enum { IORING_OP_SHUTDOWN, IORING_OP_RENAMEAT, IORING_OP_UNLINKAT, + IORING_OP_MKDIRAT, + IORING_OP_SYMLINKAT, + IORING_OP_LINKAT, /* this goes last, obviously */ IORING_OP_LAST, @@ -150,9 +153,13 @@ enum { /* * sqe->timeout_flags */ -#define IORING_TIMEOUT_ABS (1U << 0) -#define IORING_TIMEOUT_UPDATE (1U << 1) - +#define IORING_TIMEOUT_ABS (1U << 0) +#define IORING_TIMEOUT_UPDATE (1U << 1) +#define IORING_TIMEOUT_BOOTTIME (1U << 2) +#define IORING_TIMEOUT_REALTIME (1U << 3) +#define IORING_LINK_TIMEOUT_UPDATE (1U << 4) +#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) +#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) /* * sqe->splice_flags * extends splice(2) flags @@ -280,6 +287,7 @@ struct io_uring_params { #define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) #define IORING_FEAT_EXT_ARG (1U << 8) #define IORING_FEAT_NATIVE_WORKERS (1U << 9) +#define IORING_FEAT_RSRC_TAGS (1U << 10) /* * io_uring_register(2) opcodes and arguments @@ -298,8 +306,19 @@ enum { IORING_UNREGISTER_PERSONALITY = 10, IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, - IORING_REGISTER_RSRC = 13, - IORING_REGISTER_RSRC_UPDATE = 14, + + /* extended with tagging */ + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + + /* set/clear io-wq thread affinities */ + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + + /* set/get max number of workers */ + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, /* this goes last */ IORING_REGISTER_LAST @@ -312,14 +331,10 @@ struct io_uring_files_update { __aligned_u64 /* __s32 * */ fds; }; -enum { - IORING_RSRC_FILE = 0, - IORING_RSRC_BUFFER = 1, -}; - struct io_uring_rsrc_register { - __u32 type; __u32 nr; + __u32 resv; + __u64 resv2; __aligned_u64 data; __aligned_u64 tags; }; @@ -335,8 +350,8 @@ struct io_uring_rsrc_update2 { __u32 resv; __aligned_u64 data; 
__aligned_u64 tags; - __u32 type; __u32 nr; + __u32 resv2; }; /* Skip updating fd indexes set to this value in the fd table */ diff --git a/include/uapi/linux/ioam6.h b/include/uapi/linux/ioam6.h new file mode 100644 index 000000000000..ac4de376f0ce --- /dev/null +++ b/include/uapi/linux/ioam6.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * IPv6 IOAM implementation + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ + +#ifndef _UAPI_LINUX_IOAM6_H +#define _UAPI_LINUX_IOAM6_H + +#include <asm/byteorder.h> +#include <linux/types.h> + +#define IOAM6_U16_UNAVAILABLE U16_MAX +#define IOAM6_U32_UNAVAILABLE U32_MAX +#define IOAM6_U64_UNAVAILABLE U64_MAX + +#define IOAM6_DEFAULT_ID (IOAM6_U32_UNAVAILABLE >> 8) +#define IOAM6_DEFAULT_ID_WIDE (IOAM6_U64_UNAVAILABLE >> 8) +#define IOAM6_DEFAULT_IF_ID IOAM6_U16_UNAVAILABLE +#define IOAM6_DEFAULT_IF_ID_WIDE IOAM6_U32_UNAVAILABLE + +/* + * IPv6 IOAM Option Header + */ +struct ioam6_hdr { + __u8 opt_type; + __u8 opt_len; + __u8 :8; /* reserved */ +#define IOAM6_TYPE_PREALLOC 0 + __u8 type; +} __attribute__((packed)); + +/* + * IOAM Trace Header + */ +struct ioam6_trace_hdr { + __be16 namespace_id; + +#if defined(__LITTLE_ENDIAN_BITFIELD) + + __u8 :1, /* unused */ + :1, /* unused */ + overflow:1, + nodelen:5; + + __u8 remlen:7, + :1; /* unused */ + + union { + __be32 type_be32; + + struct { + __u32 bit7:1, + bit6:1, + bit5:1, + bit4:1, + bit3:1, + bit2:1, + bit1:1, + bit0:1, + bit15:1, /* unused */ + bit14:1, /* unused */ + bit13:1, /* unused */ + bit12:1, /* unused */ + bit11:1, + bit10:1, + bit9:1, + bit8:1, + bit23:1, /* reserved */ + bit22:1, + bit21:1, /* unused */ + bit20:1, /* unused */ + bit19:1, /* unused */ + bit18:1, /* unused */ + bit17:1, /* unused */ + bit16:1, /* unused */ + :8; /* reserved */ + } type; + }; + +#elif defined(__BIG_ENDIAN_BITFIELD) + + __u8 nodelen:5, + overflow:1, + :1, /* unused */ + :1; /* unused */ + + __u8 :1, /* unused */ + remlen:7; + + union { + __be32 type_be32; + + struct { + __u32 bit0:1, + bit1:1, + bit2:1, + bit3:1, + bit4:1, + bit5:1, + bit6:1, + bit7:1, + bit8:1, + bit9:1, + bit10:1, + bit11:1, + bit12:1, /* unused */ + bit13:1, /* unused */ + bit14:1, /* unused */ + bit15:1, /* unused */ + bit16:1, /* unused */ + bit17:1, /* unused */ + bit18:1, /* unused */ + bit19:1, /* unused */ + bit20:1, /* unused */ + bit21:1, /* unused */ + bit22:1, + bit23:1, /* reserved */ + :8; /* reserved */ + } type; + }; + +#else +#error "Please fix <asm/byteorder.h>" +#endif + +#define IOAM6_TRACE_DATA_SIZE_MAX 244 + __u8 data[0]; +} __attribute__((packed)); + +#endif /* _UAPI_LINUX_IOAM6_H */ diff --git a/include/uapi/linux/ioam6_genl.h b/include/uapi/linux/ioam6_genl.h new file mode 100644 index 000000000000..ca4b22833754 --- /dev/null +++ b/include/uapi/linux/ioam6_genl.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * IPv6 IOAM Generic Netlink API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ + +#ifndef _UAPI_LINUX_IOAM6_GENL_H +#define _UAPI_LINUX_IOAM6_GENL_H + +#define IOAM6_GENL_NAME "IOAM6" +#define IOAM6_GENL_VERSION 0x1 + +enum { + IOAM6_ATTR_UNSPEC, + + IOAM6_ATTR_NS_ID, /* u16 */ + IOAM6_ATTR_NS_DATA, /* u32 */ + IOAM6_ATTR_NS_DATA_WIDE,/* u64 */ + +#define IOAM6_MAX_SCHEMA_DATA_LEN (255 * 4) + IOAM6_ATTR_SC_ID, /* u32 */ + IOAM6_ATTR_SC_DATA, /* Binary */ + IOAM6_ATTR_SC_NONE, /* Flag */ + + IOAM6_ATTR_PAD, + + __IOAM6_ATTR_MAX, +}; + +#define IOAM6_ATTR_MAX (__IOAM6_ATTR_MAX - 1) + +enum { + 
IOAM6_CMD_UNSPEC, + + IOAM6_CMD_ADD_NAMESPACE, + IOAM6_CMD_DEL_NAMESPACE, + IOAM6_CMD_DUMP_NAMESPACES, + + IOAM6_CMD_ADD_SCHEMA, + IOAM6_CMD_DEL_SCHEMA, + IOAM6_CMD_DUMP_SCHEMAS, + + IOAM6_CMD_NS_SET_SCHEMA, + + __IOAM6_CMD_MAX, +}; + +#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_IOAM6_GENL_H */ diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h new file mode 100644 index 000000000000..bae14636a8c8 --- /dev/null +++ b/include/uapi/linux/ioam6_iptunnel.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * IPv6 IOAM Lightweight Tunnel API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ + +#ifndef _UAPI_LINUX_IOAM6_IPTUNNEL_H +#define _UAPI_LINUX_IOAM6_IPTUNNEL_H + +enum { + IOAM6_IPTUNNEL_UNSPEC, + IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */ + __IOAM6_IPTUNNEL_MAX, +}; + +#define IOAM6_IPTUNNEL_MAX (__IOAM6_IPTUNNEL_MAX - 1) + +#endif /* _UAPI_LINUX_IOAM6_IPTUNNEL_H */ diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h new file mode 100644 index 000000000000..f70f2596a6bf --- /dev/null +++ b/include/uapi/linux/ioprio.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_IOPRIO_H +#define _UAPI_LINUX_IOPRIO_H + +/* + * Gives us 8 prio classes with 13-bits of data for each class + */ +#define IOPRIO_CLASS_SHIFT 13 +#define IOPRIO_CLASS_MASK 0x07 +#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) + +#define IOPRIO_PRIO_CLASS(ioprio) \ + (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK) +#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK) +#define IOPRIO_PRIO_VALUE(class, data) \ + ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \ + ((data) & IOPRIO_PRIO_MASK)) + +/* + * These are the io priority groups as implemented by the BFQ and mq-deadline + * schedulers. RT is the realtime class, it always gets premium service. For + * ATA disks supporting NCQ IO priority, RT class IOs will be processed using + * high priority NCQ commands. BE is the best-effort scheduling class, the + * default for any process. IDLE is the idle scheduling class, it is only + * served when no one else is using the disk. + */ +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; + +/* + * The RT and BE priority classes both support up to 8 priority levels. + */ +#define IOPRIO_NR_LEVELS 8 +#define IOPRIO_BE_NR IOPRIO_NR_LEVELS + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; + +/* + * Fallback BE priority level. 
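
The packing macros above are what the ioprio_set/ioprio_get syscalls consume; a hedged sketch that puts the calling process in the best-effort class at the IOPRIO_NORM level defined right after this comment (glibc ships no wrapper, hence raw syscall(2)):

    #include <linux/ioprio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Set the caller's I/O priority to best-effort at the given level (0-7). */
    static long set_self_be_ioprio(int level)
    {
        int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, level);

        /* who == 0 means the calling process for IOPRIO_WHO_PROCESS */
        return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio);
    }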
+ */ +#define IOPRIO_NORM 4 +#define IOPRIO_BE_NORM IOPRIO_NORM + +#endif /* _UAPI_LINUX_IOPRIO_H */ diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 70603775fe91..b243a53fa985 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -190,6 +190,9 @@ enum { DEVCONF_NDISC_TCLASS, DEVCONF_RPL_SEG_ENABLED, DEVCONF_RA_DEFRTR_METRIC, + DEVCONF_IOAM6_ENABLED, + DEVCONF_IOAM6_ID, + DEVCONF_IOAM6_ID_WIDE, DEVCONF_MAX }; diff --git a/include/uapi/linux/ipx.h b/include/uapi/linux/ipx.h deleted file mode 100644 index 3168137adae8..000000000000 --- a/include/uapi/linux/ipx.h +++ /dev/null @@ -1,87 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _IPX_H_ -#define _IPX_H_ -#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */ -#include <linux/types.h> -#include <linux/sockios.h> -#include <linux/socket.h> -#define IPX_NODE_LEN 6 -#define IPX_MTU 576 - -#if __UAPI_DEF_SOCKADDR_IPX -struct sockaddr_ipx { - __kernel_sa_family_t sipx_family; - __be16 sipx_port; - __be32 sipx_network; - unsigned char sipx_node[IPX_NODE_LEN]; - __u8 sipx_type; - unsigned char sipx_zero; /* 16 byte fill */ -}; -#endif /* __UAPI_DEF_SOCKADDR_IPX */ - -/* - * So we can fit the extra info for SIOCSIFADDR into the address nicely - */ -#define sipx_special sipx_port -#define sipx_action sipx_zero -#define IPX_DLTITF 0 -#define IPX_CRTITF 1 - -#if __UAPI_DEF_IPX_ROUTE_DEFINITION -struct ipx_route_definition { - __be32 ipx_network; - __be32 ipx_router_network; - unsigned char ipx_router_node[IPX_NODE_LEN]; -}; -#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */ - -#if __UAPI_DEF_IPX_INTERFACE_DEFINITION -struct ipx_interface_definition { - __be32 ipx_network; - unsigned char ipx_device[16]; - unsigned char ipx_dlink_type; -#define IPX_FRAME_NONE 0 -#define IPX_FRAME_SNAP 1 -#define IPX_FRAME_8022 2 -#define IPX_FRAME_ETHERII 3 -#define IPX_FRAME_8023 4 -#define IPX_FRAME_TR_8022 5 /* obsolete */ - unsigned char ipx_special; -#define IPX_SPECIAL_NONE 0 -#define IPX_PRIMARY 1 -#define IPX_INTERNAL 2 - unsigned char ipx_node[IPX_NODE_LEN]; -}; -#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */ - -#if __UAPI_DEF_IPX_CONFIG_DATA -struct ipx_config_data { - unsigned char ipxcfg_auto_select_primary; - unsigned char ipxcfg_auto_create_interfaces; -}; -#endif /* __UAPI_DEF_IPX_CONFIG_DATA */ - -/* - * OLD Route Definition for backward compatibility. 
- */ - -#if __UAPI_DEF_IPX_ROUTE_DEF -struct ipx_route_def { - __be32 ipx_network; - __be32 ipx_router_network; -#define IPX_ROUTE_NO_ROUTER 0 - unsigned char ipx_router_node[IPX_NODE_LEN]; - unsigned char ipx_device[16]; - unsigned short ipx_flags; -#define IPX_RT_SNAP 8 -#define IPX_RT_8022 4 -#define IPX_RT_BLUEBOOK 2 -#define IPX_RT_ROUTED 1 -}; -#endif /* __UAPI_DEF_IPX_ROUTE_DEF */ - -#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE) -#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1) -#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE + 2) -#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE + 3) -#endif /* _IPX_H_ */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index bf5e7d7846dd..af96af174dc4 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -30,9 +30,11 @@ * - 1.1 - initial version * - 1.3 - Add SMI events support * - 1.4 - Indicate new SRAM EDC bit in device properties + * - 1.5 - Add SVM API + * - 1.6 - Query clear flags in SVM get_attr API */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 4 +#define KFD_IOCTL_MINOR_VERSION 6 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -473,6 +475,168 @@ enum kfd_mmio_remap { KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4, }; +/* Guarantee host access to memory */ +#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001 +/* Fine grained coherency between all devices with access */ +#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002 +/* Use any GPU in same hive as preferred device */ +#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004 +/* GPUs only read, allows replication */ +#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008 +/* Allow execution on GPU */ +#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010 +/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */ +#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 + +/** + * kfd_ioctl_svm_op - SVM ioctl operations + * + * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes + * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes + */ +enum kfd_ioctl_svm_op { + KFD_IOCTL_SVM_OP_SET_ATTR, + KFD_IOCTL_SVM_OP_GET_ATTR +}; + +/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations + * + * GPU IDs are used to specify GPUs as preferred and prefetch locations. + * Below definitions are used for system memory or for leaving the preferred + * location unspecified. + */ +enum kfd_ioctl_svm_location { + KFD_IOCTL_SVM_LOCATION_SYSMEM = 0, + KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff +}; + +/** + * kfd_ioctl_svm_attr_type - SVM attribute types + * + * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for + * system memory + * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for + * system memory. Setting this triggers an + * immediate prefetch (migration). + * @KFD_IOCTL_SVM_ATTR_ACCESS: + * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: + * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given + * by the attribute value + * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see + * KFD_IOCTL_SVM_FLAG_...) 
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear + * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity + * (log2 num pages) + */ +enum kfd_ioctl_svm_attr_type { + KFD_IOCTL_SVM_ATTR_PREFERRED_LOC, + KFD_IOCTL_SVM_ATTR_PREFETCH_LOC, + KFD_IOCTL_SVM_ATTR_ACCESS, + KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE, + KFD_IOCTL_SVM_ATTR_NO_ACCESS, + KFD_IOCTL_SVM_ATTR_SET_FLAGS, + KFD_IOCTL_SVM_ATTR_CLR_FLAGS, + KFD_IOCTL_SVM_ATTR_GRANULARITY +}; + +/** + * kfd_ioctl_svm_attribute - Attributes as pairs of type and value + * + * The meaning of the @value depends on the attribute type. + * + * @type: attribute type (see enum @kfd_ioctl_svm_attr_type) + * @value: attribute value + */ +struct kfd_ioctl_svm_attribute { + __u32 type; + __u32 value; +}; + +/** + * kfd_ioctl_svm_args - Arguments for SVM ioctl + * + * @op specifies the operation to perform (see enum + * @kfd_ioctl_svm_op). @start_addr and @size are common for all + * operations. + * + * A variable number of attributes can be given in @attrs. + * @nattr specifies the number of attributes. New attributes can be + * added in the future without breaking the ABI. If unknown attributes + * are given, the function returns -EINVAL. + * + * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address + * range. It may overlap existing virtual address ranges. If it does, + * the existing ranges will be split such that the attribute changes + * only apply to the specified address range. + * + * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes + * over all memory in the given range and returns the result as the + * attribute value. If different pages have different preferred or + * prefetch locations, 0xffffffff will be returned for + * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or + * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For + * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be + * aggregated by bitwise AND. That means a flag will be set in the + * output if that flag is set for all pages in the range. For + * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be + * aggregated by bitwise NOR. That means a flag will be set in the + * output if that flag is clear for all pages in the range. + * The minimum migration granularity throughout the range will be + * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY. + * + * Querying of accessibility attributes works by initializing the + * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the + * GPUID being queried. Multiple attributes can be given to allow + * querying multiple GPUIDs. The ioctl function overwrites the + * attribute type to indicate the access for the specified GPU. + */ +struct kfd_ioctl_svm_args { + __u64 start_addr; + __u64 size; + __u32 op; + __u32 nattr; + /* Variable length array of attributes */ + struct kfd_ioctl_svm_attribute attrs[0]; +}; + +/** + * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode + * + * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process + * + * @xnack_enabled indicates whether recoverable page faults should be + * enabled for the current process. 0 means disabled, positive means + * enabled, negative means leave unchanged. If enabled, virtual address + * translations on GFXv9 and later AMD GPUs can return XNACK and retry + * the access until a valid PTE is available. This is used to implement + * device page faults. + * + * On output, @xnack_enabled returns the (new) current mode (0 or + * positive). Therefore, a negative input value can be used to query + * the current mode without changing it.
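
As a sketch of driving the SVM ioctl documented above, the following prefetches a range to a GPU; kfd_fd (an open /dev/kfd) and gpuid (discovered via the KFD topology under sysfs) are assumed inputs, not defined by this header:

    #include <linux/kfd_ioctl.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Trigger an immediate migration of [addr, addr+size) to the given GPU
     * by setting the prefetch-location attribute on the range.
     */
    static int svm_prefetch(int kfd_fd, __u64 addr, __u64 size, __u32 gpuid)
    {
        struct kfd_ioctl_svm_args *args;
        int ret;

        args = calloc(1, sizeof(*args) +
                         sizeof(struct kfd_ioctl_svm_attribute));
        if (!args)
            return -1;
        args->start_addr = addr;
        args->size = size;
        args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
        args->nattr = 1;
        args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
        args->attrs[0].value = gpuid;    /* 0 would mean system memory */

        ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
        free(args);
        return ret;
    }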
+ * + * The XNACK mode fundamentally changes the way SVM-managed memory works + * in the driver, with subtle effects on application performance and + * functionality. + * + * Enabling XNACK mode requires shader programs to be compiled + * differently. Furthermore, not all GPUs support changing the mode + * per-process. Therefore changing the mode is only allowed while no + * user mode queues exist in the process. This ensures that no shader + * code is running that may be compiled for the wrong mode. GPUs + * that cannot change to the requested mode will prevent the XNACK + * mode switch from occurring. All GPUs used by the process must be in + * the same XNACK mode. + * + * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM. + * Therefore those GPUs are not considered for the XNACK mode switch. + * + * Return: 0 on success, -errno on failure + */ +struct kfd_ioctl_set_xnack_mode_args { + __s32 xnack_enabled; +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -573,7 +737,12 @@ enum kfd_mmio_remap { #define AMDKFD_IOC_SMI_EVENTS \ AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args) +#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args) + +#define AMDKFD_IOC_SET_XNACK_MODE \ + AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x20 +#define AMDKFD_COMMAND_END 0x22 #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 79d9c44d1ad7..a067410ebea5 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -280,6 +280,9 @@ struct kvm_xen_exit { /* Encounter unexpected vm-exit reason */ #define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4 +/* Flags that describe what fields in emulation_failure hold valid data. */ +#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0) + /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ @@ -383,6 +386,25 @@ struct kvm_run { __u32 ndata; __u64 data[16]; } internal; + /* + * KVM_INTERNAL_ERROR_EMULATION + * + * "struct emulation_failure" is an overlay of "struct internal" + * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of + * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error + * sub-types, this struct is ABI! It also needs to be backwards + * compatible with "struct internal". Take special care that + * "ndata" is correct, that new fields are enumerated in "flags", + * and that each flag enumerates fields that are 64-bit aligned + * and sized (so that ndata+internal.data[] is valid/accurate). + */
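
Given the overlay just documented (the struct itself follows immediately below), a VMM's run loop might surface the failed instruction roughly like this sketch; run is assumed to be the mmap'ed vcpu kvm_run region, and the caller has already matched exit_reason == KVM_EXIT_INTERNAL_ERROR:

    #include <linux/kvm.h>
    #include <stdio.h>

    /* Print the bytes of the instruction that failed emulation, when the
     * kernel reported them; how much detail is present depends on
     * KVM_CAP_EXIT_ON_EMULATION_FAILURE and the kernel version.
     */
    static void report_emulation_failure(const struct kvm_run *run)
    {
        int i;

        if (run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION)
            return;
        if (run->emulation_failure.ndata == 0 ||
            !(run->emulation_failure.flags &
              KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
            return;    /* no instruction bytes enumerated in flags */
        printf("failed insn:");
        for (i = 0; i < run->emulation_failure.insn_size; i++)
            printf(" %02x", run->emulation_failure.insn_bytes[i]);
        printf("\n");
    }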
+ struct { + __u32 suberror; + __u32 ndata; + __u64 flags; + __u8 insn_size; + __u8 insn_bytes[15]; + } emulation_failure; /* KVM_EXIT_OSI */ struct { __u64 gprs[32]; @@ -1083,6 +1105,13 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SGX_ATTRIBUTE 196 #define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197 #define KVM_CAP_PTP_KVM 198 +#define KVM_CAP_HYPERV_ENFORCE_CPUID 199 +#define KVM_CAP_SREGS2 200 +#define KVM_CAP_EXIT_HYPERCALL 201 +#define KVM_CAP_PPC_RPT_INVALIDATE 202 +#define KVM_CAP_BINARY_STATS_FD 203 +#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 +#define KVM_CAP_ARM_MTE 205 #ifdef KVM_CAP_IRQ_ROUTING @@ -1428,6 +1457,7 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_PMU_EVENT_FILTER */ #define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) #define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3) +#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) @@ -1621,6 +1651,9 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) #define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) +#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2) +#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2) + struct kvm_xen_vcpu_attr { __u16 type; __u16 pad[3]; @@ -1899,4 +1932,79 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +/** + * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. + * @flags: Some extra information for header, always 0 for now. + * @name_size: The size in bytes of the memory which contains statistics + * name string including trailing '\0'. The memory is allocated + * at the end of the statistics descriptor. + * @num_desc: The number of statistics the vm or vcpu has. + * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed + * to by the vm/vcpu stats fd. + * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file + * pointed to by the vm/vcpu stats fd. + * @data_offset: The offset of the vm/vcpu stats' data block in the file + * pointed to by the vm/vcpu stats fd. + * + * This is the header userspace needs to read from the stats fd before any + * other readings. It is used by userspace to discover all the information + * about the vm/vcpu's binary statistics. + * Userspace reads this header from the start of the vm/vcpu's stats fd. + */
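
A sketch of consuming this layout (the header struct follows just below); vm_fd is an existing KVM VM fd, and KVM_CAP_BINARY_STATS_FD should be checked before relying on the ioctl:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Read the binary stats header from the fd returned by KVM_GET_STATS_FD
     * and fetch the id string it points at. Error handling is truncated.
     */
    static char *read_stats_id(int vm_fd)
    {
        struct kvm_stats_header hdr;
        char *id = NULL;
        int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

        if (stats_fd < 0)
            return NULL;
        if (pread(stats_fd, &hdr, sizeof(hdr), 0) == sizeof(hdr)) {
            id = calloc(1, hdr.name_size);
            if (id)
                pread(stats_fd, id, hdr.name_size, hdr.id_offset);
        }
        close(stats_fd);
        return id;    /* NUL-terminated id string, or NULL */
    }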
+struct kvm_stats_header { + __u32 flags; + __u32 name_size; + __u32 num_desc; + __u32 id_offset; + __u32 desc_offset; + __u32 data_offset; +}; + +#define KVM_STATS_TYPE_SHIFT 0 +#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT) +#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST + +#define KVM_STATS_UNIT_SHIFT 4 +#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES + +#define KVM_STATS_BASE_SHIFT 8 +#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT) +#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2 + +/** + * struct kvm_stats_desc - Descriptor of a KVM statistic. + * @flags: Annotations of the stats, like type, unit, etc. + * @exponent: Used together with @flags to determine the unit. + * @size: The number of data items for this stat. + * Every data item is of type __u64. + * @offset: The offset of the stats to the start of stat structure in + * structure kvm or kvm_vcpu. + * @bucket_size: A parameter value used for histogram stats. It is only used + * for linear histogram stats, specifying the size of the bucket. + * @name: The name string for the stats. Its size is indicated by the + * &kvm_stats_header->name_size. + */ +struct kvm_stats_desc { + __u32 flags; + __s16 exponent; + __u16 size; + __u32 offset; + __u32 bucket_size; + char name[]; +}; + +#define KVM_GET_STATS_FD _IO(KVMIO, 0xce) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 8b86609849b9..960c7e93d1a9 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -29,6 +29,7 @@ #define KVM_HC_CLOCK_PAIRING 9 #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 +#define KVM_HC_MAP_GPA_RANGE 12 /* * hypercalls use architecture specific diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h deleted file mode 100644 index 2745afd9b8fa..000000000000 --- a/include/uapi/linux/lightnvm.h +++ /dev/null @@ -1,224 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2015 CNEX Labs. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, - * USA.
- */ - -#ifndef _UAPI_LINUX_LIGHTNVM_H -#define _UAPI_LINUX_LIGHTNVM_H - -#ifdef __KERNEL__ -#include <linux/const.h> -#else /* __KERNEL__ */ -#include <stdio.h> -#include <sys/ioctl.h> -#define DISK_NAME_LEN 32 -#endif /* __KERNEL__ */ - -#include <linux/types.h> -#include <linux/ioctl.h> - -#define NVM_TTYPE_NAME_MAX 48 -#define NVM_TTYPE_MAX 63 -#define NVM_MMTYPE_LEN 8 - -#define NVM_CTRL_FILE "/dev/lightnvm/control" - -struct nvm_ioctl_info_tgt { - __u32 version[3]; - __u32 reserved; - char tgtname[NVM_TTYPE_NAME_MAX]; -}; - -struct nvm_ioctl_info { - __u32 version[3]; /* in/out - major, minor, patch */ - __u16 tgtsize; /* number of targets */ - __u16 reserved16; /* pad to 4K page */ - __u32 reserved[12]; - struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX]; -}; - -enum { - NVM_DEVICE_ACTIVE = 1 << 0, -}; - -struct nvm_ioctl_device_info { - char devname[DISK_NAME_LEN]; - char bmname[NVM_TTYPE_NAME_MAX]; - __u32 bmversion[3]; - __u32 flags; - __u32 reserved[8]; -}; - -struct nvm_ioctl_get_devices { - __u32 nr_devices; - __u32 reserved[31]; - struct nvm_ioctl_device_info info[31]; -}; - -struct nvm_ioctl_create_simple { - __u32 lun_begin; - __u32 lun_end; -}; - -struct nvm_ioctl_create_extended { - __u16 lun_begin; - __u16 lun_end; - __u16 op; - __u16 rsv; -}; - -enum { - NVM_CONFIG_TYPE_SIMPLE = 0, - NVM_CONFIG_TYPE_EXTENDED = 1, -}; - -struct nvm_ioctl_create_conf { - __u32 type; - union { - struct nvm_ioctl_create_simple s; - struct nvm_ioctl_create_extended e; - }; -}; - -enum { - NVM_TARGET_FACTORY = 1 << 0, /* Init target in factory mode */ -}; - -struct nvm_ioctl_create { - char dev[DISK_NAME_LEN]; /* open-channel SSD device */ - char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */ - char tgtname[DISK_NAME_LEN]; /* dev to expose target as */ - - __u32 flags; - - struct nvm_ioctl_create_conf conf; -}; - -struct nvm_ioctl_remove { - char tgtname[DISK_NAME_LEN]; - - __u32 flags; -}; - -struct nvm_ioctl_dev_init { - char dev[DISK_NAME_LEN]; /* open-channel SSD device */ - char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */ - - __u32 flags; -}; - -enum { - NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as - * host blks or grown blks */ - NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */ - NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */ - NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */ -}; - -struct nvm_ioctl_dev_factory { - char dev[DISK_NAME_LEN]; - - __u32 flags; -}; - -struct nvm_user_vio { - __u8 opcode; - __u8 flags; - __u16 control; - __u16 nppas; - __u16 rsvd; - __u64 metadata; - __u64 addr; - __u64 ppa_list; - __u32 metadata_len; - __u32 data_len; - __u64 status; - __u32 result; - __u32 rsvd3[3]; -}; - -struct nvm_passthru_vio { - __u8 opcode; - __u8 flags; - __u8 rsvd[2]; - __u32 nsid; - __u32 cdw2; - __u32 cdw3; - __u64 metadata; - __u64 addr; - __u32 metadata_len; - __u32 data_len; - __u64 ppa_list; - __u16 nppas; - __u16 control; - __u32 cdw13; - __u32 cdw14; - __u32 cdw15; - __u64 status; - __u32 result; - __u32 timeout_ms; -}; - -/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ -enum { - /* top level cmds */ - NVM_INFO_CMD = 0x20, - NVM_GET_DEVICES_CMD, - - /* device level cmds */ - NVM_DEV_CREATE_CMD, - NVM_DEV_REMOVE_CMD, - - /* Init a device to support LightNVM media managers */ - NVM_DEV_INIT_CMD, - - /* Factory reset device */ - NVM_DEV_FACTORY_CMD, - - /* Vector user I/O */ - NVM_DEV_VIO_ADMIN_CMD = 0x41, - NVM_DEV_VIO_CMD = 0x42, - NVM_DEV_VIO_USER_CMD = 0x43, -}; - -#define NVM_IOCTL 
'L' /* 0x4c */ - -#define NVM_INFO _IOWR(NVM_IOCTL, NVM_INFO_CMD, \ - struct nvm_ioctl_info) -#define NVM_GET_DEVICES _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \ - struct nvm_ioctl_get_devices) -#define NVM_DEV_CREATE _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \ - struct nvm_ioctl_create) -#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \ - struct nvm_ioctl_remove) -#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \ - struct nvm_ioctl_dev_init) -#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \ - struct nvm_ioctl_dev_factory) - -#define NVME_NVM_IOCTL_IO_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \ - struct nvm_passthru_vio) -#define NVME_NVM_IOCTL_ADMIN_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\ - struct nvm_passthru_vio) -#define NVME_NVM_IOCTL_SUBMIT_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\ - struct nvm_user_vio) - -#define NVM_VERSION_MAJOR 1 -#define NVM_VERSION_MINOR 0 -#define NVM_VERSION_PATCHLEVEL 0 - -#endif diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index c45a4eaea667..9919f2062b14 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * lirc.h - linux infrared remote control header file - * last modified 2010/07/13 by Jarod Wilson */ #ifndef _LINUX_LIRC_H diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 568a4303ccce..2e206919125c 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -14,6 +14,7 @@ enum lwtunnel_encap_types { LWTUNNEL_ENCAP_BPF, LWTUNNEL_ENCAP_SEG6_LOCAL, LWTUNNEL_ENCAP_RPL, + LWTUNNEL_ENCAP_IOAM6, __LWTUNNEL_ENCAP_MAX, }; diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index f3956fc11de6..35687dcb1a42 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -97,5 +97,6 @@ #define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ #define Z3FOLD_MAGIC 0x33 #define PPC_CMM_MAGIC 0xc7571590 +#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */ #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h new file mode 100644 index 000000000000..52b54d13f385 --- /dev/null +++ b/include/uapi/linux/mctp.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Management Component Transport Protocol (MCTP) + * + * Copyright (c) 2021 Code Construct + * Copyright (c) 2021 Google + */ + +#ifndef __UAPI_MCTP_H +#define __UAPI_MCTP_H + +#include <linux/types.h> + +typedef __u8 mctp_eid_t; + +struct mctp_addr { + mctp_eid_t s_addr; +}; + +struct sockaddr_mctp { + unsigned short int smctp_family; + int smctp_network; + struct mctp_addr smctp_addr; + __u8 smctp_type; + __u8 smctp_tag; +}; + +#define MCTP_NET_ANY 0x0 + +#define MCTP_ADDR_NULL 0x00 +#define MCTP_ADDR_ANY 0xff + +#define MCTP_TAG_MASK 0x07 +#define MCTP_TAG_OWNER 0x08 + +#endif /* __UAPI_MCTP_H */ diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 4832fd0b5642..046d0ccba4cd 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -22,6 +22,7 @@ enum { MPOL_BIND, MPOL_INTERLEAVE, MPOL_LOCAL, + MPOL_PREFERRED_MANY, MPOL_MAX, /* always last member of enum */ }; @@ -60,7 +61,6 @@ enum { * are never OR'ed into the mode in mempolicy API arguments. 
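
A sketch of selecting the new MPOL_PREFERRED_MANY mode for the calling thread; glibc ships no set_mempolicy wrapper, so the raw syscall is used, and the node numbers here are placeholders:

    #include <linux/mempolicy.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Prefer allocating from nodes 0 and 2, with fallback elsewhere,
     * which MPOL_PREFERRED (single node) could not express.
     */
    static long prefer_nodes_0_and_2(void)
    {
        unsigned long nodemask = (1UL << 0) | (1UL << 2);

        /* maxnode is the number of bits the kernel should read */
        return syscall(SYS_set_mempolicy, MPOL_PREFERRED_MANY,
                       &nodemask, 8 * sizeof(nodemask));
    }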
*/ #define MPOL_F_SHARED (1 << 0) /* identify shared policies */ -#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */ #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */ #define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */ diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h index e6524ead2b7b..4d93967f8aea 100644 --- a/include/uapi/linux/mount.h +++ b/include/uapi/linux/mount.h @@ -73,7 +73,8 @@ #define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */ #define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */ #define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ -#define MOVE_MOUNT__MASK 0x00000077 +#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */ +#define MOVE_MOUNT__MASK 0x00000177 /* * fsopen() flags. @@ -120,6 +121,7 @@ enum fsconfig_command { #define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ #define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ #define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */ +#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */ /* * mount_setattr() diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 8eb3c0844bff..f66038b9551f 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -73,6 +73,7 @@ enum { #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) +#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) enum { MPTCP_PM_CMD_UNSPEC, @@ -105,6 +106,7 @@ struct mptcp_info { __u64 mptcpi_rcv_nxt; __u8 mptcpi_local_addr_used; __u8 mptcpi_local_addr_max; + __u8 mptcpi_csum_enabled; }; /* diff --git a/include/uapi/linux/n_r3964.h b/include/uapi/linux/n_r3964.h deleted file mode 100644 index 6bbd18520f30..000000000000 --- a/include/uapi/linux/n_r3964.h +++ /dev/null @@ -1,99 +0,0 @@ -/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */ -/* r3964 linediscipline for linux - * - * ----------------------------------------------------------- - * Copyright by - * Philips Automation Projects - * Kassel (Germany) - * ----------------------------------------------------------- - * This software may be used and distributed according to the terms of - * the GNU General Public License, incorporated herein by reference. - * - * Author: - * L. Haag - * - * $Log: r3964.h,v $ - * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de> - * Fixed HZ usage on 2.6 kernels - * Removed unnecessary include - * - * Revision 1.3 2001/03/18 13:02:24 dwmw2 - * Fix timer usage, use spinlocks properly. - * - * Revision 1.2 2001/03/18 12:53:15 dwmw2 - * Merge changes in 2.4.2 - * - * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2 - * This'll screw the version control - * - * Revision 1.6 1998/09/30 00:40:38 dwmw2 - * Updated to use kernel's N_R3964 if available - * - * Revision 1.4 1998/04/02 20:29:44 lhaag - * select, blocking, ... 
- * - * Revision 1.3 1998/02/12 18:58:43 root - * fixed some memory leaks - * calculation of checksum characters - * - * Revision 1.2 1998/02/07 13:03:17 root - * ioctl read_telegram - * - * Revision 1.1 1998/02/06 19:19:43 root - * Initial revision - * - * - */ - -#ifndef _UAPI__LINUX_N_R3964_H__ -#define _UAPI__LINUX_N_R3964_H__ - -/* line disciplines for r3964 protocol */ - - -/* - * Ioctl-commands - */ - -#define R3964_ENABLE_SIGNALS 0x5301 -#define R3964_SETPRIORITY 0x5302 -#define R3964_USE_BCC 0x5303 -#define R3964_READ_TELEGRAM 0x5304 - -/* Options for R3964_SETPRIORITY */ -#define R3964_MASTER 0 -#define R3964_SLAVE 1 - -/* Options for R3964_ENABLE_SIGNALS */ -#define R3964_SIG_ACK 0x0001 -#define R3964_SIG_DATA 0x0002 -#define R3964_SIG_ALL 0x000f -#define R3964_SIG_NONE 0x0000 -#define R3964_USE_SIGIO 0x1000 - -/* - * r3964 operation states: - */ - -/* types for msg_id: */ -enum {R3964_MSG_ACK=1, R3964_MSG_DATA }; - -#define R3964_MAX_MSG_COUNT 32 - -/* error codes for client messages */ -#define R3964_OK 0 /* no error. */ -#define R3964_TX_FAIL -1 /* transmission error, block NOT sent */ -#define R3964_OVERFLOW -2 /* msg queue overflow */ - -/* the client gets this struct when calling read(fd,...): */ -struct r3964_client_message { - int msg_id; - int arg; - int error_code; -}; - -#define R3964_MTU 256 - - - -#endif /* _UAPI__LINUX_N_R3964_H__ */ diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h index c5d0ef7aa7d5..2d0b90964227 100644 --- a/include/uapi/linux/nbd-netlink.h +++ b/include/uapi/linux/nbd-netlink.h @@ -35,6 +35,7 @@ enum { NBD_ATTR_SOCKETS, NBD_ATTR_DEAD_CONN_TIMEOUT, NBD_ATTR_DEVICE_LIST, + NBD_ATTR_BACKEND_IDENTIFIER, __NBD_ATTR_MAX, }; #define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index dc8b72201f6c..00a60695fa53 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -66,8 +66,11 @@ enum { #define NUD_NONE 0x00 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change - and make no address resolution or NUD. - NUD_PERMANENT also cannot be deleted by garbage collectors. + * and make no address resolution or NUD. + * NUD_PERMANENT also cannot be deleted by garbage collectors. + * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry + * states don't make sense and thus are ignored. Such entries don't age and + * can roam. */ struct nda_cacheinfo { diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 7ed0b3d1c00a..fcc61c73a666 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -13,7 +13,7 @@ #include <linux/types.h> #include <linux/socket.h> /* for SO_TIMESTAMPING */ -/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +/* SO_TIMESTAMPING flags */ enum { SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), @@ -30,8 +30,9 @@ enum { SOF_TIMESTAMPING_OPT_STATS = (1<<12), SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), + SOF_TIMESTAMPING_BIND_PHC = (1 << 15), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -47,6 +48,18 @@ enum { SOF_TIMESTAMPING_TX_ACK) /** + * struct so_timestamping - SO_TIMESTAMPING parameter + * + * @flags: SO_TIMESTAMPING flags + * @bind_phc: Index of PTP virtual clock bound to sock. 
This is available + * if flag SOF_TIMESTAMPING_BIND_PHC is set. + */ +struct so_timestamping { + int flags; + int bind_phc; +}; + +/** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 1fb4ca18ffbb..e94d1fa554cb 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -813,11 +813,13 @@ enum nft_exthdr_flags { * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers * @NFT_EXTHDR_OP_TCP: match against tcp options * @NFT_EXTHDR_OP_IPV4: match against ipv4 options + * @NFT_EXTHDR_OP_SCTP: match against sctp chunks */ enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6, NFT_EXTHDR_OP_TCPOPT, NFT_EXTHDR_OP_IPV4, + NFT_EXTHDR_OP_SCTP, __NFT_EXTHDR_OP_MAX }; #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) @@ -1194,6 +1196,21 @@ enum nft_counter_attributes { #define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1) /** + * enum nft_last_attributes - nf_tables last expression netlink attributes + * + * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32) + * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64) + */ +enum nft_last_attributes { + NFTA_LAST_UNSPEC, + NFTA_LAST_SET, + NFTA_LAST_MSECS, + NFTA_LAST_PAD, + __NFTA_LAST_MAX +}; +#define NFTA_LAST_MAX (__NFTA_LAST_MAX - 1) + +/** * enum nft_log_attributes - nf_tables log expression netlink attributes * * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32) diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h index 5bc960f220b3..6cd58cd2a6f0 100644 --- a/include/uapi/linux/netfilter/nfnetlink.h +++ b/include/uapi/linux/netfilter/nfnetlink.h @@ -60,7 +60,8 @@ struct nfgenmsg { #define NFNL_SUBSYS_CTHELPER 9 #define NFNL_SUBSYS_NFTABLES 10 #define NFNL_SUBSYS_NFT_COMPAT 11 -#define NFNL_SUBSYS_COUNT 12 +#define NFNL_SUBSYS_HOOK 12 +#define NFNL_SUBSYS_COUNT 13 /* Reserved control nfnetlink messages */ #define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index d8484be72fdc..c2ac7269acf7 100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -56,6 +56,7 @@ enum ctattr_type { CTA_LABELS_MASK, CTA_SYNPROXY, CTA_FILTER, + CTA_STATUS_MASK, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) @@ -257,6 +258,7 @@ enum ctattr_stats_cpu { CTA_STATS_ERROR, CTA_STATS_SEARCH_RESTART, CTA_STATS_CLASH_RESOLVE, + CTA_STATS_CHAIN_TOOLONG, __CTA_STATS_MAX, }; #define CTA_STATS_MAX (__CTA_STATS_MAX - 1) diff --git a/include/uapi/linux/netfilter/nfnetlink_hook.h b/include/uapi/linux/netfilter/nfnetlink_hook.h new file mode 100644 index 000000000000..bbcd285b22e1 --- /dev/null +++ b/include/uapi/linux/netfilter/nfnetlink_hook.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _NFNL_HOOK_H_ +#define _NFNL_HOOK_H_ + +enum nfnl_hook_msg_types { + NFNL_MSG_HOOK_GET, + NFNL_MSG_HOOK_MAX, +}; + +/** + * enum nfnl_hook_attributes - netfilter hook netlink attributes + * + * @NFNLA_HOOK_HOOKNUM: netfilter hook number (NLA_U32) + * @NFNLA_HOOK_PRIORITY: netfilter hook priority (NLA_U32) + * @NFNLA_HOOK_DEV: netdevice name (NLA_STRING) + * @NFNLA_HOOK_FUNCTION_NAME: hook function name (NLA_STRING) + * @NFNLA_HOOK_MODULE_NAME: kernel module that registered this 
hook (NLA_STRING) + * @NFNLA_HOOK_CHAIN_INFO: basechain hook metadata (NLA_NESTED) + */ +enum nfnl_hook_attributes { + NFNLA_HOOK_UNSPEC, + NFNLA_HOOK_HOOKNUM, + NFNLA_HOOK_PRIORITY, + NFNLA_HOOK_DEV, + NFNLA_HOOK_FUNCTION_NAME, + NFNLA_HOOK_MODULE_NAME, + NFNLA_HOOK_CHAIN_INFO, + __NFNLA_HOOK_MAX +}; +#define NFNLA_HOOK_MAX (__NFNLA_HOOK_MAX - 1) + +/** + * enum nfnl_hook_chain_info_attributes - chain description + * + * @NFNLA_HOOK_INFO_DESC: nft chain and table name (enum nft_table_attributes) (NLA_NESTED) + * @NFNLA_HOOK_INFO_TYPE: chain type (enum nfnl_hook_chaintype) (NLA_U32) + */ +enum nfnl_hook_chain_info_attributes { + NFNLA_HOOK_INFO_UNSPEC, + NFNLA_HOOK_INFO_DESC, + NFNLA_HOOK_INFO_TYPE, + __NFNLA_HOOK_INFO_MAX, +}; +#define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1) + +enum nfnl_hook_chain_desc_attributes { + NFNLA_CHAIN_UNSPEC, + NFNLA_CHAIN_TABLE, + NFNLA_CHAIN_FAMILY, + NFNLA_CHAIN_NAME, + __NFNLA_CHAIN_MAX, +}; +#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1) + +/** + * enum nfnl_hook_chaintype - chain type + * + * @NFNL_HOOK_TYPE_NFTABLES: nf_tables base chain + */ +enum nfnl_hook_chaintype { + NFNL_HOOK_TYPE_NFTABLES = 0x1, +}; + +#endif /* _NFNL_HOOK_H_ */ diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 45c8d3b027e0..0af9c113d665 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -61,7 +61,7 @@ enum nfulnl_attr_type { NFULA_HWTYPE, /* hardware type */ NFULA_HWHEADER, /* hardware header */ NFULA_HWLEN, /* hardware header length */ - NFULA_CT, /* nf_conntrack_netlink.h */ + NFULA_CT, /* nfnetlink_conntrack.h */ NFULA_CT_INFO, /* enum ip_conntrack_info */ NFULA_VLAN, /* nested attribute: packet vlan info */ NFULA_L2HDR, /* full L2 header */ diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index bcb2cb5d40b9..aed90c4df0c8 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -51,11 +51,11 @@ enum nfqnl_attr_type { NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ NFQA_HWADDR, /* nfqnl_msg_packet_hw */ NFQA_PAYLOAD, /* opaque data payload */ - NFQA_CT, /* nf_conntrack_netlink.h */ + NFQA_CT, /* nfnetlink_conntrack.h */ NFQA_CT_INFO, /* enum ip_conntrack_info */ NFQA_CAP_LEN, /* __u32 length of captured packet */ NFQA_SKB_INFO, /* __u32 skb meta information */ - NFQA_EXP, /* nf_conntrack_netlink.h */ + NFQA_EXP, /* nfnetlink_conntrack.h */ NFQA_UID, /* __u32 sk uid */ NFQA_GID, /* __u32 sk gid */ NFQA_SECCTX, /* security context string */ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 3d94269bbfa8..4c0cde075c27 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -91,9 +91,10 @@ struct nlmsghdr { #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) #define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN) #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) -#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) +#define NLMSG_DATA(nlh) ((void *)(((char *)nlh) + NLMSG_HDRLEN)) #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ - (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) + (struct nlmsghdr *)(((char *)(nlh)) + \ + NLMSG_ALIGN((nlh)->nlmsg_len))) #define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len <= (len)) diff --git 
a/include/uapi/linux/nfsd/nfsfh.h b/include/uapi/linux/nfsd/nfsfh.h index 427294dd56a1..e29e8accc4f4 100644 --- a/include/uapi/linux/nfsd/nfsfh.h +++ b/include/uapi/linux/nfsd/nfsfh.h @@ -33,7 +33,6 @@ struct nfs_fhbase_old { /* * This is the new flexible, extensible style NFSv2/v3/v4 file handle. - * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000 * * The file handle starts with a sequence of four-byte words. * The first word contains a version number (1) and three descriptor bytes diff --git a/include/uapi/linux/nl80211-vnd-intel.h b/include/uapi/linux/nl80211-vnd-intel.h new file mode 100644 index 000000000000..0bf177b84fd9 --- /dev/null +++ b/include/uapi/linux/nl80211-vnd-intel.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#ifndef __VENDOR_CMD_INTEL_H__ +#define __VENDOR_CMD_INTEL_H__ + +#define INTEL_OUI 0x001735 + +/** + * enum iwl_mvm_vendor_cmd - supported vendor commands + * @IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO: reports CSME connection info. + * @IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP: asks for ownership on the device. + * @IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT: notifies if roaming is allowed. + * It contains &IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN and + * &IWL_MVM_VENDOR_ATTR_VIF_ADDR attributes. + */ + +enum iwl_mvm_vendor_cmd { + IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO = 0x2d, + IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP = 0x30, + IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT = 0x32, +}; + +enum iwl_vendor_auth_akm_mode { + IWL_VENDOR_AUTH_OPEN, + IWL_VENDOR_AUTH_RSNA = 0x6, + IWL_VENDOR_AUTH_RSNA_PSK, + IWL_VENDOR_AUTH_SAE = 0x9, + IWL_VENDOR_AUTH_MAX, +}; + +/** + * enum iwl_mvm_vendor_attr - attributes used in vendor commands + * @__IWL_MVM_VENDOR_ATTR_INVALID: attribute 0 is invalid + * @IWL_MVM_VENDOR_ATTR_VIF_ADDR: interface MAC address + * @IWL_MVM_VENDOR_ATTR_ADDR: MAC address + * @IWL_MVM_VENDOR_ATTR_SSID: SSID (binary attribute, 0..32 octets) + * @IWL_MVM_VENDOR_ATTR_STA_CIPHER: the cipher to use for the station with the + * mac address specified in &IWL_MVM_VENDOR_ATTR_ADDR. + * @IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN: u8 attribute. Indicates whether + * roaming is forbidden or not. Value 1 means roaming is forbidden, + * 0 means roaming is allowed. + * @IWL_MVM_VENDOR_ATTR_AUTH_MODE: u32 attribute. Authentication mode type + * as specified in &enum iwl_vendor_auth_akm_mode. + * @IWL_MVM_VENDOR_ATTR_CHANNEL_NUM: u8 attribute. Contains channel number. + * @IWL_MVM_VENDOR_ATTR_BAND: u8 attribute. + * 0 for 2.4 GHz band, 1 for 5.2GHz band and 2 for 6GHz band. + * @IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL: u32 attribute. Channel number of + * collocated AP. Relevant for 6GHz AP info. + * @IWL_MVM_VENDOR_ATTR_COLLOC_ADDR: MAC address of a collocated AP. + * Relevant for 6GHz AP info. 
+ * + * @NUM_IWL_MVM_VENDOR_ATTR: number of vendor attributes + * @MAX_IWL_MVM_VENDOR_ATTR: highest vendor attribute number + * + */ +enum iwl_mvm_vendor_attr { + __IWL_MVM_VENDOR_ATTR_INVALID = 0x00, + IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02, + IWL_MVM_VENDOR_ATTR_ADDR = 0x0a, + IWL_MVM_VENDOR_ATTR_SSID = 0x3d, + IWL_MVM_VENDOR_ATTR_STA_CIPHER = 0x51, + IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN = 0x64, + IWL_MVM_VENDOR_ATTR_AUTH_MODE = 0x65, + IWL_MVM_VENDOR_ATTR_CHANNEL_NUM = 0x66, + IWL_MVM_VENDOR_ATTR_BAND = 0x69, + IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL = 0x70, + IWL_MVM_VENDOR_ATTR_COLLOC_ADDR = 0x71, + + NUM_IWL_MVM_VENDOR_ATTR, + MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1, +}; + +#endif /* __VENDOR_CMD_INTEL_H__ */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f962c06e9818..c2efea98e060 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1185,6 +1185,21 @@ * passed using %NL80211_ATTR_SAR_SPEC. %NL80211_ATTR_WIPHY is used to * specify the wiphy index to be applied to. + * + * @NL80211_CMD_OBSS_COLOR_COLLISION: This notification is sent out whenever + * mac80211/drv detects a bss color collision. + * + * @NL80211_CMD_COLOR_CHANGE_REQUEST: This command is used to indicate that + * userspace wants to change the BSS color. + * + * @NL80211_CMD_COLOR_CHANGE_STARTED: Notify userland that a color change has + * started + * + * @NL80211_CMD_COLOR_CHANGE_ABORTED: Notify userland that the color change has + * been aborted + * + * @NL80211_CMD_COLOR_CHANGE_COMPLETED: Notify userland that the color change + * has completed + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1417,6 +1432,14 @@ enum nl80211_commands { NL80211_CMD_SET_SAR_SPECS, + NL80211_CMD_OBSS_COLOR_COLLISION, + + NL80211_CMD_COLOR_CHANGE_REQUEST, + + NL80211_CMD_COLOR_CHANGE_STARTED, + NL80211_CMD_COLOR_CHANGE_ABORTED, + NL80211_CMD_COLOR_CHANGE_COMPLETED, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2560,6 +2583,16 @@ enum nl80211_commands { * disassoc events to indicate that an immediate reconnect to the AP * is desired. + * + * @NL80211_ATTR_OBSS_COLOR_BITMAP: bitmap of the u64 BSS colors for the + * %NL80211_CMD_OBSS_COLOR_COLLISION event. + * + * @NL80211_ATTR_COLOR_CHANGE_COUNT: u8 attribute specifying the number of TBTTs + * until the color switch event. + * @NL80211_ATTR_COLOR_CHANGE_COLOR: u8 attribute specifying the color that we are + * switching to + * @NL80211_ATTR_COLOR_CHANGE_ELEMS: Nested set of attributes containing the IE + * information for the time while performing a color switch. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3057,6 +3090,12 @@ enum nl80211_attrs { NL80211_ATTR_DISABLE_HE, + NL80211_ATTR_OBSS_COLOR_BITMAP, + + NL80211_ATTR_COLOR_CHANGE_COUNT, + NL80211_ATTR_COLOR_CHANGE_COLOR, + NL80211_ATTR_COLOR_CHANGE_ELEMS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3654,6 +3693,8 @@ enum nl80211_mpath_info { * defined * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16), * given for all 6 GHz band channels + * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are + * advertised on this band/for this iftype (binary) * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use */ enum nl80211_band_iftype_attr { @@ -3665,6 +3706,7 @@ enum nl80211_band_iftype_attr { NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS, /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, @@ -5950,6 +5992,9 @@ enum nl80211_feature_flags { * frame protection for all management frames exchanged during the * negotiation and range measurement procedure. + * + * @NL80211_EXT_FEATURE_BSS_COLOR: The driver supports BSS color collision + * detection and change announcements. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -6014,6 +6059,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SECURE_LTF, NL80211_EXT_FEATURE_SECURE_RTT, NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE, + NL80211_EXT_FEATURE_BSS_COLOR, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -6912,6 +6958,9 @@ enum nl80211_peer_measurement_ftm_capa { * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only * valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set. + * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the + * responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED + * or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set. * * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number @@ -6931,6 +6980,7 @@ enum nl80211_peer_measurement_ftm_req { NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED, NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED, NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK, + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR, /* keep last */ NUM_NL80211_PMSR_FTM_REQ_ATTR, diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 8d16744edc31..150bcff49b1c 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -70,6 +70,8 @@ enum ovs_datapath_cmd { * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should * not be sent. + * @OVS_DP_ATTR_PER_CPU_PIDS: Per-cpu array of PIDs for upcalls when + * OVS_DP_F_DISPATCH_UPCALL_PER_CPU feature is set. * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the * datapath. Always present in notifications. 
* @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the @@ -87,6 +89,9 @@ enum ovs_datapath_attr { OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */ OVS_DP_ATTR_PAD, OVS_DP_ATTR_MASKS_CACHE_SIZE, + OVS_DP_ATTR_PER_CPU_PIDS, /* Netlink PIDS to receive upcalls in + * per-cpu dispatch mode + */ __OVS_DP_ATTR_MAX }; @@ -127,6 +132,9 @@ struct ovs_vport_stats { /* Allow tc offload recirc sharing */ #define OVS_DP_F_TC_RECIRC_SHARING (1 << 2) +/* Allow per-cpu dispatch of upcalls */ +#define OVS_DP_F_DISPATCH_UPCALL_PER_CPU (1 << 3) + /* Fixed logical ports. */ #define OVSP_LOCAL ((__u32)0) diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index c3ab4c826297..f9c1af8d141b 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/** +/* * pcitest.h - PCI test uapi defines * * Copyright (C) 2017 Texas Instruments diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 025c40fef93d..6836ccb9c45d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -22,6 +22,7 @@ enum { __TCA_ACT_MAX }; +/* See other TCA_ACT_FLAGS_* flags in include/net/act_api.h. */ #define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for * actions stats. */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 79a699f106b1..ec88590b3198 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -827,6 +827,8 @@ struct tc_codel_xstats { /* FQ_CODEL */ +#define FQ_CODEL_QUANTUM_MAX (1 << 20) + enum { TCA_FQ_CODEL_UNSPEC, TCA_FQ_CODEL_TARGET, diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 18a9f59dc067..43bd7f713c39 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -213,6 +213,7 @@ struct prctl_mm_map { /* Speculation control variants */ # define PR_SPEC_STORE_BYPASS 0 # define PR_SPEC_INDIRECT_BRANCH 1 +# define PR_SPEC_L1D_FLUSH 2 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */ # define PR_SPEC_NOT_AFFECTED 0 # define PR_SPEC_PRCTL (1UL << 0) @@ -234,14 +235,15 @@ struct prctl_mm_map { #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) /* MTE tag check fault modes */ -# define PR_MTE_TCF_SHIFT 1 -# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) -# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) -# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT) -# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_NONE 0 +# define PR_MTE_TCF_SYNC (1UL << 1) +# define PR_MTE_TCF_ASYNC (1UL << 2) +# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC) /* MTE tag inclusion mask */ # define PR_MTE_TAG_SHIFT 3 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) +/* Unused; kept only for source compatibility */ +# define PR_MTE_TCF_SHIFT 1 /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 @@ -259,4 +261,12 @@ struct prctl_mm_map { #define PR_PAC_SET_ENABLED_KEYS 60 #define PR_PAC_GET_ENABLED_KEYS 61 +/* Request the scheduler to share a core */ +#define PR_SCHED_CORE 62 +# define PR_SCHED_CORE_GET 0 +# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */ +# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */ +# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie from pid */ +# define PR_SCHED_CORE_MAX 4 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h 
deleted file mode 100644 index 47874919d0b9..000000000000 --- a/include/uapi/linux/raw.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __LINUX_RAW_H -#define __LINUX_RAW_H - -#include <linux/types.h> - -#define RAW_SETBIND _IO( 0xac, 0 ) -#define RAW_GETBIND _IO( 0xac, 1 ) - -struct raw_config_request -{ - int raw_minor; - __u64 block_major; - __u64 block_minor; -}; - -#endif /* __LINUX_RAW_H */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index cb78e7a739da..c4ff1ebd8bcc 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -141,6 +141,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131 #define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE #define SCTP_REMOTE_UDP_ENCAPS_PORT 132 +#define SCTP_PLPMTUD_PROBE_INTERVAL 133 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -1213,4 +1214,11 @@ enum sctp_sched_type { SCTP_SS_MAX = SCTP_SS_RR }; +/* Probe Interval socket option */ +struct sctp_probeinterval { + sctp_assoc_t spi_assoc_id; + struct sockaddr_storage spi_address; + __u32 spi_interval; +}; + #endif /* _UAPI_SCTP_H */ diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 6ba18b82a02e..78074254ab98 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -115,6 +115,7 @@ struct seccomp_notif_resp { /* valid flags for seccomp_notif_addfd */ #define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */ +#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */ /** * struct seccomp_notif_addfd diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h index 5ae3ace84de0..332b18f318f8 100644 --- a/include/uapi/linux/seg6_local.h +++ b/include/uapi/linux/seg6_local.h @@ -64,6 +64,8 @@ enum { SEG6_LOCAL_ACTION_END_AM = 14, /* custom BPF action */ SEG6_LOCAL_ACTION_END_BPF = 15, + /* decap and lookup of DA in v4 or v6 table */ + SEG6_LOCAL_ACTION_END_DT46 = 16, __SEG6_LOCAL_ACTION_MAX, }; diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index be07b5470f4b..f51bc8f36813 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -62,6 +62,7 @@ * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 * TI16C752: 8 16 56 60 8 16 32 56 + * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950 * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA */ #define UART_FCR_R_TRIG_00 0x00 diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 3e68da07fba2..0f7f87c70baf 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -47,6 +47,8 @@ enum { SMC_NETLINK_GET_LGR_SMCD, SMC_NETLINK_GET_DEV_SMCD, SMC_NETLINK_GET_DEV_SMCR, + SMC_NETLINK_GET_STATS, + SMC_NETLINK_GET_FBACK_STATS, }; /* SMC_GENL_FAMILY top level attributes */ @@ -58,6 +60,8 @@ enum { SMC_GEN_LGR_SMCD, /* nest */ SMC_GEN_DEV_SMCD, /* nest */ SMC_GEN_DEV_SMCR, /* nest */ + SMC_GEN_STATS, /* nest */ + SMC_GEN_FBACK_STATS, /* nest */ __SMC_GEN_MAX, SMC_GEN_MAX = __SMC_GEN_MAX - 1 }; @@ -159,4 +163,83 @@ enum { SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1 }; +/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */ +/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */ +enum { + SMC_NLA_STATS_PLOAD_PAD, + SMC_NLA_STATS_PLOAD_8K, /* u64 */ + SMC_NLA_STATS_PLOAD_16K, /* u64 */ + SMC_NLA_STATS_PLOAD_32K, /* u64 */ + SMC_NLA_STATS_PLOAD_64K, /* u64 */ + SMC_NLA_STATS_PLOAD_128K, /* u64 */ + 
SMC_NLA_STATS_PLOAD_256K, /* u64 */ + SMC_NLA_STATS_PLOAD_512K, /* u64 */ + SMC_NLA_STATS_PLOAD_1024K, /* u64 */ + SMC_NLA_STATS_PLOAD_G_1024K, /* u64 */ + __SMC_NLA_STATS_PLOAD_MAX, + SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1 +}; + +/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */ +enum { + SMC_NLA_STATS_RMB_PAD, + SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, /* u64 */ + SMC_NLA_STATS_RMB_SIZE_SM_CNT, /* u64 */ + SMC_NLA_STATS_RMB_FULL_PEER_CNT, /* u64 */ + SMC_NLA_STATS_RMB_FULL_CNT, /* u64 */ + SMC_NLA_STATS_RMB_REUSE_CNT, /* u64 */ + SMC_NLA_STATS_RMB_ALLOC_CNT, /* u64 */ + SMC_NLA_STATS_RMB_DGRADE_CNT, /* u64 */ + __SMC_NLA_STATS_RMB_MAX, + SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1 +}; + +/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */ +enum { + SMC_NLA_STATS_T_PAD, + SMC_NLA_STATS_T_TX_RMB_SIZE, /* nest */ + SMC_NLA_STATS_T_RX_RMB_SIZE, /* nest */ + SMC_NLA_STATS_T_TXPLOAD_SIZE, /* nest */ + SMC_NLA_STATS_T_RXPLOAD_SIZE, /* nest */ + SMC_NLA_STATS_T_TX_RMB_STATS, /* nest */ + SMC_NLA_STATS_T_RX_RMB_STATS, /* nest */ + SMC_NLA_STATS_T_CLNT_V1_SUCC, /* u64 */ + SMC_NLA_STATS_T_CLNT_V2_SUCC, /* u64 */ + SMC_NLA_STATS_T_SRV_V1_SUCC, /* u64 */ + SMC_NLA_STATS_T_SRV_V2_SUCC, /* u64 */ + SMC_NLA_STATS_T_SENDPAGE_CNT, /* u64 */ + SMC_NLA_STATS_T_SPLICE_CNT, /* u64 */ + SMC_NLA_STATS_T_CORK_CNT, /* u64 */ + SMC_NLA_STATS_T_NDLY_CNT, /* u64 */ + SMC_NLA_STATS_T_URG_DATA_CNT, /* u64 */ + SMC_NLA_STATS_T_RX_BYTES, /* u64 */ + SMC_NLA_STATS_T_TX_BYTES, /* u64 */ + SMC_NLA_STATS_T_RX_CNT, /* u64 */ + SMC_NLA_STATS_T_TX_CNT, /* u64 */ + __SMC_NLA_STATS_T_MAX, + SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1 +}; + +/* SMC_GEN_STATS attributes */ +enum { + SMC_NLA_STATS_PAD, + SMC_NLA_STATS_SMCD_TECH, /* nest */ + SMC_NLA_STATS_SMCR_TECH, /* nest */ + SMC_NLA_STATS_CLNT_HS_ERR_CNT, /* u64 */ + SMC_NLA_STATS_SRV_HS_ERR_CNT, /* u64 */ + __SMC_NLA_STATS_MAX, + SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1 +}; + +/* SMC_GEN_FBACK_STATS attributes */ +enum { + SMC_NLA_FBACK_STATS_PAD, + SMC_NLA_FBACK_STATS_TYPE, /* u8 */ + SMC_NLA_FBACK_STATS_SRV_CNT, /* u64 */ + SMC_NLA_FBACK_STATS_CLNT_CNT, /* u64 */ + SMC_NLA_FBACK_STATS_RSN_CODE, /* u32 */ + SMC_NLA_FBACK_STATS_RSN_CNT, /* u16 */ + __SMC_NLA_FBACK_STATS_MAX, + SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1 +}; #endif /* _UAPI_LINUX_SMC_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 26fc60ce9298..904909d020e2 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -290,6 +290,8 @@ enum LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */ LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */ LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */ + LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */ + LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index c3409c8ec0dd..eb0a9a5b6e71 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -26,4 +26,9 @@ struct __kernel_sockaddr_storage { }; }; +#define SOCK_SNDBUF_LOCK 1 +#define SOCK_RCVBUF_LOCK 2 + +#define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK) + #endif /* _UAPI_LINUX_SOCKET_H */ diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h index fbcce04abfe9..08f46b60b151 100644 --- a/include/uapi/linux/surface_aggregator/cdev.h +++ b/include/uapi/linux/surface_aggregator/cdev.h @@ -6,7 +6,7 @@ * device. 
This device provides direct user-space access to the SSAM EC. * Intended for debugging and development. * - * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com> + * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com> */ #ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H @@ -73,6 +73,75 @@ struct ssam_cdev_request { } response; } __attribute__((__packed__)); -#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request) +/** + * struct ssam_cdev_notifier_desc - Notifier descriptor. + * @priority: Priority value determining the order in which notifier + * callbacks will be called. A higher value means higher + * priority, i.e. the associated callback will be executed + * earlier than other (lower priority) callbacks. + * @target_category: The event target category for which this notifier should + * receive events. + * + * Specifies the notifier that should be registered or unregistered, + * specifically with which priority and for which target category of events. + */ +struct ssam_cdev_notifier_desc { + __s32 priority; + __u8 target_category; +} __attribute__((__packed__)); + +/** + * struct ssam_cdev_event_desc - Event descriptor. + * @reg: Registry via which the event will be enabled/disabled. + * @reg.target_category: Target category for the event registry requests. + * @reg.target_id: Target ID for the event registry requests. + * @reg.cid_enable: Command ID for the event-enable request. + * @reg.cid_disable: Command ID for the event-disable request. + * @id: ID specifying the event. + * @id.target_category: Target category of the event source. + * @id.instance: Instance ID of the event source. + * @flags: Flags used for enabling the event. + * + * Specifies which event should be enabled/disabled and how to do that. + */ +struct ssam_cdev_event_desc { + struct { + __u8 target_category; + __u8 target_id; + __u8 cid_enable; + __u8 cid_disable; + } reg; + + struct { + __u8 target_category; + __u8 instance; + } id; + + __u8 flags; +} __attribute__((__packed__)); + +/** + * struct ssam_cdev_event - SSAM event sent by the EC. + * @target_category: Target category of the event source. See &enum ssam_ssh_tc. + * @target_id: Target ID of the event source. + * @command_id: Command ID of the event. + * @instance_id: Instance ID of the event source. + * @length: Length of the event payload in bytes. + * @data: Event payload data. 
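For orientation, the descriptors documented above are consumed by the SSAM_CDEV_NOTIF_REGISTER and SSAM_CDEV_EVENT_ENABLE ioctls defined a little further below. A minimal userspace sketch, assuming the conventional /dev/surface/aggregator node and an illustrative target-category value (neither is defined by this header):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/surface_aggregator/cdev.h>

int main(void)
{
	/* Device path is an assumption; the header only defines the ioctl ABI. */
	int fd = open("/dev/surface/aggregator", O_RDWR);
	struct ssam_cdev_notifier_desc desc = {
		.priority = 1,           /* runs before lower-priority notifiers */
		.target_category = 0x01, /* illustrative target category */
	};

	if (fd < 0 || ioctl(fd, SSAM_CDEV_NOTIF_REGISTER, &desc) < 0) {
		perror("ssam notifier");
		return 1;
	}
	/* Matching events (struct ssam_cdev_event) can then be read(2) from fd. */
	return 0;
}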
+ */ +struct ssam_cdev_event { + __u8 target_category; + __u8 target_id; + __u8 command_id; + __u8 instance_id; + __u16 length; + __u8 data[]; +} __attribute__((__packed__)); + +#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request) +#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc) +#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc) +#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc) +#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc) #endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 95b1597f16ae..27ace512babd 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -46,6 +46,7 @@ #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */ #define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */ #define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */ +#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1<<3) /* Keep buf after cmd completion */ struct tcmu_mailbox { __u16 version; @@ -75,6 +76,7 @@ struct tcmu_cmd_entry_hdr { __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 +#define TCMU_UFLAG_KEEP_BUF 0x4 __u8 uflags; } __packed; diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h index c525b3503797..af6ef2cfbf3d 100644 --- a/include/uapi/linux/tc_act/tc_skbmod.h +++ b/include/uapi/linux/tc_act/tc_skbmod.h @@ -17,6 +17,7 @@ #define SKBMOD_F_SMAC 0x2 #define SKBMOD_F_ETYPE 0x4 #define SKBMOD_F_SWAPMAC 0x8 +#define SKBMOD_F_ECN 0x10 struct tc_skbmod { tc_gen; diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index bafbeb1a2624..05b31d60acf6 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -31,7 +31,8 @@ UFFD_FEATURE_MISSING_SHMEM | \ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ - UFFD_FEATURE_MINOR_HUGETLBFS) + UFFD_FEATURE_MINOR_HUGETLBFS | \ + UFFD_FEATURE_MINOR_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -80,8 +81,8 @@ struct uffdio_zeropage) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) -#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \ - struct uffdio_continue) +#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \ + struct uffdio_continue) /* read() structure */ struct uffd_msg { @@ -185,6 +186,9 @@ struct uffdio_api { * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults * can be intercepted (via REGISTER_MODE_MINOR) for * hugetlbfs-backed pages. + * + * UFFD_FEATURE_MINOR_SHMEM indicates the same support as + * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. 
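With the feature above, a fault-handling thread resolves minor faults on shmem the same way it does for hugetlbfs, via UFFDIO_CONTINUE (whose direction changes from _IOR to _IOWR in this diff so the kernel can report the mapped length back). A sketch with a hypothetical helper; the UFFDIO_API handshake and the UFFDIO_REGISTER call with the minor-fault mode are assumed to have happened already:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Hypothetical helper: resolve a minor fault on a shmem- or
 * hugetlbfs-backed range whose page is already in the page cache. */
static long resolve_minor_fault(int uffd, unsigned long addr, unsigned long len)
{
	struct uffdio_continue cont;

	memset(&cont, 0, sizeof(cont));
	cont.range.start = addr; /* page-aligned fault address */
	cont.range.len = len;    /* page-aligned length */

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) < 0)
		return -1;
	return cont.mapped;      /* bytes mapped, written back by the kernel */
}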
*/ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -196,6 +200,7 @@ struct uffdio_api { #define UFFD_FEATURE_SIGBUS (1<<7) #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) +#define UFFD_FEATURE_MINOR_SHMEM (1<<10) __u64 features; __u64 ioctls; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index d43bec5f1afd..5532b5f68493 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -50,6 +50,7 @@ #ifndef __LINUX_V4L2_CONTROLS_H #define __LINUX_V4L2_CONTROLS_H +#include <linux/const.h> #include <linux/types.h> /* Control classes */ @@ -434,6 +435,7 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233) #define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234) #define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235) +#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236) /* CIDs for the MPEG-2 Part 2 (H.262) codec */ #define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270) @@ -1602,30 +1604,30 @@ struct v4l2_ctrl_h264_decode_params { #define V4L2_FWHT_VERSION 3 /* Set if this is an interlaced format */ -#define V4L2_FWHT_FL_IS_INTERLACED BIT(0) +#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0) /* Set if this is a bottom-first (NTSC) interlaced format */ -#define V4L2_FWHT_FL_IS_BOTTOM_FIRST BIT(1) +#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1) /* Set if each 'frame' contains just one field */ -#define V4L2_FWHT_FL_IS_ALTERNATE BIT(2) +#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2) /* * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this * 'frame' is the bottom field, else it is the top field. */ -#define V4L2_FWHT_FL_IS_BOTTOM_FIELD BIT(3) +#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3) /* Set if the Y' plane is uncompressed */ -#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED BIT(4) +#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4) /* Set if the Cb plane is uncompressed */ -#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED BIT(5) +#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5) /* Set if the Cr plane is uncompressed */ -#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED BIT(6) +#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6) /* Set if the chroma plane is full height, if cleared it is half height */ -#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT BIT(7) +#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7) /* Set if the chroma plane is full width, if cleared it is half width */ -#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH BIT(8) +#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8) /* Set if the alpha plane is uncompressed */ -#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9) +#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9) /* Set if this is an I Frame */ -#define V4L2_FWHT_FL_I_FRAME BIT(10) +#define V4L2_FWHT_FL_I_FRAME _BITUL(10) /* A 4-values flag - the number of components - 1 */ #define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16) @@ -1862,6 +1864,118 @@ struct v4l2_ctrl_vp8_frame { __u64 flags; }; +/* Stateless MPEG-2 controls */ + +#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01 + +#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220) +/** + * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header + * + * All the members of this structure match the sequence header and sequence + * extension syntaxes as specified by the MPEG-2 specification. 
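For a sense of how the stateless MPEG-2 controls introduced below are driven from userspace, here is a hedged sketch: the decoder parses the bitstream headers itself and pushes the parsed sequence header to the driver through VIDIOC_S_EXT_CTRLS. The field values are illustrative, and the media-request plumbing a real stateless decoder needs is omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: submit a parsed MPEG-2 sequence header to a stateless decoder. */
static int set_mpeg2_sequence(int video_fd)
{
	struct v4l2_ctrl_mpeg2_sequence seq;
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&seq, 0, sizeof(seq));
	seq.horizontal_size = 720;		/* illustrative values */
	seq.vertical_size = 576;
	seq.vbv_buffer_size = 112 * 16384;
	seq.chroma_format = 1;			/* 4:2:0 */

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE;
	ctrl.size = sizeof(seq);
	ctrl.ptr = &seq;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}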
+ * + * Fields horizontal_size, vertical_size and vbv_buffer_size are a + * combination of respective _value and extension syntax elements, + * as described in section 6.3.3 "Sequence header". + * + * @horizontal_size: combination of elements horizontal_size_value and + * horizontal_size_extension. + * @vertical_size: combination of elements vertical_size_value and + * vertical_size_extension. + * @vbv_buffer_size: combination of elements vbv_buffer_size_value and + * vbv_buffer_size_extension. + * @profile_and_level_indication: see MPEG-2 specification. + * @chroma_format: see MPEG-2 specification. + * @flags: see V4L2_MPEG2_SEQ_FLAG_{}. + */ +struct v4l2_ctrl_mpeg2_sequence { + __u16 horizontal_size; + __u16 vertical_size; + __u32 vbv_buffer_size; + __u16 profile_and_level_indication; + __u8 chroma_format; + __u8 flags; +}; + +#define V4L2_MPEG2_PIC_CODING_TYPE_I 1 +#define V4L2_MPEG2_PIC_CODING_TYPE_P 2 +#define V4L2_MPEG2_PIC_CODING_TYPE_B 3 +#define V4L2_MPEG2_PIC_CODING_TYPE_D 4 + +#define V4L2_MPEG2_PIC_TOP_FIELD 0x1 +#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2 +#define V4L2_MPEG2_PIC_FRAME 0x3 + +#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001 +#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002 +#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004 +#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008 +#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010 +#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020 +#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040 +#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080 + +#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221) +/** + * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header + * + * All the members of this structure match the picture header and picture + * coding extension syntaxes as specified by the MPEG-2 specification. + * + * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for backward prediction. + * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for forward prediction. These timestamps refer to the + * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns() + * to convert the struct timeval to a __u64. + * @flags: see V4L2_MPEG2_PIC_FLAG_{}. + * @f_code: see MPEG-2 specification. + * @picture_coding_type: see MPEG-2 specification. + * @picture_structure: see V4L2_MPEG2_PIC_{}_FIELD. + * @intra_dc_precision: see MPEG-2 specification. + * @reserved: padding field. Should be zeroed by applications. + */ +struct v4l2_ctrl_mpeg2_picture { + __u64 backward_ref_ts; + __u64 forward_ref_ts; + __u32 flags; + __u8 f_code[2][2]; + __u8 picture_coding_type; + __u8 picture_structure; + __u8 intra_dc_precision; + __u8 reserved[5]; +}; + +#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222) +/** + * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation + * + * Quantisation matrices as specified by section 6.3.7 + * "Quant matrix extension". + * + * @intra_quantiser_matrix: The quantisation matrix coefficients + * for intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. + * @non_intra_quantiser_matrix: The quantisation matrix coefficients + * for non-intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. 
+ * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats. + * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of non-intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats. + */ +struct v4l2_ctrl_mpeg2_quantisation { + __u8 intra_quantiser_matrix[64]; + __u8 non_intra_quantiser_matrix[64]; + __u8 chroma_intra_quantiser_matrix[64]; + __u8 chroma_non_intra_quantiser_matrix[64]; +}; + #define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900) #define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1) diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h new file mode 100644 index 000000000000..7cfe1c1280c0 --- /dev/null +++ b/include/uapi/linux/vduse.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_VDUSE_H_ +#define _UAPI_VDUSE_H_ + +#include <linux/types.h> + +#define VDUSE_BASE 0x81 + +/* The ioctls for control device (/dev/vduse/control) */ + +#define VDUSE_API_VERSION 0 + +/* + * Get the version of the VDUSE API that the kernel supports (VDUSE_API_VERSION). + * This is used for future extension. + */ +#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64) + +/* Set the version of the VDUSE API that userspace supports. */ +#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64) + +/** + * struct vduse_dev_config - basic configuration of a VDUSE device + * @name: VDUSE device name, needs to be NUL terminated + * @vendor_id: virtio vendor id + * @device_id: virtio device id + * @features: virtio features + * @vq_num: the number of virtqueues + * @vq_align: the allocation alignment of virtqueue's metadata + * @reserved: for future use, needs to be initialized to zero + * @config_size: the size of the configuration space + * @config: the buffer of the configuration space + * + * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device. + */ +struct vduse_dev_config { +#define VDUSE_NAME_MAX 256 + char name[VDUSE_NAME_MAX]; + __u32 vendor_id; + __u32 device_id; + __u64 features; + __u32 vq_num; + __u32 vq_align; + __u32 reserved[13]; + __u32 config_size; + __u8 config[]; +}; + +/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */ +#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config) + +/* + * Destroy a VDUSE device. Make sure there are no more references + * to the char device (/dev/vduse/$NAME). + */ +#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX]) + +/* The ioctls for VDUSE device (/dev/vduse/$NAME) */ + +/** + * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last] + * @offset: the mmap offset on returned file descriptor + * @start: start of the IOVA region + * @last: last of the IOVA region + * @perm: access permission of the IOVA region + * + * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region. + */ +struct vduse_iotlb_entry { + __u64 offset; + __u64 start; + __u64 last; +#define VDUSE_ACCESS_RO 0x1 +#define VDUSE_ACCESS_WO 0x2 +#define VDUSE_ACCESS_RW 0x3 + __u8 perm; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return the corresponding file descriptor. Returns -EINVAL if the + * IOVA region doesn't exist. Caller should set start and last fields. 
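To make the IOTLB flow concrete, a sketch of how a VDUSE device implementation might use the VDUSE_IOTLB_GET_FD ioctl defined just below; the helper name is illustrative, and the protection flags are derived from the @perm field as the entry describes:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vduse.h>

/* Sketch: map the IOVA region overlapping [iova, iova + len - 1] into
 * the userspace device implementation. Helper name is illustrative. */
static void *map_iova_region(int dev_fd, __u64 iova, __u64 len)
{
	struct vduse_iotlb_entry entry;
	void *addr;
	int fd, prot = 0;

	entry.start = iova;          /* caller fills start and last */
	entry.last = iova + len - 1;

	fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);
	if (fd < 0)
		return NULL;         /* no overlapping IOVA region */

	if (entry.perm & VDUSE_ACCESS_RO)
		prot |= PROT_READ;
	if (entry.perm & VDUSE_ACCESS_WO)
		prot |= PROT_WRITE;

	/* entry.offset is the mmap offset on the returned descriptor. */
	addr = mmap(NULL, entry.last - entry.start + 1, prot, MAP_SHARED,
		    fd, entry.offset);
	close(fd);                   /* the mapping keeps its own reference */
	return addr == MAP_FAILED ? NULL : addr;
}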
+ */ +#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry) + +/* + * Get the negotiated virtio features. It's a subset of the features in + * struct vduse_dev_config which can be accepted by virtio driver. It's + * only valid after FEATURES_OK status bit is set. + */ +#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64) + +/** + * struct vduse_config_data - data used to update configuration space + * @offset: the offset from the beginning of configuration space + * @length: the length to write to configuration space + * @buffer: the buffer used to write from + * + * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device + * configuration space. + */ +struct vduse_config_data { + __u32 offset; + __u32 length; + __u8 buffer[]; +}; + +/* Set device configuration space */ +#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data) + +/* + * Inject a config interrupt. It's usually used to notify virtio driver + * that device configuration space has changed. + */ +#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13) + +/** + * struct vduse_vq_config - basic configuration of a virtqueue + * @index: virtqueue index + * @max_size: the max size of virtqueue + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue. + */ +struct vduse_vq_config { + __u32 index; + __u16 max_size; + __u16 reserved[13]; +}; + +/* + * Setup the specified virtqueue. Make sure all virtqueues have been + * configured before the device is attached to vDPA bus. + */ +#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config) + +/** + * struct vduse_vq_state_split - split virtqueue state + * @avail_index: available index + */ +struct vduse_vq_state_split { + __u16 avail_index; +}; + +/** + * struct vduse_vq_state_packed - packed virtqueue state + * @last_avail_counter: last driver ring wrap counter observed by device + * @last_avail_idx: device available index + * @last_used_counter: device ring wrap counter + * @last_used_idx: used index + */ +struct vduse_vq_state_packed { + __u16 last_avail_counter; + __u16 last_avail_idx; + __u16 last_used_counter; + __u16 last_used_idx; +}; + +/** + * struct vduse_vq_info - information of a virtqueue + * @index: virtqueue index + * @num: the size of virtqueue + * @desc_addr: address of desc area + * @driver_addr: address of driver area + * @device_addr: address of device area + * @split: split virtqueue state + * @packed: packed virtqueue state + * @ready: ready status of virtqueue + * + * Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information. + */ +struct vduse_vq_info { + __u32 index; + __u32 num; + __u64 desc_addr; + __u64 driver_addr; + __u64 device_addr; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; + __u8 ready; +}; + +/* Get the specified virtqueue's information. Caller should set index field. */ +#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info) + +/** + * struct vduse_vq_eventfd - eventfd configuration for a virtqueue + * @index: virtqueue index + * @fd: eventfd, -1 means de-assigning the eventfd + * + * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd. + */ +struct vduse_vq_eventfd { + __u32 index; +#define VDUSE_EVENTFD_DEASSIGN -1 + int fd; +}; + +/* + * Setup kick eventfd for specified virtqueue. The kick eventfd is used + * by VDUSE kernel module to notify userspace to consume the avail vring. 
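A short sketch of wiring up the kick eventfd with the VDUSE_VQ_SETUP_KICKFD ioctl defined just below; the helper is illustrative, and the poll(2) loop that actually consumes the avail vring is left out:

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

/* Sketch: attach a kick eventfd to one virtqueue. The device
 * implementation then polls kick_fd and processes the avail ring
 * whenever it becomes readable. */
static int setup_kickfd(int dev_fd, __u32 vq_index)
{
	struct vduse_vq_eventfd cfg;
	int kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

	if (kick_fd < 0)
		return -1;

	cfg.index = vq_index;
	cfg.fd = kick_fd;	/* VDUSE_EVENTFD_DEASSIGN (-1) detaches */

	if (ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &cfg) < 0)
		return -1;

	return kick_fd;
}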
+ */ +#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd) + +/* + * Inject an interrupt for specific virtqueue. It's used to notify virtio driver + * to consume the used vring. + */ +#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32) + +/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */ + +/** + * enum vduse_req_type - request type + * @VDUSE_GET_VQ_STATE: get the state for specified virtqueue from userspace + * @VDUSE_SET_STATUS: set the device status + * @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for + * specified IOVA range via VDUSE_IOTLB_GET_FD ioctl + */ +enum vduse_req_type { + VDUSE_GET_VQ_STATE, + VDUSE_SET_STATUS, + VDUSE_UPDATE_IOTLB, +}; + +/** + * struct vduse_vq_state - virtqueue state + * @index: virtqueue index + * @split: split virtqueue state + * @packed: packed virtqueue state + */ +struct vduse_vq_state { + __u32 index; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; +}; + +/** + * struct vduse_dev_status - device status + * @status: device status + */ +struct vduse_dev_status { + __u8 status; +}; + +/** + * struct vduse_iova_range - IOVA range [start, last] + * @start: start of the IOVA range + * @last: last of the IOVA range + */ +struct vduse_iova_range { + __u64 start; + __u64 last; +}; + +/** + * struct vduse_dev_request - control request + * @type: request type + * @request_id: request id + * @reserved: for future use + * @vq_state: virtqueue state, only index field is available + * @s: device status + * @iova: IOVA range for updating + * @padding: padding + * + * Structure used by read(2) on /dev/vduse/$NAME. + */ +struct vduse_dev_request { + __u32 type; + __u32 request_id; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + struct vduse_dev_status s; + struct vduse_iova_range iova; + __u32 padding[32]; + }; +}; + +/** + * struct vduse_dev_response - response to control request + * @request_id: corresponding request id + * @result: the result of request + * @reserved: for future use, needs to be initialized to zero + * @vq_state: virtqueue state + * @padding: padding + * + * Structure used by write(2) on /dev/vduse/$NAME. 
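Putting the request and response structures together, the control-message protocol on /dev/vduse/$NAME is a simple read/handle/write loop; a sketch, with the per-virtqueue state bookkeeping stubbed out as illustrative placeholders:

#include <string.h>
#include <unistd.h>
#include <linux/vduse.h>

/* Sketch: read one control request, handle it, write back a response. */
static void handle_one_request(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp;

	if (read(dev_fd, &req, sizeof(req)) != sizeof(req))
		return;

	memset(&resp, 0, sizeof(resp));
	resp.request_id = req.request_id;	/* must echo the request id */
	resp.result = VDUSE_REQ_RESULT_OK;

	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* Report the state tracked for req.vq_state.index. */
		resp.vq_state.index = req.vq_state.index;
		resp.vq_state.split.avail_index = 0;	/* illustrative */
		break;
	case VDUSE_SET_STATUS:
	case VDUSE_UPDATE_IOTLB:
		/* Apply req.s.status, or invalidate any mapping that
		 * overlaps [req.iova.start, req.iova.last]. */
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}

	write(dev_fd, &resp, sizeof(resp));
}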
+ */ +struct vduse_dev_response { + __u32 request_id; +#define VDUSE_REQ_RESULT_OK 0x00 +#define VDUSE_REQ_RESULT_FAILED 0x01 + __u32 result; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + __u32 padding[32]; + }; +}; + +#endif /* _UAPI_VDUSE_H_ */ diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 311a01cc5775..9260791b8438 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1747,6 +1747,9 @@ struct v4l2_ext_control { struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params; struct v4l2_ctrl_fwht_params __user *p_fwht_params; struct v4l2_ctrl_vp8_frame __user *p_vp8_frame; + struct v4l2_ctrl_mpeg2_sequence __user *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture __user *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation; void __user *ptr; }; } __attribute__ ((packed)); @@ -1807,6 +1810,10 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220, V4L2_CTRL_TYPE_VP8_FRAME = 0x0240, + + V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250, + V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251, + V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252, }; /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ diff --git a/include/uapi/linux/virtio_gpio.h b/include/uapi/linux/virtio_gpio.h new file mode 100644 index 000000000000..0445f905d8cc --- /dev/null +++ b/include/uapi/linux/virtio_gpio.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _LINUX_VIRTIO_GPIO_H +#define _LINUX_VIRTIO_GPIO_H + +#include <linux/types.h> + +/* Virtio GPIO request types */ +#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001 +#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002 +#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003 +#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004 +#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005 + +/* Possible values of the status field */ +#define VIRTIO_GPIO_STATUS_OK 0x0 +#define VIRTIO_GPIO_STATUS_ERR 0x1 + +/* Direction types */ +#define VIRTIO_GPIO_DIRECTION_NONE 0x00 +#define VIRTIO_GPIO_DIRECTION_OUT 0x01 +#define VIRTIO_GPIO_DIRECTION_IN 0x02 + +struct virtio_gpio_config { + __le16 ngpio; + __u8 padding[2]; + __le32 gpio_names_size; } __packed; + +/* Virtio GPIO Request / Response */ +struct virtio_gpio_request { + __le16 type; + __le16 gpio; + __le32 value; +}; + +struct virtio_gpio_response { + __u8 status; + __u8 value; +}; + +struct virtio_gpio_response_get_names { + __u8 status; + __u8 value[]; +}; + +#endif /* _LINUX_VIRTIO_GPIO_H */ diff --git a/include/uapi/linux/virtio_i2c.h b/include/uapi/linux/virtio_i2c.h new file mode 100644 index 000000000000..7c6a6fc01ad6 --- /dev/null +++ b/include/uapi/linux/virtio_i2c.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ +/* + * Definitions for virtio I2C Adapter + * + * Copyright (c) 2021 Intel Corporation. All rights reserved. 
+ */ + +#ifndef _UAPI_LINUX_VIRTIO_I2C_H +#define _UAPI_LINUX_VIRTIO_I2C_H + +#include <linux/const.h> +#include <linux/types.h> + +/* Bit 0 of @virtio_i2c_out_hdr.flags, used to group the requests */ +#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0) + +/** + * struct virtio_i2c_out_hdr - the virtio I2C message OUT header + * @addr: the controlled device address + * @padding: used to pad to full dword + * @flags: used for feature extensibility + */ +struct virtio_i2c_out_hdr { + __le16 addr; + __le16 padding; + __le32 flags; +}; + +/** + * struct virtio_i2c_in_hdr - the virtio I2C message IN header + * @status: the processing result from the backend + */ +struct virtio_i2c_in_hdr { + __u8 status; +}; + +/* The final status written by the device */ +#define VIRTIO_I2C_MSG_OK 0 +#define VIRTIO_I2C_MSG_ERR 1 + +#endif /* _UAPI_LINUX_VIRTIO_I2C_H */ diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 4fe842c3a3a9..80d76b75bccd 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -54,7 +54,31 @@ #define VIRTIO_ID_SOUND 25 /* virtio sound */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ +#define VIRTIO_ID_RPMB 28 /* virtio rpmb */ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ +#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */ +#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */ +#define VIRTIO_ID_SCMI 32 /* virtio SCMI */ +#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */ +#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */ +#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */ +#define VIRTIO_ID_CAN 36 /* virtio can */ +#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */ +#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */ +#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */ #define VIRTIO_ID_BT 40 /* virtio bluetooth */ +#define VIRTIO_ID_GPIO 41 /* virtio gpio */ + +/* + * Virtio Transitional IDs + */ + +#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */ +#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */ +#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */ +#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */ +#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */ +#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */ +#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h new file mode 100644 index 000000000000..668b07ce515b --- /dev/null +++ b/include/uapi/linux/virtio_pcidev.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (C) 2021 Intel Corporation + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#ifndef _UAPI_LINUX_VIRTIO_PCIDEV_H +#define _UAPI_LINUX_VIRTIO_PCIDEV_H +#include <linux/types.h> + +/** + * enum virtio_pcidev_ops - virtual PCI device operations + * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors + * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8; + * the @data field contains the data to write (in little endian). 
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable; + * the @data field should be filled in by the device (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable; + * the @data field contains the data to write (in little endian). + * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but + * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE) + * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the @addr field is 1-4 for + * the interrupt number + * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports + * the 16- or 32-bit write that would otherwise be done into memory, + * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above + * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be + * all zeroes) to signal the PME# pin. + */ +enum virtio_pcidev_ops { + VIRTIO_PCIDEV_OP_RESERVED = 0, + VIRTIO_PCIDEV_OP_CFG_READ, + VIRTIO_PCIDEV_OP_CFG_WRITE, + VIRTIO_PCIDEV_OP_MMIO_READ, + VIRTIO_PCIDEV_OP_MMIO_WRITE, + VIRTIO_PCIDEV_OP_MMIO_MEMSET, + VIRTIO_PCIDEV_OP_INT, + VIRTIO_PCIDEV_OP_MSI, + VIRTIO_PCIDEV_OP_PME, +}; + +/** + * struct virtio_pcidev_msg - virtio PCI device operation + * @op: the operation to do + * @bar: the bar (only with BAR read/write messages) + * @reserved: reserved + * @size: the size of the read/write (in bytes) + * @addr: the address to read/write + * @data: the data, normally @size long, but just one byte for + * %VIRTIO_PCIDEV_OP_MMIO_MEMSET + * + * Note: the fields are all in native (CPU) endian; however, the + * @data values will often be in little endian (see the ops above). + */ +struct virtio_pcidev_msg { + __u8 op; + __u8 bar; + __u16 reserved; + __u32 size; + __u64 addr; + __u8 data[]; +}; + +#endif /* _UAPI_LINUX_VIRTIO_PCIDEV_H */ diff --git a/include/uapi/linux/virtio_scmi.h b/include/uapi/linux/virtio_scmi.h new file mode 100644 index 000000000000..f8ddd04a3ace --- /dev/null +++ b/include/uapi/linux/virtio_scmi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (C) 2020-2021 OpenSynergy GmbH + * Copyright (C) 2021 ARM Ltd. + */ + +#ifndef _UAPI_LINUX_VIRTIO_SCMI_H +#define _UAPI_LINUX_VIRTIO_SCMI_H + +#include <linux/virtio_types.h> + +/* Device implements some SCMI notifications, or delayed responses. 
*/ +#define VIRTIO_SCMI_F_P2A_CHANNELS 0 + +/* Device implements any SCMI statistics shared memory region */ +#define VIRTIO_SCMI_F_SHARED_MEMORY 1 + +/* Virtqueues */ + +#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */ +#define VIRTIO_SCMI_VQ_RX 1 /* eventq */ +#define VIRTIO_SCMI_VQ_MAX_CNT 2 + +#endif /* _UAPI_LINUX_VIRTIO_SCMI_H */ diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h index 1d57ed3d84d2..64738838bee5 100644 --- a/include/uapi/linux/virtio_vsock.h +++ b/include/uapi/linux/virtio_vsock.h @@ -38,6 +38,9 @@ #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +/* The feature bitmap for virtio vsock */ +#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */ + struct virtio_vsock_config { __le64 guest_cid; } __attribute__((packed)); @@ -65,6 +68,7 @@ struct virtio_vsock_hdr { enum virtio_vsock_type { VIRTIO_VSOCK_TYPE_STREAM = 1, + VIRTIO_VSOCK_TYPE_SEQPACKET = 2, }; enum virtio_vsock_op { @@ -91,4 +95,10 @@ enum virtio_vsock_shutdown { VIRTIO_VSOCK_SHUTDOWN_SEND = 2, }; +/* VIRTIO_VSOCK_OP_RW flags values */ +enum virtio_vsock_rw { + VIRTIO_VSOCK_SEQ_EOM = 1, + VIRTIO_VSOCK_SEQ_EOR = 2, +}; + #endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */ diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h new file mode 100644 index 000000000000..32a2720b4d11 --- /dev/null +++ b/include/uapi/linux/wwan.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2021 Intel Corporation. + */ +#ifndef _UAPI_WWAN_H_ +#define _UAPI_WWAN_H_ + +enum { + IFLA_WWAN_UNSPEC, + IFLA_WWAN_LINK_ID, /* u32 */ + + __IFLA_WWAN_MAX +}; +#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1) + +#endif /* _UAPI_WWAN_H_ */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7..b96c1ea7166d 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -213,6 +213,11 @@ enum { XFRM_MSG_GETSPDINFO, #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO + XFRM_MSG_SETDEFAULT, +#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT + XFRM_MSG_GETDEFAULT, +#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT + XFRM_MSG_MAPPING, #define XFRM_MSG_MAPPING XFRM_MSG_MAPPING __XFRM_MSG_MAX @@ -508,6 +513,12 @@ struct xfrm_user_offload { #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 +struct xfrm_userpolicy_default { +#define XFRM_USERPOLICY_DIRMASK_MAX (sizeof(__u8) * 8) + __u8 dirmask; + __u8 action; +}; + #ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1 diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 6d2d34c9f375..7cc2a0f3f2f5 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -276,7 +276,17 @@ enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, HL_DEVICE_STATUS_MALFUNCTION, - HL_DEVICE_STATUS_NEEDS_RESET + HL_DEVICE_STATUS_NEEDS_RESET, + HL_DEVICE_STATUS_IN_DEVICE_CREATION, + HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION +}; + +enum hl_server_type { + HL_SERVER_TYPE_UNKNOWN = 0, + HL_SERVER_GAUDI_HLS1 = 1, + HL_SERVER_GAUDI_HLS1H = 2, + HL_SERVER_GAUDI_TYPE1 = 3, + HL_SERVER_GAUDI_TYPE2 = 4 }; /* Opcode for management ioctl @@ -313,6 +323,7 @@ enum hl_device_status { * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency + * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls */ #define HL_INFO_HW_IP_INFO 0 #define 
diff --git a/include/uapi/linux/wwan.h b/include/uapi/linux/wwan.h new file mode 100644 index 000000000000..32a2720b4d11 --- /dev/null +++ b/include/uapi/linux/wwan.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2021 Intel Corporation. + */ +#ifndef _UAPI_WWAN_H_ +#define _UAPI_WWAN_H_ + +enum { + IFLA_WWAN_UNSPEC, + IFLA_WWAN_LINK_ID, /* u32 */ + + __IFLA_WWAN_MAX +}; +#define IFLA_WWAN_MAX (__IFLA_WWAN_MAX - 1) + +#endif /* _UAPI_WWAN_H_ */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7..b96c1ea7166d 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -213,6 +213,11 @@ enum { XFRM_MSG_GETSPDINFO, #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO + XFRM_MSG_SETDEFAULT, +#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT + XFRM_MSG_GETDEFAULT, +#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT + XFRM_MSG_MAPPING, #define XFRM_MSG_MAPPING XFRM_MSG_MAPPING __XFRM_MSG_MAX @@ -508,6 +513,12 @@ struct xfrm_user_offload { #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 +struct xfrm_userpolicy_default { +#define XFRM_USERPOLICY_DIRMASK_MAX (sizeof(__u8) * 8) + __u8 dirmask; + __u8 action; +}; + #ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 6d2d34c9f375..7cc2a0f3f2f5 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -276,7 +276,17 @@ enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, HL_DEVICE_STATUS_MALFUNCTION, - HL_DEVICE_STATUS_NEEDS_RESET + HL_DEVICE_STATUS_NEEDS_RESET, + HL_DEVICE_STATUS_IN_DEVICE_CREATION, + HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION +}; + +enum hl_server_type { + HL_SERVER_TYPE_UNKNOWN = 0, + HL_SERVER_GAUDI_HLS1 = 1, + HL_SERVER_GAUDI_HLS1H = 2, + HL_SERVER_GAUDI_TYPE1 = 3, + HL_SERVER_GAUDI_TYPE2 = 4 }; /* Opcode for management ioctl @@ -313,6 +323,7 @@ enum hl_device_status { * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency + * HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -331,21 +342,54 @@ enum hl_device_status { #define HL_INFO_TOTAL_ENERGY 15 #define HL_INFO_PLL_FREQUENCY 16 #define HL_INFO_POWER 17 +#define HL_INFO_OPEN_STATS 18 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 +/** + * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC + * @sram_base_address: The first SRAM physical base address that is free to be + * used by the user. + * @dram_base_address: The first DRAM virtual or physical base address that is + * free to be used by the user. + * @dram_size: The DRAM size that is available to the user. + * @sram_size: The SRAM size that is available to the user. + * @num_of_events: The number of events that can be received from the f/w. This + * is needed so the user can know the size of the h/w events + * array to pass to the kernel when fetching + * the event counters. + * @device_id: PCI device ID of the ASIC. + * @module_id: Module ID of the ASIC for mezzanine cards in servers + * (From OCP spec). + * @first_available_interrupt_id: The first available interrupt ID for the user + * to be used when it works with user interrupts. + * @server_type: Server type that the Gaudi ASIC is currently installed in. + * The value is according to enum hl_server_type. + * @cpld_version: CPLD version on the board. + * @psoc_pci_pll_nr: PCI PLL NR value. Needed by the profiler in some ASICs. + * @psoc_pci_pll_nf: PCI PLL NF value. Needed by the profiler in some ASICs. + * @psoc_pci_pll_od: PCI PLL OD value. Needed by the profiler in some ASICs. + * @psoc_pci_pll_div_factor: PCI PLL DIV factor value. Needed by the profiler + * in some ASICs. + * @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant + * for Goya/Gaudi only. + * @dram_enabled: Whether the DRAM is enabled. + * @cpucp_version: The CPUCP f/w version. + * @card_name: The card name as passed by the f/w. + * @dram_page_size: The DRAM physical page size. + */ struct hl_info_hw_ip_info { __u64 sram_base_address; __u64 dram_base_address; __u64 dram_size; __u32 sram_size; __u32 num_of_events; - __u32 device_id; /* PCI Device ID */ - __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */ + __u32 device_id; + __u32 module_id; __u32 reserved; __u16 first_available_interrupt_id; - __u16 reserved2; + __u16 server_type; __u32 cpld_version; __u32 psoc_pci_pll_nr; __u32 psoc_pci_pll_nf; @@ -356,7 +400,7 @@ struct hl_info_hw_ip_info { __u8 pad[2]; __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN]; __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN]; - __u64 reserved3; + __u64 reserved2; __u64 dram_page_size; }; @@ -445,6 +489,16 @@ struct hl_pll_frequency_info { }; /** + * struct hl_open_stats_info - device open statistics information + * @open_counter: ever growing counter, increased on each successful dev open + * @last_open_period_ms: duration (ms) device was open last time + */ +struct hl_open_stats_info { + __u64 open_counter; + __u64 last_open_period_ms; +}; + +/** * struct hl_power_info - power information * @power: power consumption */
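For orientation, all of the HL_INFO_* opcodes above are fetched the same way: user space points struct hl_info_args at a return buffer and issues the driver's HL_IOCTL_INFO ioctl (defined elsewhere in this header). A minimal sketch reading the new open statistics; the /dev/hl0 node name is an assumption and error handling is trimmed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <misc/habanalabs.h>

	int main(void)
	{
		struct hl_open_stats_info stats;
		struct hl_info_args args;
		int fd = open("/dev/hl0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;
		memset(&args, 0, sizeof(args));
		memset(&stats, 0, sizeof(stats));
		args.op = HL_INFO_OPEN_STATS;
		args.return_pointer = (__u64)(uintptr_t)&stats;
		args.return_size = sizeof(stats);
		if (!ioctl(fd, HL_IOCTL_INFO, &args))
			printf("opens: %llu, last open: %llu ms\n",
			       (unsigned long long)stats.open_counter,
			       (unsigned long long)stats.last_open_period_ms);
		close(fd);
		return 0;
	}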
@@ -616,12 +670,21 @@ struct hl_cs_chunk { __u64 cb_handle; /* Relevant only when HL_CS_FLAGS_WAIT or - * HL_CS_FLAGS_COLLECTIVE_WAIT is set. + * HL_CS_FLAGS_COLLECTIVE_WAIT is set * This holds address of array of u64 values that contain - * signal CS sequence numbers. The wait described by this job - * will listen on all those signals (wait event per signal) + * signal CS sequence numbers. The wait described by + * this job will listen on all those signals + * (wait event per signal) */ __u64 signal_seq_arr; + + /* + * Relevant only when HL_CS_FLAGS_WAIT or + * HL_CS_FLAGS_COLLECTIVE_WAIT is set + * along with HL_CS_FLAGS_ENCAP_SIGNALS. + * This is the CS sequence which has the encapsulated signals. + */ + __u64 encaps_signal_seq; }; /* Index of queue to put the CB on */ @@ -639,6 +702,17 @@ struct hl_cs_chunk { * Number of entries in signal_seq_arr */ __u32 num_signal_seq_arr; + + /* Relevant only when HL_CS_FLAGS_WAIT or + * HL_CS_FLAGS_COLLECTIVE_WAIT is set along + * with HL_CS_FLAGS_ENCAP_SIGNALS. + * This sets the signal range the user wants to wait for + * out of the whole reserved signal range. + * E.g. if the signal range is 20 and the user does not want + * to wait for signal 8, this offset is set to 7, and the API + * is then called again with 9 and so on, up to 20. + */ + __u32 encaps_signal_offset; }; /* HL_CS_CHUNK_FLAGS_* */ @@ -664,6 +738,29 @@ struct hl_cs_chunk { #define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 #define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100 #define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200 +#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400 + +/* + * The encapsulated signals CS is merged into the existing CS ioctls. + * In order to use this feature, the following procedure must be followed: + * 1. Reserve signals: set the CS type to HL_CS_FLAGS_RESERVE_SIGNALS_ONLY. + * The output of this API is the SOB offset from CFG_BASE; this address + * is used to patch CB cmds to do the signaling for this SOB by + * incrementing its value. + * To revert the reservation, use the HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY + * CS type; note that this might fail if the SOB value went out of sync, + * i.e. another signaling request to the same SOB occurred between the + * reserve and unreserve calls. + * 2. Use the staged CS to do the encapsulated signaling jobs: + * use HL_CS_FLAGS_STAGED_SUBMISSION and HL_CS_FLAGS_STAGED_SUBMISSION_FIRST + * along with the HL_CS_FLAGS_ENCAP_SIGNALS flag, and set the + * encaps_signal_offset field. This offset allows the app to wait on part + * of the reserved signals. + * 3. Use a WAIT/COLLECTIVE WAIT CS along with the HL_CS_FLAGS_ENCAP_SIGNALS + * flag to wait for the encapsulated signals. + */ +#define HL_CS_FLAGS_ENCAP_SIGNALS 0x800 +#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000 +#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
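To make step 1 of the procedure above concrete, here is a hedged sketch of a signal reservation call. It assumes an already-open device fd, the usual HL_IOCTL_CS entry point, and the cs_flags field of struct hl_cs_in (both defined elsewhere in this header); the helper name is illustrative:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>

	/* Step 1: reserve count signals on queue q_idx. Returns the handle id
	 * (needed for steps 2 and 3 and for unreserve) and the SOB offset
	 * from CFG_BASE that signaling CBs should increment.
	 */
	static int reserve_signals(int fd, __u32 count, __u32 q_idx,
				   __u32 *handle_id, __u32 *sob_offset)
	{
		union hl_cs_args args;

		memset(&args, 0, sizeof(args));
		args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
		args.in.encaps_signals_count = count;
		args.in.encaps_signals_q_idx = q_idx;
		if (ioctl(fd, HL_IOCTL_CS, &args))
			return -1;
		*handle_id = args.out.handle_id;
		*sob_offset = args.out.sob_base_addr_offset;
		return 0;
	}

Unreserving follows the same shape with HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY and encaps_sig_handle_id set to the returned handle.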
#define HL_CS_STATUS_SUCCESS 0 @@ -677,10 +774,35 @@ struct hl_cs_in { /* holds address of array of hl_cs_chunk for execution phase */ __u64 chunks_execute; - /* Sequence number of a staged submission CS - * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set - */ - __u64 seq; + union { + /* + * Sequence number of a staged submission CS + * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set and + * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST is unset. + */ + __u64 seq; + + /* + * Encapsulated signals handle id + * Valid for two flows: + * 1. CS with encapsulated signals: + * when HL_CS_FLAGS_STAGED_SUBMISSION and + * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST + * and HL_CS_FLAGS_ENCAP_SIGNALS are set. + * 2. Unreserve signals: + * valid when HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY is set. + */ + __u32 encaps_sig_handle_id; + + /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */ + struct { + /* Encapsulated signals number */ + __u32 encaps_signals_count; + + /* Encapsulated signals queue index (stream) */ + __u32 encaps_signals_q_idx; + }; + }; /* Number of chunks in restore phase array. Maximum number is * HL_MAX_JOBS_PER_CS @@ -705,14 +827,31 @@ struct hl_cs_in { }; struct hl_cs_out { + union { + /* + * seq holds the sequence number of the CS to pass to the wait + * ioctl. All values are valid except for 0 and ULLONG_MAX + */ + __u64 seq; + + /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */ + struct { + /* This is the reserved signal handle id */ + __u32 handle_id; + + /* This is the signals count */ + __u32 count; + }; + }; + + /* HL_CS_STATUS */ + __u32 status; + + /* - * seq holds the sequence number of the CS to pass to wait ioctl. All - * values are valid except for 0 and ULLONG_MAX + * SOB base address offset + * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */ - __u64 seq; - /* HL_CS_STATUS_* */ - __u32 status; - __u32 pad; + __u32 sob_base_addr_offset; }; union hl_cs_args { @@ -722,11 +861,18 @@ union hl_cs_args { #define HL_WAIT_CS_FLAGS_INTERRUPT 0x2 #define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000 +#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4 + +#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32 struct hl_wait_cs_in { union { struct { - /* Command submission sequence number */ + /* + * In case of wait_cs, holds the CS sequence number. + * In case of wait for multi CS, holds a user pointer to + * an array of CS sequence numbers + */ __u64 seq; /* Absolute timeout to wait for command submission * in microseconds @@ -754,12 +900,17 @@ struct hl_wait_cs_in { /* Context ID - Currently not in use */ __u32 ctx_id; + /* HL_WAIT_CS_FLAGS_* * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include * the interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK; in order * not to specify an interrupt id, set the mask to all 1s. */ __u32 flags; + + /* Multi CS API info - valid entries in multi-CS array */ + __u8 seq_arr_len; + __u8 pad[7]; }; #define HL_WAIT_CS_STATUS_COMPLETED 0 @@ -776,8 +927,15 @@ struct hl_wait_cs_out { __u32 status; /* HL_WAIT_CS_STATUS_FLAG* */ __u32 flags; - /* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set */ + /* + * valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set + * for wait_cs: timestamp of CS completion + * for wait_multi_cs: timestamp of FIRST CS completion + */ __s64 timestamp_nsec; + /* multi CS completion bitmap */ + __u32 cs_completion_map; + __u32 pad; }; union hl_wait_cs_args { @@ -800,6 +958,7 @@ union hl_wait_cs_args { #define HL_MEM_CONTIGUOUS 0x1 #define HL_MEM_SHARED 0x2 #define HL_MEM_USERPTR 0x4 +#define HL_MEM_FORCE_HINT 0x8 struct hl_mem_in { union { diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index dc52e3cf574c..b1de99bf56ce 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -49,7 +49,14 @@ #define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18 enum { - BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL + BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL, + BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL, +}; + +enum bnxt_re_wqe_mode { + BNXT_QPLIB_WQE_MODE_STATIC = 0x00, + BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01, + BNXT_QPLIB_WQE_MODE_INVALID = 0x02, }; struct bnxt_re_uctx_resp { @@ -62,6 +69,8 @@ struct bnxt_re_uctx_resp { __aligned_u64 comp_mask; __u32 chip_id0; __u32 chip_id1; + __u32 mode; + __u32 rsvd1; /* padding */ }; /* diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h deleted file mode 100644 index 79890baa6fdb..000000000000 --- a/include/uapi/rdma/i40iw-abi.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved. - * Copyright (c) 2005 Topspin Communications. All rights reserved. 
- * Copyright (c) 2005 Cisco Systems. All rights reserved. - * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef I40IW_ABI_H -#define I40IW_ABI_H - -#include <linux/types.h> - -#define I40IW_ABI_VER 5 - -struct i40iw_alloc_ucontext_req { - __u32 reserved32; - __u8 userspace_ver; - __u8 reserved8[3]; -}; - -struct i40iw_alloc_ucontext_resp { - __u32 max_pds; /* maximum pds allowed for this user process */ - __u32 max_qps; /* maximum qps allowed for this user process */ - __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */ - __u8 kernel_ver; - __u8 reserved[3]; -}; - -struct i40iw_alloc_pd_resp { - __u32 pd_id; - __u8 reserved[4]; -}; - -struct i40iw_create_cq_req { - __aligned_u64 user_cq_buffer; - __aligned_u64 user_shadow_area; -}; - -struct i40iw_create_qp_req { - __aligned_u64 user_wqe_buffers; - __aligned_u64 user_compl_ctx; - - /* UDA QP PHB */ - __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */ - __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */ -}; - -enum i40iw_memreg_type { - IW_MEMREG_TYPE_MEM = 0x0000, - IW_MEMREG_TYPE_QP = 0x0001, - IW_MEMREG_TYPE_CQ = 0x0002, -}; - -struct i40iw_mem_reg_req { - __u16 reg_type; /* Memory, QP or CQ */ - __u16 cq_pages; - __u16 rq_pages; - __u16 sq_pages; -}; - -struct i40iw_create_cq_resp { - __u32 cq_id; - __u32 cq_size; - __u32 mmap_db_index; - __u32 reserved; -}; - -struct i40iw_create_qp_resp { - __u32 qp_id; - __u32 actual_sq_size; - __u32 actual_rq_size; - __u32 i40iw_drv_opt; - __u16 push_idx; - __u8 lsmm; - __u8 rsvd2; -}; - -#endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 22483799cd07..3072e5d6b692 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -240,6 +240,7 @@ enum rdma_driver_id { RDMA_DRIVER_OCRDMA, RDMA_DRIVER_NES, RDMA_DRIVER_I40IW, + RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW, RDMA_DRIVER_VMW_PVRDMA, RDMA_DRIVER_QEDR, RDMA_DRIVER_HNS, diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 90c0cf228020..10b5f6a4c677 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ 
-143,7 +143,7 @@ struct ib_user_mad_hdr { */ struct ib_user_mad { struct ib_user_mad_hdr hdr; - __aligned_u64 data[0]; + __aligned_u64 data[]; }; /* diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h new file mode 100644 index 000000000000..a7085e092d34 --- /dev/null +++ b/include/uapi/rdma/irdma-abi.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */ +/* + * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + */ + +#ifndef IRDMA_ABI_H +#define IRDMA_ABI_H + +#include <linux/types.h> + +/* irdma must support legacy GEN_1 i40iw kernel + * and user-space whose last ABI ver is 5 + */ +#define IRDMA_ABI_VER 5 + +enum irdma_memreg_type { + IRDMA_MEMREG_TYPE_MEM = 0, + IRDMA_MEMREG_TYPE_QP = 1, + IRDMA_MEMREG_TYPE_CQ = 2, +}; + +struct irdma_alloc_ucontext_req { + __u32 rsvd32; + __u8 userspace_ver; + __u8 rsvd8[3]; +}; + +struct irdma_alloc_ucontext_resp { + __u32 max_pds; + __u32 max_qps; + __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */ + __u8 kernel_ver; + __u8 rsvd[3]; + __aligned_u64 feature_flags; + __aligned_u64 db_mmap_key; + __u32 max_hw_wq_frags; + __u32 max_hw_read_sges; + __u32 max_hw_inline; + __u32 max_hw_rq_quanta; + __u32 max_hw_wq_quanta; + __u32 min_hw_cq_size; + __u32 max_hw_cq_size; + __u16 max_hw_sq_chunk; + __u8 hw_rev; + __u8 rsvd2; +}; + +struct irdma_alloc_pd_resp { + __u32 pd_id; + __u8 rsvd[4]; +}; + +struct irdma_resize_cq_req { + __aligned_u64 user_cq_buffer; +}; + +struct irdma_create_cq_req { + __aligned_u64 user_cq_buf; + __aligned_u64 user_shadow_area; +}; + +struct irdma_create_qp_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; +}; + +struct irdma_mem_reg_req { + __u16 reg_type; /* enum irdma_memreg_type */ + __u16 cq_pages; + __u16 rq_pages; + __u16 sq_pages; +}; + +struct irdma_modify_qp_req { + __u8 sq_flush; + __u8 rq_flush; + __u8 rsvd[6]; +}; + +struct irdma_create_cq_resp { + __u32 cq_id; + __u32 cq_size; +}; + +struct irdma_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 irdma_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd; + __u32 qp_caps; +}; + +struct irdma_modify_qp_resp { + __aligned_u64 push_wqe_mmap_key; + __aligned_u64 push_db_mmap_key; + __u16 push_offset; + __u8 push_valid; + __u8 rsvd[5]; +}; + +struct irdma_create_ah_resp { + __u32 ah_id; + __u8 rsvd[4]; +}; +#endif /* IRDMA_ABI_H */ diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 27905a0268c9..86be4a92b67b 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -50,6 +50,7 @@ enum { MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9, MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10, + MLX5_QP_FLAG_DCI_STREAM = 1 << 11, }; enum { @@ -101,6 +102,8 @@ enum mlx5_ib_alloc_ucontext_resp_mask { MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2, + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3, + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4, }; enum mlx5_user_cmds_supp_uhw { @@ -236,6 +239,11 @@ struct mlx5_ib_striding_rq_caps { __u32 reserved; }; +struct mlx5_ib_dci_streams_caps { + __u8 max_log_num_concurent; + __u8 
max_log_num_errored; +}; + enum mlx5_ib_query_dev_resp_flags { /* Support 128B CQE compression */ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, @@ -264,12 +272,14 @@ struct mlx5_ib_query_device_resp { struct mlx5_ib_sw_parsing_caps sw_parsing_caps; struct mlx5_ib_striding_rq_caps striding_rq_caps; __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */ - __u32 reserved; + struct mlx5_ib_dci_streams_caps dci_streams_caps; + __u16 reserved; }; enum mlx5_ib_create_cq_flags { MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1, + MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS = 1 << 2, }; struct mlx5_ib_create_cq { @@ -310,6 +320,11 @@ struct mlx5_ib_create_srq_resp { __u32 reserved; }; +struct mlx5_ib_create_qp_dci_streams { + __u8 log_num_concurent; + __u8 log_num_errored; +}; + struct mlx5_ib_create_qp { __aligned_u64 buf_addr; __aligned_u64 db_addr; @@ -324,7 +339,8 @@ struct mlx5_ib_create_qp { __aligned_u64 access_key; }; __u32 ece_options; - __u32 reserved; + struct mlx5_ib_create_qp_dci_streams dci_streams; + __u16 reserved; }; /* RX Hash function flags */ diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index 068433e2229d..e283c2220aba 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -99,7 +99,16 @@ struct rxe_send_wr { __u32 remote_qkey; __u16 pkey_index; } ud; + struct { + __aligned_u64 addr; + __aligned_u64 length; + __u32 mr_lkey; + __u32 mw_rkey; + __u32 rkey; + __u32 access; + } mw; /* reg is only used by the kernel and is not part of the uapi */ +#ifdef __KERNEL__ struct { union { struct ib_mr *mr; @@ -108,6 +117,7 @@ struct rxe_send_wr { __u32 key; __u32 access; } reg; +#endif } wr; }; diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index 91d4be987220..c9812c5c2fc4 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -41,6 +41,7 @@ enum fc_els_cmd { ELS_REC = 0x13, /* read exchange concise */ ELS_SRR = 0x14, /* sequence retransmission request */ ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */ + ELS_EDC = 0x17, /* Exchange Diagnostic Capabilities */ ELS_RDP = 0x18, /* Read Diagnostic Parameters */ ELS_RDF = 0x19, /* Register Diagnostic Functions */ ELS_PRLI = 0x20, /* process login */ @@ -111,6 +112,7 @@ enum fc_els_cmd { [ELS_REC] = "REC", \ [ELS_SRR] = "SRR", \ [ELS_FPIN] = "FPIN", \ + [ELS_EDC] = "EDC", \ [ELS_RDP] = "RDP", \ [ELS_RDF] = "RDF", \ [ELS_PRLI] = "PRLI", \ @@ -218,6 +220,10 @@ enum fc_els_rjt_explan { enum fc_ls_tlv_dtag { ELS_DTAG_LS_REQ_INFO = 0x00000001, /* Link Service Request Information Descriptor */ + ELS_DTAG_LNK_FAULT_CAP = 0x0001000D, + /* Link Fault Capability Descriptor */ + ELS_DTAG_CG_SIGNAL_CAP = 0x0001000F, + /* Congestion Signaling Capability Descriptor */ ELS_DTAG_LNK_INTEGRITY = 0x00020001, /* Link Integrity Notification Descriptor */ ELS_DTAG_DELIVERY = 0x00020002, @@ -236,6 +242,8 @@ enum fc_ls_tlv_dtag { */ #define FC_LS_TLV_DTAG_INIT { \ { ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \ + { ELS_DTAG_LNK_FAULT_CAP, "Link Fault Capability" }, \ + { ELS_DTAG_CG_SIGNAL_CAP, "Congestion Signaling Capability" }, \ { ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \ { ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \ { ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \ @@ -1144,4 +1152,102 @@ struct fc_els_rdf_resp { }; +/* + * Diagnostic Capability Descriptors for EDC ELS + */ + +/* + * Diagnostic: Link Fault Capability 
Descriptor + */ +struct fc_diag_lnkflt_desc { + __be32 desc_tag; /* Descriptor Tag (0x0001000D) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + * 12 bytes + */ + __be32 degrade_activate_threshold; + __be32 degrade_deactivate_threshold; + __be32 fec_degrade_interval; +}; + +enum fc_edc_cg_signal_cap_types { + /* Note: Capability: bits 31:4 Rsvd; bits 3:0 are capabilities */ + EDC_CG_SIG_NOTSUPPORTED = 0x00, /* neither supported */ + EDC_CG_SIG_WARN_ONLY = 0x01, + EDC_CG_SIG_WARN_ALARM = 0x02, /* both supported */ +}; + +/* + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. + */ +#define FC_EDC_CG_SIGNAL_CAP_TYPES_INIT { \ + { EDC_CG_SIG_NOTSUPPORTED, "Signaling Not Supported" }, \ + { EDC_CG_SIG_WARN_ONLY, "Warning Signal" }, \ + { EDC_CG_SIG_WARN_ALARM, "Warning and Alarm Signals" }, \ +} + +enum fc_diag_cg_sig_freq_types { + EDC_CG_SIGFREQ_CNT_MIN = 1, /* Min Frequency Count */ + EDC_CG_SIGFREQ_CNT_MAX = 999, /* Max Frequency Count */ + + EDC_CG_SIGFREQ_SEC = 0x1, /* Units: seconds */ + EDC_CG_SIGFREQ_MSEC = 0x2, /* Units: milliseconds */ +}; + +struct fc_diag_cg_sig_freq { + __be16 count; /* Time between signals + * note: upper 6 bits rsvd + */ + __be16 units; /* Time unit for count + * note: upper 12 bits rsvd + */ +}; + +/* + * Diagnostic: Congestion Signaling Capability Descriptor + */ +struct fc_diag_cg_sig_desc { + __be32 desc_tag; /* Descriptor Tag (0x0001000F) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + * 16 bytes + */ + __be32 xmt_signal_capability; + struct fc_diag_cg_sig_freq xmt_signal_frequency; + __be32 rcv_signal_capability; + struct fc_diag_cg_sig_freq rcv_signal_frequency; +}; + +/* + * ELS_EDC - Exchange Diagnostic Capabilities + */ +struct fc_els_edc { + __u8 edc_cmd; /* command (0x17) */ + __u8 edc_zero[3]; /* specified as zero - part of cmd */ + __be32 desc_len; /* Length of Descriptor List (in bytes). + * Size of ELS excluding edc_cmd, + * edc_zero and desc_len fields. + */ + struct fc_tlv_desc desc[0]; + /* Diagnostic Descriptor list */ +}; + +/* + * ELS EDC LS_ACC Response. + */ +struct fc_els_edc_resp { + struct fc_els_ls_acc acc_hdr; + __be32 desc_list_len; /* Length of response (in + * bytes). Excludes acc_hdr + * and desc_list_len fields. + */ + struct fc_els_lsri_desc lsri; + struct fc_tlv_desc desc[0]; + /* Supported Diagnostic Descriptor list */ +}; + + #endif /* _FC_ELS_H_ */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 535a7229e1d9..1d84ec9db93b 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -299,6 +299,7 @@ typedef int __bitwise snd_pcm_subformat_t; #define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */ #define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */ #define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */ +#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */ #define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */ #define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ @@ -710,7 +711,7 @@ enum { * Raw MIDI section - /dev/snd/midi?? 
*/ -#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1) +#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2) enum { SNDRV_RAWMIDI_STREAM_OUTPUT = 0, @@ -736,12 +737,38 @@ struct snd_rawmidi_info { unsigned char reserved[64]; /* reserved for future use */ }; +#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0 +#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0) +#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0) +#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3 +#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3) +#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3) + +#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16 + +struct snd_rawmidi_framing_tstamp { + /* For now, frame_type is always 0. Midi 2.0 is expected to add new + * types here. Applications are expected to skip unknown frame types. + */ + __u8 frame_type; + __u8 length; /* number of valid bytes in data field */ + __u8 reserved[2]; + __u32 tv_nsec; /* nanoseconds */ + __u64 tv_sec; /* seconds */ + __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH]; +} __packed; + struct snd_rawmidi_params { int stream; size_t buffer_size; /* queue size in bytes */ size_t avail_min; /* minimum avail bytes for wakeup */ unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */ - unsigned char reserved[16]; /* reserved for future use */ + unsigned int mode; /* For input data only, frame incoming data */ + unsigned char reserved[12]; /* reserved for future use */ }; #ifndef __KERNEL__ diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h index 8ba0112e5336..ff3748e9308a 100644 --- a/include/uapi/sound/snd_sst_tokens.h +++ b/include/uapi/sound/snd_sst_tokens.h @@ -233,6 +233,8 @@ * * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry * + * %SKL_TKN_U32_FMT_CFG_IDX: Format config index + * * module_id and loadable flags dont have tokens as these values will be * read from the DSP FW manifest * @@ -324,7 +326,9 @@ enum SKL_TKNS { SKL_TKN_U32_ASTATE_COUNT, SKL_TKN_U32_ASTATE_KCPS, SKL_TKN_U32_ASTATE_CLK_SRC, - SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC, + + SKL_TKN_U32_FMT_CFG_IDX = 96, + SKL_TKN_MAX = SKL_TKN_U32_FMT_CFG_IDX, }; #endif diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index d1b3889f74d8..c422a403c099 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -17,6 +17,7 @@ #include <linux/bitmap.h> #include <linux/fb.h> #include <linux/of.h> +#include <drm/drm_color_mgmt.h> #include <media/v4l2-mediabus.h> #include <video/videomode.h> @@ -330,6 +331,7 @@ int ipu_dp_enable_channel(struct ipu_dp *dp); void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync); void ipu_dp_disable(struct ipu_soc *ipu); int ipu_dp_setup_channel(struct ipu_dp *dp, + enum drm_color_encoding ycbcr_enc, enum drm_color_range range, enum ipu_color_space in, enum ipu_color_space out); int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos); int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha, diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 2af7a1cd6658..b39cdbc522ec 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -1,21 +1,53 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /****************************************************************************** * ring.h * * Shared producer-consumer ring 
macros. * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ +/* + * When #include'ing this header, you need to provide the following + * declaration upfront: + * - standard integers types (uint8_t, uint16_t, etc) + * They are provided by stdint.h of the standard headers. + * + * In addition, if you intend to use the FLEX macros, you also need to + * provide the following, before invoking the FLEX macros: + * - size_t + * - memcpy + * - grant_ref_t + * These declarations are provided by string.h of the standard headers, + * and grant_table.h from the Xen public headers. + */ + #include <xen/interface/grant_table.h> typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) @@ -27,82 +59,79 @@ typedef unsigned int RING_IDX; * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ -#define __CONST_RING_SIZE(_s, _sz) \ - (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ - sizeof(((struct _s##_sring *)0)->ring[0]))) - +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) /* * The same for passing in an actual pointer instead of a name tag. */ -#define __RING_SIZE(_s, _sz) \ - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, - * let's say struct request, and struct response already defined. + * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * - * DEFINE_RING_TYPES(mytag, struct request, struct response); + * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. 
* The most important of these are: * - * struct mytag_sring - The shared ring. - * struct mytag_front_ring - The 'front' half of the ring. - * struct mytag_back_ring - The 'back' half of the ring. + * mytag_sring_t - The shared ring. + * mytag_front_ring_t - The 'front' half of the ring. + * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * - * struct mytag_front_ring front_ring; - * SHARED_RING_INIT((struct mytag_sring *)shared_page); - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, - * PAGE_SIZE); + * mytag_front_ring_t front_ring; + * SHARED_RING_INIT((mytag_sring_t *)shared_page); + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * - * struct mytag_back_ring back_ring; - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, - * PAGE_SIZE); + * mytag_back_ring_t back_ring; + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ - \ -/* Shared ring entry */ \ -union __name##_sring_entry { \ - __req_t req; \ - __rsp_t rsp; \ -}; \ - \ -/* Shared ring page */ \ -struct __name##_sring { \ - RING_IDX req_prod, req_event; \ - RING_IDX rsp_prod, rsp_event; \ - uint8_t pad[48]; \ - union __name##_sring_entry ring[1]; /* variable-length */ \ -}; \ - \ -/* "Front" end's private variables */ \ -struct __name##_front_ring { \ - RING_IDX req_prod_pvt; \ - RING_IDX rsp_cons; \ - unsigned int nr_ents; \ - struct __name##_sring *sring; \ -}; \ - \ -/* "Back" end's private variables */ \ -struct __name##_back_ring { \ - RING_IDX rsp_prod_pvt; \ - RING_IDX req_cons; \ - unsigned int nr_ents; \ - struct __name##_sring *sring; \ -}; - +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ + \ +/* Shared ring entry */ \ +union __name##_sring_entry { \ + __req_t req; \ + __rsp_t rsp; \ +}; \ + \ +/* Shared ring page */ \ +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ + uint8_t __pad[48]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ +/* "Front" end's private variables */ \ +struct __name##_front_ring { \ + RING_IDX req_prod_pvt; \ + RING_IDX rsp_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* "Back" end's private variables */ \ +struct __name##_back_ring { \ + RING_IDX rsp_prod_pvt; \ + RING_IDX req_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ /* * Macros for manipulating rings. 
* @@ -119,94 +148,99 @@ struct __name##_back_ring { \ */ /* Initialising empty rings */ -#define SHARED_RING_INIT(_s) do { \ - (_s)->req_prod = (_s)->rsp_prod = 0; \ - (_s)->req_event = (_s)->rsp_event = 1; \ - memset((_s)->pad, 0, sizeof((_s)->pad)); \ +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ } while(0) -#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ - (_r)->req_prod_pvt = (_i); \ - (_r)->rsp_cons = (_i); \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ - (_r)->sring = (_s); \ +#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->req_prod_pvt = (_i); \ + (_r)->rsp_cons = (_i); \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ } while (0) #define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) -#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ - (_r)->rsp_prod_pvt = (_i); \ - (_r)->req_cons = (_i); \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ - (_r)->sring = (_s); \ +#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ + (_r)->rsp_prod_pvt = (_i); \ + (_r)->req_cons = (_i); \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size) /* How big is this ring? */ -#define RING_SIZE(_r) \ +#define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ -#define RING_FREE_REQUESTS(_r) \ +#define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ -#define RING_FULL(_r) \ +#define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ - ({ \ - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ - unsigned int rsp = RING_SIZE(_r) - \ - ((_r)->req_cons - (_r)->rsp_prod_pvt); \ - req < rsp ? req : rsp; \ - }) +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ + unsigned int rsp = RING_SIZE(_r) - \ + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ + req < rsp ? req : rsp; \ +}) /* Direct access to individual ring elements, by index. */ -#define RING_GET_REQUEST(_r, _idx) \ +#define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + /* - * Get a local copy of a request. + * Get a local copy of a request/response. * - * Use this in preference to RING_GET_REQUEST() so all processing is + * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is * done on a local copy that cannot be modified by the other end. * * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this - * to be ineffective where _req is a struct which consists of only bitfields. + * to be ineffective where dest is a struct which consists of only bitfields. */ -#define RING_COPY_REQUEST(_r, _idx, _req) do { \ - /* Use volatile to force the copy into _req. 
*/ \ - *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ +#define RING_COPY_(type, r, idx, dest) do { \ + /* Use volatile to force the copy into dest. */ \ + *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \ } while (0) -#define RING_GET_RESPONSE(_r, _idx) \ - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) +#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req) +#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp) /* Loop termination condition: Would the specified index overflow the ring? */ -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) /* Ill-behaved frontend determination: Can there be this many requests? */ -#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) +/* Ill-behaved backend determination: Can there be this many responses? */ +#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r)) -#define RING_PUSH_REQUESTS(_r) do { \ - virt_wmb(); /* back sees requests /before/ updated producer index */ \ - (_r)->sring->req_prod = (_r)->req_prod_pvt; \ +#define RING_PUSH_REQUESTS(_r) do { \ + virt_wmb(); /* back sees requests /before/ updated producer index */\ + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) -#define RING_PUSH_RESPONSES(_r) do { \ - virt_wmb(); /* front sees responses /before/ updated producer index */ \ - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ +#define RING_PUSH_RESPONSES(_r) do { \ + virt_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* @@ -239,40 +273,40 @@ struct __name##_back_ring { \ * field appropriately. 
*/ -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ - RING_IDX __old = (_r)->sring->req_prod; \ - RING_IDX __new = (_r)->req_prod_pvt; \ - virt_wmb(); /* back sees requests /before/ updated producer index */ \ - (_r)->sring->req_prod = __new; \ - virt_mb(); /* back sees new requests /before/ we check req_event */ \ - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ - (RING_IDX)(__new - __old)); \ +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->req_prod; \ + RING_IDX __new = (_r)->req_prod_pvt; \ + virt_wmb(); /* back sees requests /before/ updated producer index */\ + (_r)->sring->req_prod = __new; \ + virt_mb(); /* back sees new requests /before/ we check req_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ + (RING_IDX)(__new - __old)); \ } while (0) -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ - RING_IDX __old = (_r)->sring->rsp_prod; \ - RING_IDX __new = (_r)->rsp_prod_pvt; \ - virt_wmb(); /* front sees responses /before/ updated producer index */ \ - (_r)->sring->rsp_prod = __new; \ - virt_mb(); /* front sees new responses /before/ we check rsp_event */ \ - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ - (RING_IDX)(__new - __old)); \ +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->rsp_prod; \ + RING_IDX __new = (_r)->rsp_prod_pvt; \ + virt_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = __new; \ + virt_mb(); /* front sees new resps /before/ we check rsp_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ + (RING_IDX)(__new - __old)); \ } while (0) -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ - if (_work_to_do) break; \ - (_r)->sring->req_event = (_r)->req_cons + 1; \ - virt_mb(); \ - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ + if (_work_to_do) break; \ + (_r)->sring->req_event = (_r)->req_cons + 1; \ + virt_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ - if (_work_to_do) break; \ - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ - virt_mb(); \ - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ + if (_work_to_do) break; \ + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ + virt_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) |
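Putting the above macros together, a front end typically produces requests with RING_GET_REQUEST()/RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() and drains responses with RING_COPY_RESPONSE()/RING_FINAL_CHECK_FOR_RESPONSES(). A minimal sketch under assumed names (mytag and its request/response structs, plus the mytag_kick()/mytag_handle() stubs, are placeholders; a real front end would kick its event channel and do real work on each response):

	#include <xen/interface/io/ring.h>

	struct mytag_request { uint64_t id; };
	struct mytag_response { uint64_t id; int status; };

	DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);

	static void mytag_kick(void) { /* placeholder: notify the backend */ }
	static void mytag_handle(struct mytag_response *rsp) { /* placeholder */ }

	/* Queue one request; the ring must already be set up via FRONT_RING_INIT(). */
	static int mytag_submit(struct mytag_front_ring *front,
				const struct mytag_request *req)
	{
		int notify;

		if (RING_FULL(front))
			return -1;	/* no free slot */
		*RING_GET_REQUEST(front, front->req_prod_pvt) = *req;
		front->req_prod_pvt++;
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front, notify);
		if (notify)
			mytag_kick();
		return 0;
	}

	/* Drain responses; called from the front end's interrupt/event handler. */
	static void mytag_poll(struct mytag_front_ring *front)
	{
		struct mytag_response rsp;
		RING_IDX cons, prod;
		int more;

		do {
			prod = front->sring->rsp_prod;
			virt_rmb();	/* see responses /before/ reading them */
			for (cons = front->rsp_cons; cons != prod; cons++) {
				RING_COPY_RESPONSE(front, cons, &rsp);
				mytag_handle(&rsp);
			}
			front->rsp_cons = cons;
			RING_FINAL_CHECK_FOR_RESPONSES(front, more);
		} while (more);
	}

The RING_FINAL_CHECK_FOR_RESPONSES() step re-arms rsp_event and re-checks for work, which is what closes the race between the consumer sleeping and the producer deciding not to notify.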