Diffstat (limited to 'include/linux')
211 files changed, 10946 insertions, 2777 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 288fac5294f5..db7c8bd39a3c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -208,7 +208,6 @@ void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); -void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@ -232,12 +231,26 @@ int acpi_table_parse_madt(enum acpi_madt_type id, int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); -/* the following four functions are architecture-dependent */ +/* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); -void acpi_numa_arch_fixup(void); #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; @@ -444,8 +457,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 +#define OSC_SB_CPCV2_SUPPORT 0x00000040 +#define OSC_SB_PCLPI_SUPPORT 0x00000080 +#define OSC_SB_OSLPI_SUPPORT 0x00000100 extern bool osc_sb_apei_support_acked; +extern bool osc_pc_lpi_support_confirmed; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 @@ -532,6 +549,24 @@ void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 @@ -543,6 +578,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); struct fwnode_handle; +static inline bool acpi_dev_found(const char *hid) +{ + return false; +} + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; @@ -588,7 +628,6 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return NULL; } -static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } @@ -654,6 +693,14 @@ static inline bool acpi_driver_match_device(struct device *dev, return false; } +static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const u8 *uuid, + int rev, int func, + union acpi_object *argv4) +{ + return NULL; +} + 
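An aside on the reconfiguration hooks just added to acpi.h: acpi_reconfig_notifier_register() gives bus code a way to react when devices appear or disappear through dynamically loaded ACPI tables, and the !CONFIG_ACPI stubs further down (which return -EINVAL) let such callers register unconditionally. Below is a minimal sketch of a consumer; the callback name and the assumption that the notifier payload is the affected struct acpi_device are illustrative, not part of this diff.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/notifier.h>

static int demo_acpi_reconfig_cb(struct notifier_block *nb,
				 unsigned long event, void *arg)
{
	struct acpi_device *adev = arg;	/* assumed payload */

	switch (event) {
	case ACPI_RECONFIG_DEVICE_ADD:
		dev_info(&adev->dev, "hot-added via ACPI table\n");
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		dev_info(&adev->dev, "removed via ACPI table\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_acpi_reconfig_nb = {
	.notifier_call = demo_acpi_reconfig_cb,
};

static int __init demo_init(void)
{
	/* -EINVAL when CONFIG_ACPI=n, per the stubs below */
	return acpi_reconfig_notifier_register(&demo_acpi_reconfig_nb);
}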
static inline int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { @@ -678,6 +725,24 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) #define ACPI_PTR(_ptr) (NULL) +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI @@ -997,4 +1062,10 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif +#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 52f3b7da4f2d..9d8031257a90 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -26,10 +26,10 @@ enum alarmtimer_restart { * struct alarm - Alarm timer structure * @node: timerqueue node for adding to the event list this value * also includes the expiration time. - * @period: Period for recuring alarms + * @timer: hrtimer used to schedule events while running * @function: Function pointer to be executed when the timer fires. - * @type: Alarm type (BOOTTIME/REALTIME) - * @enabled: Flag that represents if the alarm is set to fire or not + * @type: Alarm type (BOOTTIME/REALTIME). + * @state: Flag that represents if the alarm is set to fire or not. * @data: Internal data value. */ struct alarm { diff --git a/include/linux/ata.h b/include/linux/ata.h index 99346be5a7ca..adbc812c009b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -46,8 +46,9 @@ enum { ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, - ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? 
*/ + ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ ATA_ID_WORDS = 256, ATA_ID_CONFIG = 0, @@ -409,6 +410,9 @@ enum { SETFEATURES_WC_ON = 0x02, /* Enable write cache */ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */ + SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */ + /* Enable/Disable Automatic Acoustic Management */ SETFEATURES_AAM_ON = 0x42, SETFEATURES_AAM_OFF = 0xC2, @@ -519,16 +523,23 @@ enum { SERR_DEV_XCHG = (1 << 26), /* device exchanged */ }; -enum ata_tf_protocols { - /* ATA taskfile protocols */ - ATA_PROT_UNKNOWN, /* unknown/invalid */ - ATA_PROT_NODATA, /* no data */ - ATA_PROT_PIO, /* PIO data xfer */ - ATA_PROT_DMA, /* DMA */ - ATA_PROT_NCQ, /* NCQ */ - ATAPI_PROT_NODATA, /* packet command, no data */ - ATAPI_PROT_PIO, /* packet command, PIO data xfer*/ - ATAPI_PROT_DMA, /* packet command with special DMA sauce */ +enum ata_prot_flags { + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ + + /* taskfile protocols */ + ATA_PROT_UNKNOWN = (u8)-1, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = ATA_PROT_FLAG_PIO, + ATA_PROT_DMA = ATA_PROT_FLAG_DMA, + ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ, + ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ, + ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI, + ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO, + ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA, }; enum ata_ioctls { @@ -1066,12 +1077,12 @@ static inline void ata_id_to_hd_driveid(u16 *id) * TO NV CACHE PINNED SET. */ static inline unsigned ata_set_lba_range_entries(void *_buffer, - unsigned buf_size, u64 sector, unsigned long count) + unsigned num, u64 sector, unsigned long count) { __le64 *buffer = _buffer; unsigned i = 0, used_bytes; - while (i < buf_size / 8 ) { /* 6-byte LBA + 2-byte range per entry */ + while (i < num) { u64 entry = sector | ((u64)(count > 0xffff ? 
0xffff : count) << 48); buffer[i++] = __cpu_to_le64(entry); @@ -1095,13 +1106,13 @@ static inline bool ata_ok(u8 status) static inline bool lba_28_ok(u64 block, u32 n_block) { /* check the ending block number: must be LESS THAN 0x0fffffff */ - return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS); } static inline bool lba_48_ok(u64 block, u32 n_block) { /* check the ending block number */ - return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536); + return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48); } #define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index e66153d60bd5..76860a461ed2 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -40,6 +40,7 @@ struct ath9k_platform_data { bool tx_gain_buffalo; bool disable_2ghz; bool disable_5ghz; + bool led_active_high; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git a/include/linux/atomic.h b/include/linux/atomic.h index e451534fe54d..e71835bf60a9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -163,206 +163,265 @@ #endif #endif /* atomic_dec_return_relaxed */ -/* atomic_xchg_relaxed */ -#ifndef atomic_xchg_relaxed -#define atomic_xchg_relaxed atomic_xchg -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg -#else /* atomic_xchg_relaxed */ +/* atomic_fetch_add_relaxed */ +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add -#ifndef atomic_xchg_acquire -#define atomic_xchg_acquire(...) \ - __atomic_op_acquire(atomic_xchg, __VA_ARGS__) +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +#define atomic_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) #endif -#ifndef atomic_xchg_release -#define atomic_xchg_release(...) \ - __atomic_op_release(atomic_xchg, __VA_ARGS__) +#ifndef atomic_fetch_add_release +#define atomic_fetch_add_release(...) \ + __atomic_op_release(atomic_fetch_add, __VA_ARGS__) #endif -#ifndef atomic_xchg -#define atomic_xchg(...) \ - __atomic_op_fence(atomic_xchg, __VA_ARGS__) +#ifndef atomic_fetch_add +#define atomic_fetch_add(...) \ + __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic_fetch_add_relaxed */ + +/* atomic_fetch_inc_relaxed */ +#ifndef atomic_fetch_inc_relaxed + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) +#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) +#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) +#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) +#else /* atomic_fetch_inc */ +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +#define atomic_fetch_inc_acquire(...) 
\ + __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) #endif -#endif /* atomic_xchg_relaxed */ -/* atomic_cmpxchg_relaxed */ -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg +#ifndef atomic_fetch_inc_release +#define atomic_fetch_inc_release(...) \ + __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) +#endif -#else /* atomic_cmpxchg_relaxed */ +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(...) \ + __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic_fetch_inc_relaxed */ -#ifndef atomic_cmpxchg_acquire -#define atomic_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) +/* atomic_fetch_sub_relaxed */ +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub + +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +#define atomic_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) #endif -#ifndef atomic_cmpxchg_release -#define atomic_cmpxchg_release(...) \ - __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) +#ifndef atomic_fetch_sub_release +#define atomic_fetch_sub_release(...) \ + __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) #endif -#ifndef atomic_cmpxchg -#define atomic_cmpxchg(...) \ - __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) +#ifndef atomic_fetch_sub +#define atomic_fetch_sub(...) \ + __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic_fetch_sub_relaxed */ + +/* atomic_fetch_dec_relaxed */ +#ifndef atomic_fetch_dec_relaxed + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) +#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) +#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) +#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) +#else /* atomic_fetch_dec */ +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +#define atomic_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) #endif -#endif /* atomic_cmpxchg_relaxed */ -#ifndef atomic64_read_acquire -#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#ifndef atomic_fetch_dec_release +#define atomic_fetch_dec_release(...) \ + __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) #endif -#ifndef atomic64_set_release -#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(...) \ + __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) #endif +#endif /* atomic_fetch_dec_relaxed */ -/* atomic64_add_return_relaxed */ -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_relaxed atomic64_add_return -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return +/* atomic_fetch_or_relaxed */ +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or -#else /* atomic64_add_return_relaxed */ +#else /* atomic_fetch_or_relaxed */ -#ifndef atomic64_add_return_acquire -#define atomic64_add_return_acquire(...) 
\ - __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or_acquire +#define atomic_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) #endif -#ifndef atomic64_add_return_release -#define atomic64_add_return_release(...) \ - __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or_release +#define atomic_fetch_or_release(...) \ + __atomic_op_release(atomic_fetch_or, __VA_ARGS__) #endif -#ifndef atomic64_add_return -#define atomic64_add_return(...) \ - __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or +#define atomic_fetch_or(...) \ + __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) #endif -#endif /* atomic64_add_return_relaxed */ +#endif /* atomic_fetch_or_relaxed */ -/* atomic64_inc_return_relaxed */ -#ifndef atomic64_inc_return_relaxed -#define atomic64_inc_return_relaxed atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return +/* atomic_fetch_and_relaxed */ +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_relaxed atomic_fetch_and +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and -#else /* atomic64_inc_return_relaxed */ +#else /* atomic_fetch_and_relaxed */ -#ifndef atomic64_inc_return_acquire -#define atomic64_inc_return_acquire(...) \ - __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and_acquire +#define atomic_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) #endif -#ifndef atomic64_inc_return_release -#define atomic64_inc_return_release(...) \ - __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and_release +#define atomic_fetch_and_release(...) \ + __atomic_op_release(atomic_fetch_and, __VA_ARGS__) #endif -#ifndef atomic64_inc_return -#define atomic64_inc_return(...) \ - __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and +#define atomic_fetch_and(...) \ + __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) #endif -#endif /* atomic64_inc_return_relaxed */ - +#endif /* atomic_fetch_and_relaxed */ -/* atomic64_sub_return_relaxed */ -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_relaxed atomic64_sub_return -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return +#ifdef atomic_andnot +/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_fetch_andnot_relaxed +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot -#else /* atomic64_sub_return_relaxed */ +#else /* atomic_fetch_andnot_relaxed */ -#ifndef atomic64_sub_return_acquire -#define atomic64_sub_return_acquire(...) \ - __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot_acquire +#define atomic_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) #endif -#ifndef atomic64_sub_return_release -#define atomic64_sub_return_release(...) \ - __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot_release +#define atomic_fetch_andnot_release(...) \ + __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) #endif -#ifndef atomic64_sub_return -#define atomic64_sub_return(...) \ - __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(...) 
\ + __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) #endif -#endif /* atomic64_sub_return_relaxed */ +#endif /* atomic_fetch_andnot_relaxed */ +#endif /* atomic_andnot */ -/* atomic64_dec_return_relaxed */ -#ifndef atomic64_dec_return_relaxed -#define atomic64_dec_return_relaxed atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return +/* atomic_fetch_xor_relaxed */ +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor -#else /* atomic64_dec_return_relaxed */ +#else /* atomic_fetch_xor_relaxed */ -#ifndef atomic64_dec_return_acquire -#define atomic64_dec_return_acquire(...) \ - __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor_acquire +#define atomic_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) #endif -#ifndef atomic64_dec_return_release -#define atomic64_dec_return_release(...) \ - __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor_release +#define atomic_fetch_xor_release(...) \ + __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) #endif -#ifndef atomic64_dec_return -#define atomic64_dec_return(...) \ - __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor +#define atomic_fetch_xor(...) \ + __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) #endif -#endif /* atomic64_dec_return_relaxed */ +#endif /* atomic_fetch_xor_relaxed */ -/* atomic64_xchg_relaxed */ -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_relaxed atomic64_xchg -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg -#else /* atomic64_xchg_relaxed */ +/* atomic_xchg_relaxed */ +#ifndef atomic_xchg_relaxed +#define atomic_xchg_relaxed atomic_xchg +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg -#ifndef atomic64_xchg_acquire -#define atomic64_xchg_acquire(...) \ - __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +#define atomic_xchg_acquire(...) \ + __atomic_op_acquire(atomic_xchg, __VA_ARGS__) #endif -#ifndef atomic64_xchg_release -#define atomic64_xchg_release(...) \ - __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#ifndef atomic_xchg_release +#define atomic_xchg_release(...) \ + __atomic_op_release(atomic_xchg, __VA_ARGS__) #endif -#ifndef atomic64_xchg -#define atomic64_xchg(...) \ - __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#ifndef atomic_xchg +#define atomic_xchg(...) \ + __atomic_op_fence(atomic_xchg, __VA_ARGS__) #endif -#endif /* atomic64_xchg_relaxed */ +#endif /* atomic_xchg_relaxed */ -/* atomic64_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg +/* atomic_cmpxchg_relaxed */ +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg -#else /* atomic64_cmpxchg_relaxed */ +#else /* atomic_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_acquire -#define atomic64_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg_acquire +#define atomic_cmpxchg_acquire(...) 
\ + __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) #endif -#ifndef atomic64_cmpxchg_release -#define atomic64_cmpxchg_release(...) \ - __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg_release +#define atomic_cmpxchg_release(...) \ + __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) #endif -#ifndef atomic64_cmpxchg -#define atomic64_cmpxchg(...) \ - __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(...) \ + __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) #endif -#endif /* atomic64_cmpxchg_relaxed */ +#endif /* atomic_cmpxchg_relaxed */ /* cmpxchg_relaxed */ #ifndef cmpxchg_relaxed @@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); } -#endif -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +static inline int atomic_fetch_andnot(int i, atomic_t *v) { - atomic_andnot(mask, v); + return atomic_fetch_and(~i, v); } -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - atomic_or(mask, v); + return atomic_fetch_and_relaxed(~i, v); } +static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} + +static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#endif + /** * atomic_inc_not_zero_hint - increment if not null * @v: pointer of type atomic_t @@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -/** - * atomic_fetch_or - perform *p |= mask and return old value of *p - * @mask: mask to OR on the atomic_t - * @p: pointer to atomic_t - */ -#ifndef atomic_fetch_or -static inline int atomic_fetch_or(int mask, atomic_t *p) -{ - int old, val = atomic_read(p); +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif - for (;;) { - old = atomic_cmpxchg(p, val, val | mask); - if (old == val) - break; - val = old; - } +#ifndef atomic64_read_acquire +#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif - return old; -} +#ifndef atomic64_set_release +#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) #endif -#ifdef CONFIG_GENERIC_ATOMIC64 -#include <asm-generic/atomic64.h> +/* atomic64_add_return_relaxed */ +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_relaxed atomic64_add_return +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return + +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +#define atomic64_add_return_acquire(...) \ + __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_release +#define atomic64_add_return_release(...) \ + __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return +#define atomic64_add_return(...) \ + __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#endif +#endif /* atomic64_add_return_relaxed */ + +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) 
\ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + +/* atomic64_sub_return_relaxed */ +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return + +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +#define atomic64_sub_return_acquire(...) \ + __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) #endif +#ifndef atomic64_sub_return_release +#define atomic64_sub_return_release(...) \ + __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return +#define atomic64_sub_return(...) \ + __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#endif +#endif /* atomic64_sub_return_relaxed */ + +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) \ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + + +/* atomic64_fetch_add_relaxed */ +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add + +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +#define atomic64_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add_release +#define atomic64_fetch_add_release(...) \ + __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add +#define atomic64_fetch_add(...) \ + __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_add_relaxed */ + +/* atomic64_fetch_inc_relaxed */ +#ifndef atomic64_fetch_inc_relaxed + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) +#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) +#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) +#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) +#else /* atomic64_fetch_inc */ +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +#define atomic64_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc_release +#define atomic64_fetch_inc_release(...) 
\ + __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(...) \ + __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_inc_relaxed */ + +/* atomic64_fetch_sub_relaxed */ +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub + +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +#define atomic64_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub_release +#define atomic64_fetch_sub_release(...) \ + __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub +#define atomic64_fetch_sub(...) \ + __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_sub_relaxed */ + +/* atomic64_fetch_dec_relaxed */ +#ifndef atomic64_fetch_dec_relaxed + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) +#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) +#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) +#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) +#else /* atomic64_fetch_dec */ +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +#define atomic64_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec_release +#define atomic64_fetch_dec_release(...) \ + __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(...) \ + __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_dec_relaxed */ + +/* atomic64_fetch_or_relaxed */ +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or + +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +#define atomic64_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or_release +#define atomic64_fetch_or_release(...) \ + __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or +#define atomic64_fetch_or(...) \ + __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_or_relaxed */ + +/* atomic64_fetch_and_relaxed */ +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and + +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +#define atomic64_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and_release +#define atomic64_fetch_and_release(...) \ + __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and +#define atomic64_fetch_and(...) 
\ + __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_and_relaxed */ + +#ifdef atomic64_andnot +/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_fetch_andnot_relaxed +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +#define atomic64_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot_release +#define atomic64_fetch_andnot_release(...) \ + __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(...) \ + __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_andnot_relaxed */ +#endif /* atomic64_andnot */ + +/* atomic64_fetch_xor_relaxed */ +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor + +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +#define atomic64_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor_release +#define atomic64_fetch_xor_release(...) \ + __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor +#define atomic64_fetch_xor(...) \ + __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_xor_relaxed */ + + +/* atomic64_xchg_relaxed */ +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_relaxed atomic64_xchg +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg + +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +#define atomic64_xchg_acquire(...) \ + __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_release +#define atomic64_xchg_release(...) \ + __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg +#define atomic64_xchg(...) \ + __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#endif +#endif /* atomic64_xchg_relaxed */ + +/* atomic64_cmpxchg_relaxed */ +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg + +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +#define atomic64_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_release +#define atomic64_cmpxchg_release(...) \ + __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg +#define atomic64_cmpxchg(...) 
\ + __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic64_cmpxchg_relaxed */ + #ifndef atomic64_andnot static inline void atomic64_andnot(long long i, atomic64_t *v) { atomic64_and(~i, v); } + +static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} + +static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} + +static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} + +static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} #endif #include <asm-generic/atomic-long.h> diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c82794f20110..491a91717788 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -197,7 +197,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) } long congestion_wait(int sync, long timeout); -long wait_iff_congested(struct zone *zone, int sync, long timeout); +long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout); int pdflush_proc_obsolete(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 9b0a15d06a4f..79542b2698ec 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -48,6 +48,7 @@ #include <linux/migrate.h> #include <linux/gfp.h> #include <linux/err.h> +#include <linux/fs.h> /* * Balloon device information descriptor. @@ -62,6 +63,7 @@ struct balloon_dev_info { struct list_head pages; /* Pages enqueued & handled to Host */ int (*migratepage)(struct balloon_dev_info *, struct page *newpage, struct page *page, enum migrate_mode mode); + struct inode *inode; }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); @@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) spin_lock_init(&balloon->pages_lock); INIT_LIST_HEAD(&balloon->pages); balloon->migratepage = NULL; + balloon->inode = NULL; } #ifdef CONFIG_BALLOON_COMPACTION -extern bool balloon_page_isolate(struct page *page); +extern const struct address_space_operations balloon_aops; +extern bool balloon_page_isolate(struct page *page, + isolate_mode_t mode); extern void balloon_page_putback(struct page *page); -extern int balloon_page_migrate(struct page *newpage, +extern int balloon_page_migrate(struct address_space *mapping, + struct page *newpage, struct page *page, enum migrate_mode mode); /* - * __is_movable_balloon_page - helper to perform @page PageBalloon tests - */ -static inline bool __is_movable_balloon_page(struct page *page) -{ - return PageBalloon(page); -} - -/* - * balloon_page_movable - test PageBalloon to identify balloon pages - * and PagePrivate to check that the page is not - * isolated and can be moved by compaction/migration. - * - * As we might return false positives in the case of a balloon page being just - * released under us, this need to be re-tested later, under the page lock. - */ -static inline bool balloon_page_movable(struct page *page) -{ - return PageBalloon(page) && PagePrivate(page); -} - -/* - * isolated_balloon_page - identify an isolated balloon page on private - * compaction/migration page lists. 
- */ -static inline bool isolated_balloon_page(struct page *page) -{ - return PageBalloon(page); -} - -/* * balloon_page_insert - insert a page into the balloon's page list and make * the page->private assignment accordingly. * @balloon : pointer to balloon device @@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageBalloon(page); - SetPagePrivate(page); + __SetPageMovable(page, balloon->inode->i_mapping); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); } @@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, static inline void balloon_page_delete(struct page *page) { __ClearPageBalloon(page); + __ClearPageMovable(page); set_page_private(page, 0); - if (PagePrivate(page)) { - ClearPagePrivate(page); + /* + * No touch page.lru field once @page has been isolated + * because VM is using the field. + */ + if (!PageIsolated(page)) list_del(&page->lru); - } } /* diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index a5ac2cad5cb7..b20e3d56253f 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -504,6 +504,9 @@ #define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 #define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 +#define BCMA_CCB_MII_MNG_CTL 0x0000 +#define BCMA_CCB_MII_MNG_CMD_DATA 0x0004 + /* BCM4331 ChipControl numbers. */ #define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */ #define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 9faebf7f9a33..583c10810e32 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -41,44 +41,9 @@ #endif #define BIO_MAX_PAGES 256 -#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT) -#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) -/* - * upper 16 bits of bi_rw define the io priority of this bio - */ -#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) -#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) -#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) - -#define bio_set_prio(bio, prio) do { \ - WARN_ON(prio >= (1 << IOPRIO_BITS)); \ - (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ - (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ -} while (0) - -/* - * various member access, note that bio_data should of course not be used - * on highmem page vectors - */ -#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) - -#define bvec_iter_page(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_page) - -#define bvec_iter_len(bvec, iter) \ - min((iter).bi_size, \ - __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) - -#define bvec_iter_offset(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) - -#define bvec_iter_bvec(bvec, iter) \ -((struct bio_vec) { \ - .bv_page = bvec_iter_page((bvec), (iter)), \ - .bv_len = bvec_iter_len((bvec), (iter)), \ - .bv_offset = bvec_iter_offset((bvec), (iter)), \ -}) +#define bio_prio(bio) (bio)->bi_ioprio +#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) @@ -106,18 +71,23 @@ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && - !(bio->bi_rw & REQ_DISCARD)) + bio_op(bio) != REQ_OP_DISCARD) return true; return false; } +static inline bool bio_no_advance_iter(struct bio *bio) +{ + 
return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; +} + static inline bool bio_is_rw(struct bio *bio) { if (!bio_has_data(bio)) return false; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) return false; return true; @@ -193,39 +163,12 @@ static inline void *bio_data(struct bio *bio) #define bio_for_each_segment_all(bvl, bio, i) \ for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) -static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, - unsigned bytes) -{ - WARN_ONCE(bytes > iter->bi_size, - "Attempted to advance past end of bvec iter\n"); - - while (bytes) { - unsigned len = min(bytes, bvec_iter_len(bv, *iter)); - - bytes -= len; - iter->bi_size -= len; - iter->bi_bvec_done += len; - - if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { - iter->bi_bvec_done = 0; - iter->bi_idx++; - } - } -} - -#define for_each_bvec(bvl, bio_vec, iter, start) \ - for (iter = (start); \ - (iter).bi_size && \ - ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) - - static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned bytes) { iter->bi_sector += bytes >> 9; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, iter, bytes); @@ -253,10 +196,10 @@ static inline unsigned bio_segments(struct bio *bio) * differently: */ - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) return 1; - if (bio->bi_rw & REQ_WRITE_SAME) + if (bio_op(bio) == REQ_OP_WRITE_SAME) return 1; bio_for_each_segment(bv, bio, iter) @@ -473,7 +416,7 @@ static inline void bio_io_error(struct bio *bio) struct request_queue; extern int bio_phys_segments(struct request_queue *, struct bio *); -extern int submit_bio_wait(int rw, struct bio *bio); +extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); extern void bio_init(struct bio *); @@ -720,8 +663,6 @@ static inline void bio_inc_remaining(struct bio *bio) * and the bvec_slabs[]. */ #define BIO_POOL_SIZE 2 -#define BIOVEC_NR_POOLS 6 -#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { struct kmem_cache *bio_slab; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e9b0b9ab07e5..27bfc0b631a9 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -267,6 +267,10 @@ static inline int bitmap_equal(const unsigned long *src1, { if (small_const_nbits(nbits)) return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); +#ifdef CONFIG_S390 + else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) + return !memcmp(src1, src2, nbits / 8); +#endif else return __bitmap_equal(src1, src2, nbits); } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index c02e669945e9..f77150a4a96a 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) /** * blkg_rwstat_add - add a value to a blkg_rwstat * @rwstat: target blkg_rwstat - * @rw: mask of REQ_{WRITE|SYNC} + * @op: REQ_OP + * @op_flags: rq_flag_bits * @val: value to add * * Add @val to @rwstat. The counters are chosen according to @rw. The * caller is responsible for synchronizing calls to this function. 
*/ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, - int rw, uint64_t val) + int op, int op_flags, uint64_t val) { struct percpu_counter *cnt; - if (rw & REQ_WRITE) + if (op_is_write(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); - if (rw & REQ_SYNC) + if (op_flags & REQ_SYNC) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; @@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, + blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); + blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2498fdf3a503..e43bbffb5b7a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -96,6 +96,7 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int, unsigned int, unsigned int); typedef void (exit_request_fn)(void *, struct request *, unsigned int, unsigned int); +typedef int (reinit_request_fn)(void *, struct request *); typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); @@ -145,6 +146,7 @@ struct blk_mq_ops { */ init_request_fn *init_request; exit_request_fn *exit_request; + reinit_request_fn *reinit_request; }; enum { @@ -196,6 +198,8 @@ enum { struct request *blk_mq_alloc_request(struct request_queue *q, int rw, unsigned int flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op, + unsigned int flags, unsigned int hctx_idx); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags); @@ -243,6 +247,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 77e5d81f07aa..f254eb264924 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -6,6 +6,7 @@ #define __LINUX_BLK_TYPES_H #include <linux/types.h> +#include <linux/bvec.h> struct bio_set; struct bio; @@ -17,28 +18,7 @@ struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); typedef void (bio_destructor_t) (struct bio *); -/* - * was unsigned short, but we might as well be ready for > 64kB I/O pages - */ -struct bio_vec { - struct page *bv_page; - unsigned int bv_len; - unsigned int bv_offset; -}; - #ifdef CONFIG_BLOCK - -struct bvec_iter { - sector_t bi_sector; /* device address in 512 byte - sectors */ - unsigned int bi_size; /* residual I/O count */ - - unsigned int bi_idx; /* current index into bvl_vec */ - - unsigned int bi_bvec_done; /* number of bytes completed in - current bvec */ -}; - /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -46,11 +26,12 @@ struct bvec_iter { struct bio { struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; - unsigned int bi_flags; /* status, command, etc */ int bi_error; - unsigned long bi_rw; /* bottom bits READ/WRITE, - 
* top bits priority + unsigned int bi_rw; /* bottom bits req flags, + * top bits REQ_OP */ + unsigned short bi_flags; /* status, command, etc */ + unsigned short bi_ioprio; struct bvec_iter bi_iter; @@ -107,6 +88,16 @@ struct bio { struct bio_vec bi_inline_vecs[0]; }; +#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) +#define bio_op(bio) ((bio)->bi_rw >> BIO_OP_SHIFT) + +#define bio_set_op_attrs(bio, op, op_flags) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \ + (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \ + (bio)->bi_rw |= op_flags; \ +} while (0) + #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* @@ -123,19 +114,25 @@ struct bio { /* * Flags starting here get preserved by bio_reset() - this includes - * BIO_POOL_IDX() + * BVEC_POOL_IDX() + */ +#define BIO_RESET_BITS 10 + +/* + * We support 6 different bvec pools, the last one is magic in that it + * is backed by a mempool. */ -#define BIO_RESET_BITS 13 -#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */ +#define BVEC_POOL_NR 6 +#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) /* - * top 4 bits of bio flags indicate the pool this bio came from + * Top 4 bits of bio flags indicate the pool the bvecs came from. We add + * 1 to the actual index so that 0 indicates that there are no bvecs to be + * freed. */ -#define BIO_POOL_BITS (4) -#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) -#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS) -#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) -#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) +#define BVEC_POOL_BITS (4) +#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) +#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) #endif /* CONFIG_BLOCK */ @@ -145,7 +142,6 @@ struct bio { */ enum rq_flag_bits { /* common flags */ - __REQ_WRITE, /* not set, read. 
set, write */ __REQ_FAILFAST_DEV, /* no driver retries of device errors */ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ @@ -153,14 +149,11 @@ enum rq_flag_bits { __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - __REQ_DISCARD, /* request to discard sectors */ - __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ - __REQ_WRITE_SAME, /* write same block many times */ __REQ_NOIDLE, /* don't anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ - __REQ_FLUSH, /* request for cache flush */ + __REQ_PREFLUSH, /* request for cache flush */ /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ @@ -191,31 +184,25 @@ enum rq_flag_bits { __REQ_NR_BITS, /* stops here */ }; -#define REQ_WRITE (1ULL << __REQ_WRITE) #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_DISCARD (1ULL << __REQ_DISCARD) -#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_COMMON_MASK \ - (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ - REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ - REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE) + (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ + REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) #define REQ_CLONE_MASK REQ_COMMON_MASK -#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) - /* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) @@ -233,15 +220,25 @@ enum rq_flag_bits { #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) -#define REQ_FLUSH (1ULL << __REQ_FLUSH) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) -#define REQ_SECURE (1ULL << __REQ_SECURE) #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +enum req_op { + REQ_OP_READ, + REQ_OP_WRITE, + REQ_OP_DISCARD, /* request to discard sectors */ + REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ + REQ_OP_WRITE_SAME, /* write same block many times */ + REQ_OP_FLUSH, /* request for cache flush */ +}; + +#define REQ_OP_BITS 3 + typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U #define BLK_QC_T_SHIFT 16 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3d9cf326574f..adf33079771e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -90,18 +90,17 @@ struct request { struct list_head queuelist; union { struct 
call_single_data csd; - unsigned long fifo_time; + u64 fifo_time; }; struct request_queue *q; struct blk_mq_ctx *mq_ctx; - u64 cmd_flags; + int cpu; unsigned cmd_type; + u64 cmd_flags; unsigned long atomic_flags; - int cpu; - /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ @@ -200,6 +199,20 @@ struct request { struct request *next_rq; }; +#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) +#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) + +#define req_set_op(req, op) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ + (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ +} while (0) + +#define req_set_op_attrs(req, op, flags) do { \ + req_set_op(req, op); \ + (req)->cmd_flags |= flags; \ +} while (0) + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -483,7 +496,7 @@ struct request_queue { #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ -#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ +#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ @@ -492,6 +505,7 @@ struct request_queue { #define QUEUE_FLAG_WC 23 /* Write back caching */ #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ +#define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -579,8 +593,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) -#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ - test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) +#define blk_queue_secure_erase(q) \ + (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) +#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? 
WRITE : READ) /* * Driver can handle struct request, if it either has an old style @@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) /* * We regard a request as sync, if either a read or a sync write */ -static inline bool rw_is_sync(unsigned int rw_flags) +static inline bool rw_is_sync(int op, unsigned int rw_flags) { - return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); + return op == REQ_OP_READ || (rw_flags & REQ_SYNC); } static inline bool rq_is_sync(struct request *rq) { - return rw_is_sync(rq->cmd_flags); + return rw_is_sync(req_op(rq), rq->cmd_flags); } static inline bool blk_rl_full(struct request_list *rl, bool sync) @@ -652,22 +667,10 @@ static inline bool rq_mergeable(struct request *rq) if (rq->cmd_type != REQ_TYPE_FS) return false; - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + if (req_op(rq) == REQ_OP_FLUSH) return false; - return true; -} - -static inline bool blk_check_merge_flags(unsigned int flags1, - unsigned int flags2) -{ - if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) - return false; - - if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) - return false; - - if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; return true; @@ -786,8 +789,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); -extern struct request *blk_make_request(struct request_queue *, struct bio *, - gfp_t); extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, @@ -800,6 +801,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, extern void blk_rq_unprep_clone(struct request *rq); extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_delay_queue(struct request_queue *, unsigned long); extern void blk_queue_split(struct request_queue *, struct bio **, struct bio_set *); @@ -879,12 +881,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, - unsigned int cmd_flags) + int op) { - if (unlikely(cmd_flags & REQ_DISCARD)) + if (unlikely(op == REQ_OP_DISCARD)) return min(q->limits.max_discard_sectors, UINT_MAX >> 9); - if (unlikely(cmd_flags & REQ_WRITE_SAME)) + if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; return q->limits.max_sectors; @@ -904,18 +906,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, (offset & (q->limits.chunk_sectors - 1)); } -static inline unsigned int blk_rq_get_max_sectors(struct request *rq) +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) { struct request_queue *q = rq->q; if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; - if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) - return blk_queue_get_max_sectors(q, rq->cmd_flags); + if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) + return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, blk_rq_pos(rq)), - blk_queue_get_max_sectors(q, 
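/*
 * With REQ_WRITE gone, "is this a write?" can no longer be answered
 * from the flag word alone, so rw_is_sync() above gains a separate op
 * argument: reads always count as sync, writes only when REQ_SYNC is
 * set.  Sketch of the resulting behaviour (hypothetical calls, not
 * part of the patch):
 *
 *	rw_is_sync(REQ_OP_READ, 0);             true: reads are sync
 *	rw_is_sync(REQ_OP_WRITE, 0);            false: plain async write
 *	rw_is_sync(REQ_OP_WRITE, REQ_SYNC);     true: sync write
 */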
rq->cmd_flags)); + return min(blk_max_size_offset(q, offset), + blk_queue_get_max_sectors(q, req_op(rq))); } static inline unsigned int blk_rq_count_bios(struct request *rq) @@ -1135,13 +1138,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, return bqt->tag_index[tag]; } -#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ + +#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ +#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop); + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, @@ -1659,7 +1665,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req, */ struct blk_dax_ctl { sector_t sector; - void __pmem *addr; + void *addr; long size; pfn_t pfn; }; @@ -1670,8 +1676,8 @@ struct block_device_operations { int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - long (*direct_access)(struct block_device *, sector_t, void __pmem **, - pfn_t *, long); + long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *, + long); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); /* ->media_changed() is DEPRECATED, use ->check_events() instead */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0f3172b8b225..cceb72f9e29f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) } extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); +extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0de4de6dd43e..11134238417d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -11,14 +11,17 @@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> +#include <linux/err.h> +struct perf_event; struct bpf_map; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ struct bpf_map *(*map_alloc)(union bpf_attr *attr); - void (*map_free)(struct bpf_map *); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); /* funcs callable from userspace and from eBPF programs */ @@ -27,8 +30,9 @@ struct bpf_map_ops { int (*map_delete_elem)(struct bpf_map *map, void *key); /* funcs called by prog_array and perf_event_array map */ - void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); - void (*map_fd_put_ptr) (void *ptr); + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void 
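/*
 * In this bpf_map_ops hunk, map_fd_get_ptr() now also receives the
 * struct file of the map itself, and a map_release() hook is called per
 * map file: fd-array maps (prog array, perf-event array) can then tie
 * the lifetime of a stored element to the map file instead of dropping
 * it behind RCU.  The struct bpf_event_entry added below carries
 * exactly that (event, perf_file, map_file) triple.
 */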
(*map_fd_put_ptr)(void *ptr); }; struct bpf_map { @@ -189,15 +193,28 @@ struct bpf_array { void __percpu *pptrs[0] __aligned(8); }; }; + #define MAX_TAIL_CALL_CNT 32 +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; +}; + u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_fd_array_map_clear(struct bpf_map *map); + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); -const struct bpf_func_proto *bpf_get_event_output_proto(void); + +typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, + unsigned long off, unsigned long len); + +u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, + void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); @@ -206,9 +223,10 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); +struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); -void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); @@ -231,8 +249,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, u64 flags); int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags); + int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); +int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +void bpf_fd_array_map_clear(struct bpf_map *map); + /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. * Best-effort only. 
No barriers here, since it _will_ race with concurrent @@ -261,11 +284,17 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } -static inline void bpf_prog_put(struct bpf_prog *prog) +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} +static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) { + return ERR_PTR(-EOPNOTSUPP); } -static inline void bpf_prog_put_rcu(struct bpf_prog *prog) +static inline void bpf_prog_put(struct bpf_prog *prog) { } #endif /* CONFIG_BPF_SYSCALL */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d48daa3f6f20..ebbacd14d450 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); void __lock_buffer(struct buffer_head *bh); -void ll_rw_block(int, int, struct buffer_head * bh[]); +void ll_rw_block(int, int, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); -int __sync_dirty_buffer(struct buffer_head *bh, int rw); -void write_dirty_buffer(struct buffer_head *bh, int rw); -int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags); -int submit_bh(int, struct buffer_head *); +int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); +void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int _submit_bh(int op, int op_flags, struct buffer_head *bh, + unsigned long bio_flags); +int submit_bh(int, int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); int bh_uptodate_or_lock(struct buffer_head *bh); @@ -208,6 +209,9 @@ void block_invalidatepage(struct page *page, unsigned int offset, unsigned int length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +int __block_write_full_page(struct inode *inode, struct page *page, + get_block_t *get_block, struct writeback_control *wbc, + bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 000000000000..701b64a3b7c5 --- /dev/null +++ b/include/linux/bvec.h @@ -0,0 +1,96 @@ +/* + * bvec iterator + * + * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
+ */
+#ifndef __LINUX_BVEC_ITER_H
+#define __LINUX_BVEC_ITER_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+        struct page     *bv_page;
+        unsigned int    bv_len;
+        unsigned int    bv_offset;
+};
+
+struct bvec_iter {
+        sector_t        bi_sector;      /* device address in 512 byte
+                                           sectors */
+        unsigned int    bi_size;        /* residual I/O count */
+
+        unsigned int    bi_idx;         /* current index into bvl_vec */
+
+        unsigned int    bi_bvec_done;   /* number of bytes completed in
+                                           current bvec */
+};
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define __bvec_iter_bvec(bvec, iter)    (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                              \
+        (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                               \
+        min((iter).bi_size,                                     \
+            __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                            \
+        (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                              \
+((struct bio_vec) {                                             \
+        .bv_page        = bvec_iter_page((bvec), (iter)),       \
+        .bv_len         = bvec_iter_len((bvec), (iter)),        \
+        .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+static inline void bvec_iter_advance(const struct bio_vec *bv,
+                                     struct bvec_iter *iter,
+                                     unsigned bytes)
+{
+        WARN_ONCE(bytes > iter->bi_size,
+                  "Attempted to advance past end of bvec iter\n");
+
+        while (bytes) {
+                unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+                bytes -= len;
+                iter->bi_size -= len;
+                iter->bi_bvec_done += len;
+
+                if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                        iter->bi_bvec_done = 0;
+                        iter->bi_idx++;
+                }
+        }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                \
+        for (iter = (start);                                    \
+             (iter).bi_size &&                                  \
+               ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);  \
+             bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
new file mode 100644
index 000000000000..82c3d3b7269d
--- /dev/null
+++ b/include/linux/cec-funcs.h
@@ -0,0 +1,1899 @@
+/*
+ * cec - HDMI Consumer Electronics Control message functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. + */ +#ifndef _CEC_UAPI_FUNCS_H +#define _CEC_UAPI_FUNCS_H + +#include <linux/cec.h> + +/* One Touch Play Feature */ +static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_active_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_image_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON; +} + +static inline void cec_msg_text_view_on(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TEXT_VIEW_ON; +} + + +/* Routing Control Feature */ +static inline void cec_msg_inactive_source(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_INACTIVE_SOURCE; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_inactive_source(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_request_active_source(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE; + msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0; +} + +static inline void cec_msg_routing_information(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_INFORMATION; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; +} + +static inline void cec_ops_routing_information(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_routing_change(struct cec_msg *msg, + bool reply, + __u16 orig_phys_addr, + __u16 new_phys_addr) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_ROUTING_CHANGE; + msg->msg[2] = orig_phys_addr >> 8; + msg->msg[3] = orig_phys_addr & 0xff; + msg->msg[4] = new_phys_addr >> 8; + msg->msg[5] = new_phys_addr & 0xff; + msg->reply = reply ? 
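/*
 * A note on the pattern shared by all these cec_msg_*() builders:
 * msg[0] holds the initiator/destination nibbles (OR-ing in 0xf makes
 * the destination broadcast), msg[1] is the opcode, operands follow,
 * and 16-bit physical addresses are split big-endian across two bytes.
 * Where a builder takes a "reply" argument, msg->reply tells the CEC
 * framework which opcode to wait for as the answer.  Hypothetical use,
 * with 0x1000 meaning physical address 1.0.0.0:
 *
 *	struct cec_msg msg = {};
 *	cec_msg_active_source(&msg, 0x1000);
 */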
CEC_MSG_ROUTING_INFORMATION : 0;
+}
+
+static inline void cec_ops_routing_change(const struct cec_msg *msg,
+                                          __u16 *orig_phys_addr,
+                                          __u16 *new_phys_addr)
+{
+        *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+        *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
+}
+
+static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
+{
+        msg->len = 4;
+        msg->msg[0] |= 0xf; /* broadcast */
+        msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
+        msg->msg[2] = phys_addr >> 8;
+        msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
+                                           __u16 *phys_addr)
+{
+        *phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Standby Feature */
+static inline void cec_msg_standby(struct cec_msg *msg)
+{
+        msg->len = 2;
+        msg->msg[1] = CEC_MSG_STANDBY;
+}
+
+
+/* One Touch Record Feature */
+static inline void cec_msg_record_off(struct cec_msg *msg)
+{
+        msg->len = 2;
+        msg->msg[1] = CEC_MSG_RECORD_OFF;
+}
+
+struct cec_op_arib_data {
+        __u16 transport_id;
+        __u16 service_id;
+        __u16 orig_network_id;
+};
+
+struct cec_op_atsc_data {
+        __u16 transport_id;
+        __u16 program_number;
+};
+
+struct cec_op_dvb_data {
+        __u16 transport_id;
+        __u16 service_id;
+        __u16 orig_network_id;
+};
+
+struct cec_op_channel_data {
+        __u8 channel_number_fmt;
+        __u16 major;
+        __u16 minor;
+};
+
+struct cec_op_digital_service_id {
+        __u8 service_id_method;
+        __u8 dig_bcast_system;
+        union {
+                struct cec_op_arib_data arib;
+                struct cec_op_atsc_data atsc;
+                struct cec_op_dvb_data dvb;
+                struct cec_op_channel_data channel;
+        };
+};
+
+struct cec_op_record_src {
+        __u8 type;
+        union {
+                struct cec_op_digital_service_id digital;
+                struct {
+                        __u8 ana_bcast_type;
+                        __u16 ana_freq;
+                        __u8 bcast_system;
+                } analog;
+                struct {
+                        __u8 plug;
+                } ext_plug;
+                struct {
+                        __u16 phys_addr;
+                } ext_phys_addr;
+        };
+};
+
+static inline void cec_set_digital_service_id(__u8 *msg,
+                      const struct cec_op_digital_service_id *digital)
+{
+        *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
+        if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+                *msg++ = (digital->channel.channel_number_fmt << 2) |
+                         (digital->channel.major >> 8);
+                *msg++ = digital->channel.major & 0xff;
+                *msg++ = digital->channel.minor >> 8;
+                *msg++ = digital->channel.minor & 0xff;
+                *msg++ = 0;
+                *msg++ = 0;
+                return;
+        }
+        switch (digital->dig_bcast_system) {
+        case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
+        case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
+        case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
+        case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
+                *msg++ = digital->atsc.transport_id >> 8;
+                *msg++ = digital->atsc.transport_id & 0xff;
+                *msg++ = digital->atsc.program_number >> 8;
+                *msg++ = digital->atsc.program_number & 0xff;
+                *msg++ = 0;
+                *msg++ = 0;
+                break;
+        default:
+                *msg++ = digital->dvb.transport_id >> 8;
+                *msg++ = digital->dvb.transport_id & 0xff;
+                *msg++ = digital->dvb.service_id >> 8;
+                *msg++ = digital->dvb.service_id & 0xff;
+                *msg++ = digital->dvb.orig_network_id >> 8;
+                *msg++ = digital->dvb.orig_network_id & 0xff;
+                break;
+        }
+}
+
+static inline void cec_get_digital_service_id(const __u8 *msg,
+                      struct cec_op_digital_service_id *digital)
+{
+        digital->service_id_method = msg[0] >> 7;
+        digital->dig_bcast_system = msg[0] & 0x7f;
+        if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+                digital->channel.channel_number_fmt = msg[1] >> 2;
+                digital->channel.major = ((msg[1] & 3) << 8) | msg[2];
+                digital->channel.minor = (msg[3] << 8) |
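/*
 * cec_set_digital_service_id() above flattens the 7-byte Digital
 * Service Identification operand: byte 0 packs the service ID method
 * (1 bit) with the digital broadcast system (7 bits), and the remaining
 * six bytes carry either the channel data or one of the ARIB/ATSC/DVB
 * triplets, every 16-bit field big-endian.  A hypothetical DVB caller
 * (constant names assumed from the companion linux/cec.h header):
 *
 *	struct cec_op_digital_service_id id = {
 *		.service_id_method = CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID,
 *		.dig_bcast_system = CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T,
 *		.dvb = { .transport_id = 1, .service_id = 2,
 *			 .orig_network_id = 3 },
 *	};
 */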
msg[4]; + return; + } + digital->dvb.transport_id = (msg[1] << 8) | msg[2]; + digital->dvb.service_id = (msg[3] << 8) | msg[4]; + digital->dvb.orig_network_id = (msg[5] << 8) | msg[6]; +} + +static inline void cec_msg_record_on_own(struct cec_msg *msg) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_OWN; +} + +static inline void cec_msg_record_on_digital(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_record_on_analog(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq & 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_record_on_plug(struct cec_msg *msg, + __u8 plug) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG; + msg->msg[3] = plug; +} + +static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, + __u16 phys_addr) +{ + msg->len = 5; + msg->msg[1] = CEC_MSG_RECORD_ON; + msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR; + msg->msg[3] = phys_addr >> 8; + msg->msg[4] = phys_addr & 0xff; +} + +static inline void cec_msg_record_on(struct cec_msg *msg, + const struct cec_op_record_src *rec_src) +{ + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + cec_msg_record_on_own(msg); + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_msg_record_on_digital(msg, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + cec_msg_record_on_analog(msg, + rec_src->analog.ana_bcast_type, + rec_src->analog.ana_freq, + rec_src->analog.bcast_system); + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + cec_msg_record_on_plug(msg, rec_src->ext_plug.plug); + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + cec_msg_record_on_phys_addr(msg, + rec_src->ext_phys_addr.phys_addr); + break; + } +} + +static inline void cec_ops_record_on(const struct cec_msg *msg, + struct cec_op_record_src *rec_src) +{ + rec_src->type = msg->msg[2]; + switch (rec_src->type) { + case CEC_OP_RECORD_SRC_OWN: + break; + case CEC_OP_RECORD_SRC_DIGITAL: + cec_get_digital_service_id(msg->msg + 3, &rec_src->digital); + break; + case CEC_OP_RECORD_SRC_ANALOG: + rec_src->analog.ana_bcast_type = msg->msg[3]; + rec_src->analog.ana_freq = + (msg->msg[4] << 8) | msg->msg[5]; + rec_src->analog.bcast_system = msg->msg[6]; + break; + case CEC_OP_RECORD_SRC_EXT_PLUG: + rec_src->ext_plug.plug = msg->msg[3]; + break; + case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: + rec_src->ext_phys_addr.phys_addr = + (msg->msg[3] << 8) | msg->msg[4]; + break; + } +} + +static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_RECORD_STATUS; + msg->msg[2] = rec_status; +} + +static inline void cec_ops_record_status(const struct cec_msg *msg, + __u8 *rec_status) +{ + *rec_status = msg->msg[2]; +} + +static inline void cec_msg_record_tv_screen(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN; + msg->reply = reply ? 
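/*
 * cec_msg_record_on() above is a small tagged-union dispatcher:
 * cec_op_record_src.type selects the valid union member and the
 * matching cec_msg_record_on_*() helper.  A hypothetical caller
 * recording from external plug 2, with msg declared by the caller:
 *
 *	struct cec_op_record_src src = {
 *		.type = CEC_OP_RECORD_SRC_EXT_PLUG,
 *		.ext_plug = { .plug = 2 },
 *	};
 *	cec_msg_record_on(&msg, &src);
 */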
CEC_MSG_RECORD_ON : 0; +} + + +/* Timer Programming Feature */ +static inline void cec_msg_timer_status(struct cec_msg *msg, + __u8 timer_overlap_warning, + __u8 media_info, + __u8 prog_info, + __u8 prog_error, + __u8 duration_hr, + __u8 duration_min) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_STATUS; + msg->msg[2] = (timer_overlap_warning << 7) | + (media_info << 5) | + (prog_info ? 0x10 : 0) | + (prog_info ? prog_info : prog_error); + if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + msg->len += 2; + msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10); + } +} + +static inline void cec_ops_timer_status(const struct cec_msg *msg, + __u8 *timer_overlap_warning, + __u8 *media_info, + __u8 *prog_info, + __u8 *prog_error, + __u8 *duration_hr, + __u8 *duration_min) +{ + *timer_overlap_warning = msg->msg[2] >> 7; + *media_info = (msg->msg[2] >> 5) & 3; + if (msg->msg[2] & 0x10) { + *prog_info = msg->msg[2] & 0xf; + *prog_error = 0; + } else { + *prog_info = 0; + *prog_error = msg->msg[2] & 0xf; + } + if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || + *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || + *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { + *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf); + *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + } else { + *duration_hr = *duration_min = 0; + } +} + +static inline void cec_msg_timer_cleared_status(struct cec_msg *msg, + __u8 timer_cleared_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS; + msg->msg[2] = timer_cleared_status; +} + +static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg, + __u8 *timer_cleared_status) +{ + *timer_cleared_status = msg->msg[2]; +} + +static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? 
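/*
 * The "((x / 10) << 4) | (x % 10)" expressions used throughout the
 * timer helpers are BCD packing: each decimal digit occupies one
 * nibble, as the CEC timer operands require.  For example 12:45 is
 * encoded as the bytes 0x12 0x45, and the matching decode is
 * "(b >> 4) * 10 + (b & 0xf)" as used by the cec_ops_*() unpackers.
 */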
CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_clear_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; + msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_clear_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + msg->reply = reply ? 
CEC_MSG_TIMER_CLEARED_STATUS : 0; +} + +static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_analogue_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ana_bcast_type; + msg->msg[10] = ana_freq >> 8; + msg->msg[11] = ana_freq & 0xff; + msg->msg[12] = bcast_system; + msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ana_bcast_type = msg->msg[9]; + *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; + *bcast_system = msg->msg[12]; +} + +static inline void cec_msg_set_digital_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 16; + msg->reply = reply ? 
CEC_MSG_TIMER_STATUS : 0; + msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + cec_set_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_ops_set_digital_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + struct cec_op_digital_service_id *digital) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + cec_get_digital_service_id(msg->msg + 9, digital); +} + +static inline void cec_msg_set_ext_timer(struct cec_msg *msg, + bool reply, + __u8 day, + __u8 month, + __u8 start_hr, + __u8 start_min, + __u8 duration_hr, + __u8 duration_min, + __u8 recording_seq, + __u8 ext_src_spec, + __u8 plug, + __u16 phys_addr) +{ + msg->len = 13; + msg->msg[1] = CEC_MSG_SET_EXT_TIMER; + msg->msg[2] = day; + msg->msg[3] = month; + /* Hours and minutes are in BCD format */ + msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); + msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); + msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); + msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); + msg->msg[8] = recording_seq; + msg->msg[9] = ext_src_spec; + msg->msg[10] = plug; + msg->msg[11] = phys_addr >> 8; + msg->msg[12] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; +} + +static inline void cec_ops_set_ext_timer(const struct cec_msg *msg, + __u8 *day, + __u8 *month, + __u8 *start_hr, + __u8 *start_min, + __u8 *duration_hr, + __u8 *duration_min, + __u8 *recording_seq, + __u8 *ext_src_spec, + __u8 *plug, + __u16 *phys_addr) +{ + *day = msg->msg[2]; + *month = msg->msg[3]; + /* Hours and minutes are in BCD format */ + *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); + *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); + *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); + *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); + *recording_seq = msg->msg[8]; + *ext_src_spec = msg->msg[9]; + *plug = msg->msg[10]; + *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; +} + +static inline void cec_msg_set_timer_program_title(struct cec_msg *msg, + const char *prog_title) +{ + unsigned int len = strlen(prog_title); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE; + memcpy(msg->msg + 2, prog_title, len); +} + +static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg, + char *prog_title) +{ + unsigned int len = msg->len > 2 ? 
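/*
 * String operands such as the program title here follow one pattern:
 * the text is clamped to the CEC maximum (14 bytes for names and
 * titles, 13 for the OSD string further down), the length travels in
 * msg->len rather than as a NUL terminator on the wire, and the
 * cec_ops_*() decoders re-terminate into a caller buffer, which must
 * therefore hold at least 15 bytes (hypothetical sizing):
 *
 *	char title[15];
 *	cec_ops_set_timer_program_title(&msg, title);
 */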
msg->len - 2 : 0; + + if (len > 14) + len = 14; + memcpy(prog_title, msg->msg + 2, len); + prog_title[len] = '\0'; +} + +/* System Information Feature */ +static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_CEC_VERSION; + msg->msg[2] = cec_version; +} + +static inline void cec_ops_cec_version(const struct cec_msg *msg, + __u8 *cec_version) +{ + *cec_version = msg->msg[2]; +} + +static inline void cec_msg_get_cec_version(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_CEC_VERSION; + msg->reply = reply ? CEC_MSG_CEC_VERSION : 0; +} + +static inline void cec_msg_report_physical_addr(struct cec_msg *msg, + __u16 phys_addr, __u8 prim_devtype) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = prim_devtype; +} + +static inline void cec_ops_report_physical_addr(const struct cec_msg *msg, + __u16 *phys_addr, __u8 *prim_devtype) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *prim_devtype = msg->msg[4]; +} + +static inline void cec_msg_give_physical_addr(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR; + msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0; +} + +static inline void cec_msg_set_menu_language(struct cec_msg *msg, + const char *language) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE; + memcpy(msg->msg + 2, language, 3); +} + +static inline void cec_ops_set_menu_language(const struct cec_msg *msg, + char *language) +{ + memcpy(language, msg->msg + 2, 3); + language[3] = '\0'; +} + +static inline void cec_msg_get_menu_language(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE; + msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0; +} + +/* + * Assumes a single RC Profile byte and a single Device Features byte, + * i.e. no extended features are supported by this helper function. + * + * As of CEC 2.0 no extended features are defined, should those be added + * in the future, then this function needs to be adapted or a new function + * should be added. + */ +static inline void cec_msg_report_features(struct cec_msg *msg, + __u8 cec_version, __u8 all_device_types, + __u8 rc_profile, __u8 dev_features) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_FEATURES; + msg->msg[2] = cec_version; + msg->msg[3] = all_device_types; + msg->msg[4] = rc_profile; + msg->msg[5] = dev_features; +} + +static inline void cec_ops_report_features(const struct cec_msg *msg, + __u8 *cec_version, __u8 *all_device_types, + const __u8 **rc_profile, const __u8 **dev_features) +{ + const __u8 *p = &msg->msg[4]; + + *cec_version = msg->msg[2]; + *all_device_types = msg->msg[3]; + *rc_profile = p; + while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT)) + p++; + if (!(*p & CEC_OP_FEAT_EXT)) { + *dev_features = p + 1; + while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT)) + p++; + } + if (*p & CEC_OP_FEAT_EXT) + *rc_profile = *dev_features = NULL; +} + +static inline void cec_msg_give_features(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_FEATURES; + msg->reply = reply ? 
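/*
 * cec_ops_report_features() above hands back pointers into the message
 * rather than copies because the RC-profile and device-features
 * operands are variable length: bit 7 (CEC_OP_FEAT_EXT) of each byte
 * means "another byte follows".  The walk stops at the payload end, and
 * both pointers come back NULL for a malformed message whose last byte
 * still has the extension bit set, so callers should test for NULL
 * before dereferencing (hypothetical caller):
 *
 *	if (rc_profile)
 *		first_profile_byte = rc_profile[0];
 */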
CEC_MSG_REPORT_FEATURES : 0; +} + +/* Deck Control Feature */ +static inline void cec_msg_deck_control(struct cec_msg *msg, + __u8 deck_control_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_CONTROL; + msg->msg[2] = deck_control_mode; +} + +static inline void cec_ops_deck_control(const struct cec_msg *msg, + __u8 *deck_control_mode) +{ + *deck_control_mode = msg->msg[2]; +} + +static inline void cec_msg_deck_status(struct cec_msg *msg, + __u8 deck_info) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_DECK_STATUS; + msg->msg[2] = deck_info; +} + +static inline void cec_ops_deck_status(const struct cec_msg *msg, + __u8 *deck_info) +{ + *deck_info = msg->msg[2]; +} + +static inline void cec_msg_give_deck_status(struct cec_msg *msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? CEC_MSG_DECK_STATUS : 0; +} + +static inline void cec_ops_give_deck_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_play(struct cec_msg *msg, + __u8 play_mode) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_PLAY; + msg->msg[2] = play_mode; +} + +static inline void cec_ops_play(const struct cec_msg *msg, + __u8 *play_mode) +{ + *play_mode = msg->msg[2]; +} + + +/* Tuner Control Feature */ +struct cec_op_tuner_device_info { + __u8 rec_flag; + __u8 tuner_display_info; + bool is_analog; + union { + struct cec_op_digital_service_id digital; + struct { + __u8 ana_bcast_type; + __u16 ana_freq; + __u8 bcast_system; + } analog; + }; +}; + +static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg, + __u8 rec_flag, + __u8 tuner_display_info, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 7; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + msg->msg[3] = ana_bcast_type; + msg->msg[4] = ana_freq >> 8; + msg->msg[5] = ana_freq & 0xff; + msg->msg[6] = bcast_system; +} + +static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg, + __u8 rec_flag, __u8 tuner_display_info, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 10; + msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS; + msg->msg[2] = (rec_flag << 7) | tuner_display_info; + cec_set_digital_service_id(msg->msg + 3, digital); +} + +static inline void cec_msg_tuner_device_status(struct cec_msg *msg, + const struct cec_op_tuner_device_info *tuner_dev_info) +{ + if (tuner_dev_info->is_analog) + cec_msg_tuner_device_status_analog(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + tuner_dev_info->analog.ana_bcast_type, + tuner_dev_info->analog.ana_freq, + tuner_dev_info->analog.bcast_system); + else + cec_msg_tuner_device_status_digital(msg, + tuner_dev_info->rec_flag, + tuner_dev_info->tuner_display_info, + &tuner_dev_info->digital); +} + +static inline void cec_ops_tuner_device_status(const struct cec_msg *msg, + struct cec_op_tuner_device_info *tuner_dev_info) +{ + tuner_dev_info->is_analog = msg->len < 10; + tuner_dev_info->rec_flag = msg->msg[2] >> 7; + tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f; + if (tuner_dev_info->is_analog) { + tuner_dev_info->analog.ana_bcast_type = msg->msg[3]; + tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5]; + tuner_dev_info->analog.bcast_system = msg->msg[6]; + return; + } + cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital); +} + +static inline void cec_msg_give_tuner_device_status(struct cec_msg 
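/*
 * cec_ops_tuner_device_status() above disambiguates its union purely by
 * message length: the analog encoder always emits 7 bytes and the
 * digital one 10, so "msg->len < 10" doubles as the is_analog flag of
 * struct cec_op_tuner_device_info.
 */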
*msg, + bool reply, + __u8 status_req) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS; + msg->msg[2] = status_req; + msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0; +} + +static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg, + __u8 *status_req) +{ + *status_req = msg->msg[2]; +} + +static inline void cec_msg_select_analogue_service(struct cec_msg *msg, + __u8 ana_bcast_type, + __u16 ana_freq, + __u8 bcast_system) +{ + msg->len = 6; + msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE; + msg->msg[2] = ana_bcast_type; + msg->msg[3] = ana_freq >> 8; + msg->msg[4] = ana_freq & 0xff; + msg->msg[5] = bcast_system; +} + +static inline void cec_ops_select_analogue_service(const struct cec_msg *msg, + __u8 *ana_bcast_type, + __u16 *ana_freq, + __u8 *bcast_system) +{ + *ana_bcast_type = msg->msg[2]; + *ana_freq = (msg->msg[3] << 8) | msg->msg[4]; + *bcast_system = msg->msg[5]; +} + +static inline void cec_msg_select_digital_service(struct cec_msg *msg, + const struct cec_op_digital_service_id *digital) +{ + msg->len = 9; + msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE; + cec_set_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_ops_select_digital_service(const struct cec_msg *msg, + struct cec_op_digital_service_id *digital) +{ + cec_get_digital_service_id(msg->msg + 2, digital); +} + +static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT; +} + +static inline void cec_msg_tuner_step_increment(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT; +} + + +/* Vendor Specific Commands Feature */ +static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; +} + +static inline void cec_ops_device_vendor_id(const struct cec_msg *msg, + __u32 *vendor_id) +{ + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; +} + +static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID; + msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0; +} + +static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP; +} + + +/* OSD Display Feature */ +static inline void cec_msg_set_osd_string(struct cec_msg *msg, + __u8 disp_ctl, + const char *osd) +{ + unsigned int len = strlen(osd); + + if (len > 13) + len = 13; + msg->len = 3 + len; + msg->msg[1] = CEC_MSG_SET_OSD_STRING; + msg->msg[2] = disp_ctl; + memcpy(msg->msg + 3, osd, len); +} + +static inline void cec_ops_set_osd_string(const struct cec_msg *msg, + __u8 *disp_ctl, + char *osd) +{ + unsigned int len = msg->len > 3 ? msg->len - 3 : 0; + + *disp_ctl = msg->msg[2]; + if (len > 13) + len = 13; + memcpy(osd, msg->msg + 3, len); + osd[len] = '\0'; +} + + +/* Device OSD Transfer Feature */ +static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name) +{ + unsigned int len = strlen(name); + + if (len > 14) + len = 14; + msg->len = 2 + len; + msg->msg[1] = CEC_MSG_SET_OSD_NAME; + memcpy(msg->msg + 2, name, len); +} + +static inline void cec_ops_set_osd_name(const struct cec_msg *msg, + char *name) +{ + unsigned int len = msg->len > 2 ? 
msg->len - 2 : 0;
+
+        if (len > 14)
+                len = 14;
+        memcpy(name, msg->msg + 2, len);
+        name[len] = '\0';
+}
+
+static inline void cec_msg_give_osd_name(struct cec_msg *msg,
+                                         bool reply)
+{
+        msg->len = 2;
+        msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
+        msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+                                       __u8 menu_state)
+{
+        msg->len = 3;
+        msg->msg[1] = CEC_MSG_MENU_STATUS;
+        msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+                                       __u8 *menu_state)
+{
+        *menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+                                        bool reply,
+                                        __u8 menu_req)
+{
+        msg->len = 3;
+        msg->msg[1] = CEC_MSG_MENU_REQUEST;
+        msg->msg[2] = menu_req;
+        msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+                                        __u8 *menu_req)
+{
+        *menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+        __u8 ui_cmd;
+        bool has_opt_arg;
+        union {
+                struct cec_op_channel_data channel_identifier;
+                __u8 ui_broadcast_type;
+                __u8 ui_sound_presentation_control;
+                __u8 play_mode;
+                __u8 ui_function_media;
+                __u8 ui_function_select_av_input;
+                __u8 ui_function_select_audio_input;
+        };
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+                                        const struct cec_op_ui_command *ui_cmd)
+{
+        msg->len = 3;
+        msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+        msg->msg[2] = ui_cmd->ui_cmd;
+        if (!ui_cmd->has_opt_arg)
+                return;
+        switch (ui_cmd->ui_cmd) {
+        case 0x56:
+        case 0x57:
+        case 0x60:
+        case 0x68:
+        case 0x69:
+        case 0x6a:
+                /* The optional operand is one byte for all these ui commands */
+                msg->len++;
+                msg->msg[3] = ui_cmd->play_mode;
+                break;
+        case 0x67:
+                msg->len += 4;
+                msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+                              (ui_cmd->channel_identifier.major >> 8);
+                msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+                msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+                msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+                break;
+        }
+}
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+                                                struct cec_op_ui_command *ui_cmd)
+{
+        ui_cmd->ui_cmd = msg->msg[2];
+        ui_cmd->has_opt_arg = false;
+        if (msg->len == 3)
+                return;
+        switch (ui_cmd->ui_cmd) {
+        case 0x56:
+        case 0x57:
+        case 0x60:
+        case 0x68:
+        case 0x69:
+        case 0x6a:
+                /* The optional operand is one byte for all these ui commands */
+                ui_cmd->play_mode = msg->msg[3];
+                ui_cmd->has_opt_arg = true;
+                break;
+        case 0x67:
+                if (msg->len < 7)
+                        break;
+                ui_cmd->has_opt_arg = true;
+                ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+                ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
+                ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+                break;
+        }
+}
+
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+        msg->len = 2;
+        msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+                                               __u8 pwr_state)
+{
+        msg->len = 3;
+        msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+        msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+                                               __u8 *pwr_state)
+{
+        *pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+                                                    bool reply)
+{
+        msg->len = 2;
+        msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
+
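/*
 * In cec_msg_user_control_pressed() above, only a handful of UI
 * commands (0x56..0x6a function selects and play, 0x67 channel) carry
 * an optional operand, so has_opt_arg plus a union keeps struct
 * cec_op_ui_command compact.  Hypothetical sender pressing "play
 * function" (the play-mode value is an assumed placeholder):
 *
 *	struct cec_op_ui_command cmd = {
 *		.ui_cmd = 0x60,
 *		.has_opt_arg = true,
 *		.play_mode = 0x05,
 *	};
 *	cec_msg_user_control_pressed(&msg, &cmd);
 */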
msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0; +} + +/* General Protocol Messages */ +static inline void cec_msg_feature_abort(struct cec_msg *msg, + __u8 abort_msg, __u8 reason) +{ + msg->len = 4; + msg->msg[1] = CEC_MSG_FEATURE_ABORT; + msg->msg[2] = abort_msg; + msg->msg[3] = reason; +} + +static inline void cec_ops_feature_abort(const struct cec_msg *msg, + __u8 *abort_msg, __u8 *reason) +{ + *abort_msg = msg->msg[2]; + *reason = msg->msg[3]; +} + +/* This changes the current message into a feature abort message */ +static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason) +{ + cec_msg_set_reply_to(msg, msg); + msg->len = 4; + msg->msg[2] = msg->msg[1]; + msg->msg[3] = reason; + msg->msg[1] = CEC_MSG_FEATURE_ABORT; +} + +static inline void cec_msg_abort(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_ABORT; +} + + +/* System Audio Control Feature */ +static inline void cec_msg_report_audio_status(struct cec_msg *msg, + __u8 aud_mute_status, + __u8 aud_vol_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS; + msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f); +} + +static inline void cec_ops_report_audio_status(const struct cec_msg *msg, + __u8 *aud_mute_status, + __u8 *aud_vol_status) +{ + *aud_mute_status = msg->msg[2] >> 7; + *aud_vol_status = msg->msg[2] & 0x7f; +} + +static inline void cec_msg_give_audio_status(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS; + msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0; +} + +static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg, + __u8 sys_aud_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE; + msg->msg[2] = sys_aud_status; +} + +static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg, + __u8 *sys_aud_status) +{ + *sys_aud_status = msg->msg[2]; +} + +static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg, + bool reply, + __u16 phys_addr) +{ + msg->len = phys_addr == 0xffff ? 2 : 4; + msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0; + +} + +static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg, + __u16 *phys_addr) +{ + if (msg->len < 4) + *phys_addr = 0xffff; + else + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg, + __u8 sys_aud_status) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS; + msg->msg[2] = sys_aud_status; +} + +static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg, + __u8 *sys_aud_status) +{ + *sys_aud_status = msg->msg[2]; +} + +static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS; + msg->reply = reply ? 
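/*
 * cec_msg_reply_feature_abort() above is the one helper that rewrites a
 * *received* message in place: cec_msg_set_reply_to() swaps the
 * initiator/destination nibbles, the rejected opcode moves from msg[1]
 * into the first operand, and the opcode itself becomes
 * CEC_MSG_FEATURE_ABORT.  A follower that got an unsupported opcode
 * would do roughly this (reason constant assumed from linux/cec.h):
 *
 *	cec_msg_reply_feature_abort(&msg, CEC_OP_ABORT_UNRECOGNIZED_OP);
 */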
CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0; +} + +static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg, + __u8 num_descriptors, + const __u32 *descriptors) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors * 3; + msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR; + for (i = 0; i < num_descriptors; i++) { + msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff; + msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff; + msg->msg[4 + i * 3] = descriptors[i] & 0xff; + } +} + +static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u32 *descriptors) +{ + unsigned int i; + + *num_descriptors = (msg->len - 2) / 3; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) + descriptors[i] = (msg->msg[2 + i * 3] << 16) | + (msg->msg[3 + i * 3] << 8) | + msg->msg[4 + i * 3]; +} + +static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg, + bool reply, + __u8 num_descriptors, + const __u8 *audio_format_id, + const __u8 *audio_format_code) +{ + unsigned int i; + + if (num_descriptors > 4) + num_descriptors = 4; + msg->len = 2 + num_descriptors; + msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR; + msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0; + for (i = 0; i < num_descriptors; i++) + msg->msg[2 + i] = (audio_format_id[i] << 6) | + (audio_format_code[i] & 0x3f); +} + +static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg, + __u8 *num_descriptors, + __u8 *audio_format_id, + __u8 *audio_format_code) +{ + unsigned int i; + + *num_descriptors = msg->len - 2; + if (*num_descriptors > 4) + *num_descriptors = 4; + for (i = 0; i < *num_descriptors; i++) { + audio_format_id[i] = msg->msg[2 + i] >> 6; + audio_format_code[i] = msg->msg[2 + i] & 0x3f; + } +} + + +/* Audio Rate Control Feature */ +static inline void cec_msg_set_audio_rate(struct cec_msg *msg, + __u8 audio_rate) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_AUDIO_RATE; + msg->msg[2] = audio_rate; +} + +static inline void cec_ops_set_audio_rate(const struct cec_msg *msg, + __u8 *audio_rate) +{ + *audio_rate = msg->msg[2]; +} + + +/* Audio Return Channel Control Feature */ +static inline void cec_msg_report_arc_initiated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED; +} + +static inline void cec_msg_initiate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_INITIATE_ARC; + msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0; +} + +static inline void cec_msg_request_arc_initiation(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION; + msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0; +} + +static inline void cec_msg_report_arc_terminated(struct cec_msg *msg) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED; +} + +static inline void cec_msg_terminate_arc(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_TERMINATE_ARC; + msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0; +} + +static inline void cec_msg_request_arc_termination(struct cec_msg *msg, + bool reply) +{ + msg->len = 2; + msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION; + msg->reply = reply ? 
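/*
 * The short-audio-descriptor helpers above pack each 24-bit descriptor
 * into three big-endian bytes, at most four per message (2 + 4 * 3
 * stays within the CEC payload limit), while the request variant
 * squeezes a 2-bit format ID and a 6-bit format code per descriptor
 * into a single byte: (id << 6) | (code & 0x3f).
 */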
CEC_MSG_TERMINATE_ARC : 0; +} + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +static inline void cec_msg_report_current_latency(struct cec_msg *msg, + __u16 phys_addr, + __u8 video_latency, + __u8 low_latency_mode, + __u8 audio_out_compensated, + __u8 audio_out_delay) +{ + msg->len = 7; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->msg[4] = video_latency; + msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; + msg->msg[6] = audio_out_delay; +} + +static inline void cec_ops_report_current_latency(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *video_latency, + __u8 *low_latency_mode, + __u8 *audio_out_compensated, + __u8 *audio_out_delay) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *video_latency = msg->msg[4]; + *low_latency_mode = (msg->msg[5] >> 2) & 1; + *audio_out_compensated = msg->msg[5] & 3; + *audio_out_delay = msg->msg[6]; +} + +static inline void cec_msg_request_current_latency(struct cec_msg *msg, + bool reply, + __u16 phys_addr) +{ + msg->len = 4; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY; + msg->msg[2] = phys_addr >> 8; + msg->msg[3] = phys_addr & 0xff; + msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0; +} + +static inline void cec_ops_request_current_latency(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + + +/* Capability Discovery and Control Feature */ +static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2) +{ + msg->len = 9; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; +} + +static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; +} + +static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg, + __u16 target_phys_addr, + __u8 hec_func_state, + __u8 host_func_state, + __u8 enc_func_state, + __u8 cdc_errcode, + __u8 has_field, + __u16 hec_field) +{ + msg->len = has_field ? 
10 : 8; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE; + msg->msg[5] = target_phys_addr >> 8; + msg->msg[6] = target_phys_addr & 0xff; + msg->msg[7] = (hec_func_state << 6) | + (host_func_state << 4) | + (enc_func_state << 2) | + cdc_errcode; + if (has_field) { + msg->msg[8] = hec_field >> 8; + msg->msg[9] = hec_field & 0xff; + } +} + +static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *target_phys_addr, + __u8 *hec_func_state, + __u8 *host_func_state, + __u8 *enc_func_state, + __u8 *cdc_errcode, + __u8 *has_field, + __u16 *hec_field) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6]; + *hec_func_state = msg->msg[7] >> 6; + *host_func_state = (msg->msg[7] >> 4) & 3; + *enc_func_state = (msg->msg[7] >> 2) & 3; + *cdc_errcode = msg->msg[7] & 3; + *has_field = msg->len >= 10; + *hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0; +} + +static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2, + __u8 hec_set_state, + __u16 phys_addr3, + __u16 phys_addr4, + __u16 phys_addr5) +{ + msg->len = 10; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; + msg->msg[9] = hec_set_state; + if (phys_addr3 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr3 >> 8; + msg->msg[msg->len++] = phys_addr3 & 0xff; + if (phys_addr4 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr4 >> 8; + msg->msg[msg->len++] = phys_addr4 & 0xff; + if (phys_addr5 != CEC_PHYS_ADDR_INVALID) { + msg->msg[msg->len++] = phys_addr5 >> 8; + msg->msg[msg->len++] = phys_addr5 & 0xff; + } + } + } +} + +static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2, + __u8 *hec_set_state, + __u16 *phys_addr3, + __u16 *phys_addr4, + __u16 *phys_addr5) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; + *hec_set_state = msg->msg[9]; + *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID; + if (msg->len >= 12) + *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11]; + if (msg->len >= 14) + *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13]; + if (msg->len >= 16) + *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15]; +} + +static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg, + __u16 phys_addr1, + __u8 hec_set_state) +{ + msg->len = 8; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = hec_set_state; +} + +static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u8 *hec_set_state) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *hec_set_state = msg->msg[7]; +}
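A note on cec_msg_cdc_hec_set_state() above: the nested ifs make the optional trailing addresses stop at the first CEC_PHYS_ADDR_INVALID, so a caller passes the sentinel for every address it wants to omit. A minimal usage sketch (not part of the patch; the initiator and physical addresses are made-up example values):

	struct cec_msg msg;

	/* Activate HEC between two devices and omit the three optional
	 * addresses by passing the invalid sentinel for all of them. */
	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_BROADCAST);
	cec_msg_cdc_hec_set_state(&msg, 0x1000, 0x2000,
				  CEC_OP_HEC_SET_STATE_ACTIVATE,
				  CEC_PHYS_ADDR_INVALID, CEC_PHYS_ADDR_INVALID,
				  CEC_PHYS_ADDR_INVALID);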
+ +static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg, + __u16 phys_addr1, + __u16 phys_addr2, + __u16 phys_addr3) +{ + msg->len = 11; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION; + msg->msg[5] = phys_addr1 >> 8; + msg->msg[6] = phys_addr1 & 0xff; + msg->msg[7] = phys_addr2 >> 8; + msg->msg[8] = phys_addr2 & 0xff; + msg->msg[9] = phys_addr3 >> 8; + msg->msg[10] = phys_addr3 & 0xff; +} + +static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg, + __u16 *phys_addr, + __u16 *phys_addr1, + __u16 *phys_addr2, + __u16 *phys_addr3) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; + *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; + *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10]; +} + +static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE; +} + +static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg) +{ + msg->len = 5; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER; +} + +static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg, + __u16 *phys_addr) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; +} + +static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg, + __u8 input_port, + __u8 hpd_state) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE; + msg->msg[5] = (input_port << 4) | hpd_state; +} + +static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *input_port, + __u8 *hpd_state) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *input_port = msg->msg[5] >> 4; + *hpd_state = msg->msg[5] & 0xf; +} + +static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg, + __u8 hpd_state, + __u8 hpd_error) +{ + msg->len = 6; + msg->msg[0] |= 0xf; /* broadcast */ + msg->msg[1] = CEC_MSG_CDC_MESSAGE; + /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ + msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE; + msg->msg[5] = (hpd_state << 4) | hpd_error; +} + +static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg, + __u16 *phys_addr, + __u8 *hpd_state, + __u8 *hpd_error) +{ + *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; + *hpd_state = msg->msg[5] >> 4; + *hpd_error = msg->msg[5] & 0xf; +} + +#endif diff --git a/include/linux/cec.h b/include/linux/cec.h new file mode 100644 index 000000000000..b3e22893a002 --- /dev/null +++ b/include/linux/cec.h @@ -0,0 +1,1011 @@ +/* + * cec - HDMI Consumer Electronics Control public header + * + * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * Note: this framework is still in staging and it is likely the API + * will change before it goes out of staging. + * + * Once it is moved out of staging this header will move to uapi. + */ +#ifndef _CEC_UAPI_H +#define _CEC_UAPI_H + +#include <linux/types.h> + +#define CEC_MAX_MSG_SIZE 16 + +/** + * struct cec_msg - CEC message structure. + * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message transmission has finished. + * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the + * driver when the message was received. + * @len: Length in bytes of the message. + * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE. + * Set to 0 if you want to wait forever. This timeout can also be + * used with CEC_TRANSMIT as the timeout for waiting for a reply. + * If 0, then it will use a 1 second timeout instead of waiting + * forever as is done with CEC_RECEIVE. + * @sequence: The framework assigns a sequence number to messages that are + * sent. This can be used to track replies to previously sent + * messages. + * @flags: Set to 0. + * @msg: The message payload. + * @reply: This field is ignored with CEC_RECEIVE and is only used by + * CEC_TRANSMIT. If non-zero, then wait for a reply with this + * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for + * a possible ABORT reply. If there was an error when sending the + * msg or FeatureAbort was returned, then reply is set to 0. + * If reply is non-zero upon return, then len/msg are set to + * the received message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to + * the received feature abort message. + * If reply is zero upon return and status has the + * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at + * all. If reply is non-zero for CEC_TRANSMIT and the message is a + * broadcast, then -EINVAL is returned. + * If reply is non-zero, then timeout is set to 1000 (the required + * maximum response time).
+ * @rx_status: The message receive status bits. Set by the driver. + * @tx_status: The message transmit status bits. Set by the driver. + * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver. + * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver. + * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the + * driver. + * @tx_error_cnt: The number of 'Error' events. Set by the driver. + */ +struct cec_msg { + __u64 tx_ts; + __u64 rx_ts; + __u32 len; + __u32 timeout; + __u32 sequence; + __u32 flags; + __u8 msg[CEC_MAX_MSG_SIZE]; + __u8 reply; + __u8 rx_status; + __u8 tx_status; + __u8 tx_arb_lost_cnt; + __u8 tx_nack_cnt; + __u8 tx_low_drive_cnt; + __u8 tx_error_cnt; +}; + +/** + * cec_msg_initiator - return the initiator's logical address. + * @msg: the message structure + */ +static inline __u8 cec_msg_initiator(const struct cec_msg *msg) +{ + return msg->msg[0] >> 4; +} + +/** + * cec_msg_destination - return the destination's logical address. + * @msg: the message structure + */ +static inline __u8 cec_msg_destination(const struct cec_msg *msg) +{ + return msg->msg[0] & 0xf; +} + +/** + * cec_msg_opcode - return the opcode of the message, -1 for poll + * @msg: the message structure + */ +static inline int cec_msg_opcode(const struct cec_msg *msg) +{ + return msg->len > 1 ? msg->msg[1] : -1; +} + +/** + * cec_msg_is_broadcast - return true if this is a broadcast message. + * @msg: the message structure + */ +static inline bool cec_msg_is_broadcast(const struct cec_msg *msg) +{ + return (msg->msg[0] & 0xf) == 0xf; +} + +/** + * cec_msg_init - initialize the message structure. + * @msg: the message structure + * @initiator: the logical address of the initiator + * @destination: the logical address of the destination (0xf for broadcast) + * + * The whole structure is zeroed, the len field is set to 1 (i.e. a poll + * message) and the initiator and destination are filled in. + */ +static inline void cec_msg_init(struct cec_msg *msg, + __u8 initiator, __u8 destination) +{ + memset(msg, 0, sizeof(*msg)); + msg->msg[0] = (initiator << 4) | destination; + msg->len = 1; +}
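cec_msg_init() only builds a poll message; the caller then fills in the opcode and operands (or uses the cec-funcs.h helpers from earlier in this patch) before handing the message to the framework. A minimal userspace fragment as a sketch, assuming fd is an open CEC device file descriptor; it uses the CEC_TRANSMIT ioctl, cec_msg_status_is_ok() and the opcode defines that all appear further down in this header:

	struct cec_msg msg;

	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	msg.len = 2;
	msg.msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
	msg.reply = CEC_MSG_REPORT_POWER_STATUS;	/* wait for this reply */

	if (!ioctl(fd, CEC_TRANSMIT, &msg) && cec_msg_status_is_ok(&msg))
		/* msg.msg[2] holds the pwr_state operand of the reply */
		printf("power status: %u\n", msg.msg[2]);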
+ +/** + * cec_msg_set_reply_to - fill in destination/initiator in a reply message. + * @msg: the message structure for the reply + * @orig: the original message structure + * + * Set the msg destination to the orig initiator and the msg initiator to the + * orig destination. Note that msg and orig may be the same pointer, in which + * case the change is done in place. + */ +static inline void cec_msg_set_reply_to(struct cec_msg *msg, + struct cec_msg *orig) +{ + /* The destination becomes the initiator and vice versa */ + msg->msg[0] = (cec_msg_destination(orig) << 4) | + cec_msg_initiator(orig); + msg->reply = msg->timeout = 0; +} + +/* cec status field */ +#define CEC_TX_STATUS_OK (1 << 0) +#define CEC_TX_STATUS_ARB_LOST (1 << 1) +#define CEC_TX_STATUS_NACK (1 << 2) +#define CEC_TX_STATUS_LOW_DRIVE (1 << 3) +#define CEC_TX_STATUS_ERROR (1 << 4) +#define CEC_TX_STATUS_MAX_RETRIES (1 << 5) + +#define CEC_RX_STATUS_OK (1 << 0) +#define CEC_RX_STATUS_TIMEOUT (1 << 1) +#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2) + +static inline bool cec_msg_status_is_ok(const struct cec_msg *msg) +{ + if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK)) + return false; + if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK)) + return false; + if (!msg->tx_status && !msg->rx_status) + return false; + return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT); +} + +#define CEC_LOG_ADDR_INVALID 0xff +#define CEC_PHYS_ADDR_INVALID 0xffff + +/* + * The maximum number of logical addresses one device can be assigned to. + * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The + * Analog Devices CEC hardware supports 3. So let's go wild and go for 4. + */ +#define CEC_MAX_LOG_ADDRS 4 + +/* The logical addresses defined by CEC 2.0 */ +#define CEC_LOG_ADDR_TV 0 +#define CEC_LOG_ADDR_RECORD_1 1 +#define CEC_LOG_ADDR_RECORD_2 2 +#define CEC_LOG_ADDR_TUNER_1 3 +#define CEC_LOG_ADDR_PLAYBACK_1 4 +#define CEC_LOG_ADDR_AUDIOSYSTEM 5 +#define CEC_LOG_ADDR_TUNER_2 6 +#define CEC_LOG_ADDR_TUNER_3 7 +#define CEC_LOG_ADDR_PLAYBACK_2 8 +#define CEC_LOG_ADDR_RECORD_3 9 +#define CEC_LOG_ADDR_TUNER_4 10 +#define CEC_LOG_ADDR_PLAYBACK_3 11 +#define CEC_LOG_ADDR_BACKUP_1 12 +#define CEC_LOG_ADDR_BACKUP_2 13 +#define CEC_LOG_ADDR_SPECIFIC 14 +#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */ +#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */ + +/* The logical address types that the CEC device wants to claim */ +#define CEC_LOG_ADDR_TYPE_TV 0 +#define CEC_LOG_ADDR_TYPE_RECORD 1 +#define CEC_LOG_ADDR_TYPE_TUNER 2 +#define CEC_LOG_ADDR_TYPE_PLAYBACK 3 +#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4 +#define CEC_LOG_ADDR_TYPE_SPECIFIC 5 +#define CEC_LOG_ADDR_TYPE_UNREGISTERED 6 +/* + * Switches should use UNREGISTERED. + * Processors should use SPECIFIC.
+ */ + +#define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV) +#define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \ + (1 << CEC_LOG_ADDR_RECORD_2) | \ + (1 << CEC_LOG_ADDR_RECORD_3)) +#define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \ + (1 << CEC_LOG_ADDR_TUNER_2) | \ + (1 << CEC_LOG_ADDR_TUNER_3) | \ + (1 << CEC_LOG_ADDR_TUNER_4)) +#define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_2) | \ + (1 << CEC_LOG_ADDR_PLAYBACK_3)) +#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM) +#define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \ + (1 << CEC_LOG_ADDR_BACKUP_2)) +#define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC) +#define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED) + +static inline bool cec_has_tv(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TV; +} + +static inline bool cec_has_record(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD; +} + +static inline bool cec_has_tuner(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER; +} + +static inline bool cec_has_playback(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK; +} + +static inline bool cec_has_audiosystem(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM; +} + +static inline bool cec_has_backup(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP; +} + +static inline bool cec_has_specific(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC; +} + +static inline bool cec_is_unregistered(__u16 log_addr_mask) +{ + return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED; +} + +static inline bool cec_is_unconfigured(__u16 log_addr_mask) +{ + return log_addr_mask == 0; +} + +/* + * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID + * should be disabled (CEC_S_VENDOR_ID) + */ +#define CEC_VENDOR_ID_NONE 0xffffffff + +/* The message handling modes */ +/* Modes for initiator */ +#define CEC_MODE_NO_INITIATOR (0x0 << 0) +#define CEC_MODE_INITIATOR (0x1 << 0) +#define CEC_MODE_EXCL_INITIATOR (0x2 << 0) +#define CEC_MODE_INITIATOR_MSK 0x0f + +/* Modes for follower */ +#define CEC_MODE_NO_FOLLOWER (0x0 << 4) +#define CEC_MODE_FOLLOWER (0x1 << 4) +#define CEC_MODE_EXCL_FOLLOWER (0x2 << 4) +#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4) +#define CEC_MODE_MONITOR (0xe << 4) +#define CEC_MODE_MONITOR_ALL (0xf << 4) +#define CEC_MODE_FOLLOWER_MSK 0xf0 + +/* Userspace has to configure the physical address */ +#define CEC_CAP_PHYS_ADDR (1 << 0) +/* Userspace has to configure the logical addresses */ +#define CEC_CAP_LOG_ADDRS (1 << 1) +/* Userspace can transmit messages (and thus become follower as well) */ +#define CEC_CAP_TRANSMIT (1 << 2) +/* + * Passthrough all messages instead of processing them. + */ +#define CEC_CAP_PASSTHROUGH (1 << 3) +/* Supports remote control */ +#define CEC_CAP_RC (1 << 4) +/* Hardware can monitor all messages, not just directed and broadcast. */ +#define CEC_CAP_MONITOR_ALL (1 << 5) + +/** + * struct cec_caps - CEC capabilities structure. + * @driver: name of the CEC device driver. + * @name: name of the CEC device. @driver + @name must be unique. + * @available_log_addrs: number of available logical addresses. + * @capabilities: capabilities of the CEC adapter. + * @version: version of the CEC adapter framework. 
+ */ +struct cec_caps { + char driver[32]; + char name[32]; + __u32 available_log_addrs; + __u32 capabilities; + __u32 version; +}; + +/** + * struct cec_log_addrs - CEC logical addresses structure. + * @log_addr: the claimed logical addresses. Set by the driver. + * @log_addr_mask: current logical address mask. Set by the driver. + * @cec_version: the CEC version that the adapter should implement. Set by the + * caller. + * @num_log_addrs: how many logical addresses should be claimed. Set by the + * caller. + * @vendor_id: the vendor ID of the device. Set by the caller. + * @flags: set to 0. + * @osd_name: the OSD name of the device. Set by the caller. + * @primary_device_type: the primary device type for each logical address. + * Set by the caller. + * @log_addr_type: the logical address types. Set by the caller. + * @all_device_types: CEC 2.0: all device types represented by the logical + * address. Set by the caller. + * @features: CEC 2.0: The logical address features. Set by the caller. + */ +struct cec_log_addrs { + __u8 log_addr[CEC_MAX_LOG_ADDRS]; + __u16 log_addr_mask; + __u8 cec_version; + __u8 num_log_addrs; + __u32 vendor_id; + __u32 flags; + char osd_name[15]; + __u8 primary_device_type[CEC_MAX_LOG_ADDRS]; + __u8 log_addr_type[CEC_MAX_LOG_ADDRS]; + + /* CEC 2.0 */ + __u8 all_device_types[CEC_MAX_LOG_ADDRS]; + __u8 features[CEC_MAX_LOG_ADDRS][12]; +}; + +/* Events */ + +/* Event that occurs when the adapter state changes */ +#define CEC_EVENT_STATE_CHANGE 1 +/* + * This event is sent when messages are lost because the application + * didn't empty the message queue in time + */ +#define CEC_EVENT_LOST_MSGS 2 + +#define CEC_EVENT_FL_INITIAL_STATE (1 << 0) + +/** + * struct cec_event_state_change - used when the CEC adapter changes state. + * @phys_addr: the current physical address + * @log_addr_mask: the current logical address mask + */ +struct cec_event_state_change { + __u16 phys_addr; + __u16 log_addr_mask; +}; + +/** + * struct cec_event_lost_msgs - tells you how many messages were lost. + * @lost_msgs: how many messages were lost. + */ +struct cec_event_lost_msgs { + __u32 lost_msgs; +}; + +/** + * struct cec_event - CEC event structure + * @ts: the timestamp of when the event was sent. + * @event: the event. + * @flags: event flags. + * @state_change: the event payload for CEC_EVENT_STATE_CHANGE. + * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS. + * @raw: array to pad the union. + */ +struct cec_event { + __u64 ts; + __u32 event; + __u32 flags; + union { + struct cec_event_state_change state_change; + struct cec_event_lost_msgs lost_msgs; + __u32 raw[16]; + }; +}; + +/* ioctls */ + +/* Adapter capabilities */ +#define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps) + +/* + * phys_addr is either 0 (if this is the CEC root device) + * or a valid physical address obtained from the sink's EDID + * as read by this CEC device (if this is a source device) + * or a physical address obtained and modified from a sink + * EDID and used for a sink CEC device. + * If nothing is connected, then phys_addr is 0xffff. + * See HDMI 1.4b, section 8.7 (Physical Address). + * + * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled + * internally. + */ +#define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16) +#define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16)
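For adapters that set CEC_CAP_PHYS_ADDR and CEC_CAP_LOG_ADDRS it is up to userspace to do this configuration. A rough sketch as a fragment, assuming fd is an open CEC device file descriptor and 0x1000 stands in for a physical address read from the EDID; CEC_ADAP_S_LOG_ADDRS and the device-type defines used here appear below in this header:

	struct cec_log_addrs laddrs = {};
	__u16 pa = 0x1000;

	/* Tell the adapter where it sits in the HDMI topology */
	ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &pa);

	/* Claim a single playback logical address */
	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.num_log_addrs = 1;
	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
	strcpy(laddrs.osd_name, "Sketch");
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;

	ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);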
+ +/* + * Configure the CEC adapter. It sets the device type and which + * logical address types it will try to claim. It will return which + * logical addresses it could actually claim. + * An error is returned if the adapter is disabled or if there + * is no physical address assigned. + */ + +#define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs) +#define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs) + +/* Transmit/receive a CEC command */ +#define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg) +#define CEC_RECEIVE _IOWR('a', 6, struct cec_msg) + +/* Dequeue CEC events */ +#define CEC_DQEVENT _IOWR('a', 7, struct cec_event) + +/* + * Get and set the message handling mode for this filehandle. + */ +#define CEC_G_MODE _IOR('a', 8, __u32) +#define CEC_S_MODE _IOW('a', 9, __u32) + +/* + * The remainder of this header defines all CEC messages and operands. + * The format matters since the cec-ctl utility parses it to generate + * code for implementing all these messages. + * + * Comments ending with 'Feature' group messages for each feature. + * If messages are part of multiple features, then the "Has also" + * comment is used to list the previously defined messages that are + * supported by the feature. + * + * Before operands are defined a comment is added that gives the + * name of the operand and in brackets the variable name of the + * corresponding argument in the cec-funcs.h function. + */ + +/* Messages */ + +/* One Touch Play Feature */ +#define CEC_MSG_ACTIVE_SOURCE 0x82 +#define CEC_MSG_IMAGE_VIEW_ON 0x04 +#define CEC_MSG_TEXT_VIEW_ON 0x0d + + +/* Routing Control Feature */ + +/* + * Has also: + * CEC_MSG_ACTIVE_SOURCE + */ + +#define CEC_MSG_INACTIVE_SOURCE 0x9d +#define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85 +#define CEC_MSG_ROUTING_CHANGE 0x80 +#define CEC_MSG_ROUTING_INFORMATION 0x81 +#define CEC_MSG_SET_STREAM_PATH 0x86 + + +/* Standby Feature */ +#define CEC_MSG_STANDBY 0x36 + + +/* One Touch Record Feature */ +#define CEC_MSG_RECORD_OFF 0x0b +#define CEC_MSG_RECORD_ON 0x09 +/* Record Source Type Operand (rec_src_type) */ +#define CEC_OP_RECORD_SRC_OWN 1 +#define CEC_OP_RECORD_SRC_DIGITAL 2 +#define CEC_OP_RECORD_SRC_ANALOG 3 +#define CEC_OP_RECORD_SRC_EXT_PLUG 4 +#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5 +/* Service Identification Method Operand (service_id_method) */ +#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0 +#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1 +/* Digital Service Broadcast System Operand (dig_bcast_system) */ +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19 +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a +#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b +/* Analogue Broadcast Type Operand (ana_bcast_type) */ +#define CEC_OP_ANA_BCAST_TYPE_CABLE 0 +#define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1 +#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2 +/* Broadcast System Operand (bcast_system) */ +#define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00 +#define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */ +#define CEC_OP_BCAST_SYSTEM_PAL_M 0x02 +#define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03 +#define CEC_OP_BCAST_SYSTEM_PAL_I 0x04 +#define CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05 +#define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06
+#define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07 +#define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08 +#define CEC_OP_BCAST_SYSTEM_OTHER 0x1f +/* Channel Number Format Operand (channel_number_fmt) */ +#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01 +#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02 + +#define CEC_MSG_RECORD_STATUS 0x0a +/* Record Status Operand (rec_status) */ +#define CEC_OP_RECORD_STATUS_CUR_SRC 0x01 +#define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02 +#define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03 +#define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04 +#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05 +#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06 +#define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09 +#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a +#define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b +#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c +#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d +#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e +#define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10 +#define CEC_OP_RECORD_STATUS_PLAYING 0x11 +#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12 +#define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13 +#define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14 +#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15 +#define CEC_OP_RECORD_STATUS_NO_SPACE 0x16 +#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17 +#define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a +#define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b +#define CEC_OP_RECORD_STATUS_OTHER 0x1f + +#define CEC_MSG_RECORD_TV_SCREEN 0x0f + + +/* Timer Programming Feature */ +#define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33 +/* Recording Sequence Operand (recording_seq) */ +#define CEC_OP_REC_SEQ_SUNDAY 0x01 +#define CEC_OP_REC_SEQ_MONDAY 0x02 +#define CEC_OP_REC_SEQ_TUESDAY 0x04 +#define CEC_OP_REC_SEQ_WEDNESDAY 0x08 +#define CEC_OP_REC_SEQ_THURSDAY 0x10 +#define CEC_OP_REC_SEQ_FRIDAY 0x20 +#define CEC_OP_REC_SEQ_SATURDAY 0x40 +#define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 + +#define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 + +#define CEC_MSG_CLEAR_EXT_TIMER 0xa1 +/* External Source Specifier Operand (ext_src_spec) */ +#define CEC_OP_EXT_SRC_PLUG 0x04 +#define CEC_OP_EXT_SRC_PHYS_ADDR 0x05 + +#define CEC_MSG_SET_ANALOGUE_TIMER 0x34 +#define CEC_MSG_SET_DIGITAL_TIMER 0x97 +#define CEC_MSG_SET_EXT_TIMER 0xa2 + +#define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67 +#define CEC_MSG_TIMER_CLEARED_STATUS 0x43 +/* Timer Cleared Status Data Operand (timer_cleared_status) */ +#define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00 +#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01 +#define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02 +#define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80 + +#define CEC_MSG_TIMER_STATUS 0x35 +/* Timer Overlap Warning Operand (timer_overlap_warning) */ +#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0 +#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1 +/* Media Info Operand (media_info) */ +#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0 +#define CEC_OP_MEDIA_INFO_PROT_MEDIA 1 +#define CEC_OP_MEDIA_INFO_NO_MEDIA 2 +/* Programmed Indicator Operand (prog_indicator) */ +#define CEC_OP_PROG_IND_NOT_PROGRAMMED 0 +#define CEC_OP_PROG_IND_PROGRAMMED 1 +/* Programmed Info Operand (prog_info) */ +#define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08 +#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09 +#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b +#define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a +/* Not Programmed Error Info Operand (prog_error) */ +#define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01 +#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02 +#define
CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03 +#define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04 +#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05 +#define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06 +#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07 +#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08 +#define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09 +#define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a +#define CEC_OP_PROG_ERROR_DUPLICATE 0x0e + + +/* System Information Feature */ +#define CEC_MSG_CEC_VERSION 0x9e +/* CEC Version Operand (cec_version) */ +#define CEC_OP_CEC_VERSION_1_3A 4 +#define CEC_OP_CEC_VERSION_1_4 5 +#define CEC_OP_CEC_VERSION_2_0 6 + +#define CEC_MSG_GET_CEC_VERSION 0x9f +#define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83 +#define CEC_MSG_GET_MENU_LANGUAGE 0x91 +#define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84 +/* Primary Device Type Operand (prim_devtype) */ +#define CEC_OP_PRIM_DEVTYPE_TV 0 +#define CEC_OP_PRIM_DEVTYPE_RECORD 1 +#define CEC_OP_PRIM_DEVTYPE_TUNER 3 +#define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4 +#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5 +#define CEC_OP_PRIM_DEVTYPE_SWITCH 6 +#define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7 + +#define CEC_MSG_SET_MENU_LANGUAGE 0x32 +#define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */ +/* All Device Types Operand (all_device_types) */ +#define CEC_OP_ALL_DEVTYPE_TV 0x80 +#define CEC_OP_ALL_DEVTYPE_RECORD 0x40 +#define CEC_OP_ALL_DEVTYPE_TUNER 0x20 +#define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10 +#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08 +#define CEC_OP_ALL_DEVTYPE_SWITCH 0x04 +/* + * And if you were wondering what happened to PROCESSOR devices: those should + * be mapped to a SWITCH. + */ + +/* Valid for RC Profile and Device Feature operands */ +#define CEC_OP_FEAT_EXT 0x80 /* Extension bit */ +/* RC Profile Operand (rc_profile) */ +#define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00 +#define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02 +#define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06 +#define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a +#define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e +#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50 +#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48 +#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44 +#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42 +#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41 +/* Device Feature Operand (dev_features) */ +#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40 +#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20 +#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10 +#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08 +#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04 +#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02 + +#define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */ + + +/* Deck Control Feature */ +#define CEC_MSG_DECK_CONTROL 0x42 +/* Deck Control Mode Operand (deck_control_mode) */ +#define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1 +#define CEC_OP_DECK_CTL_MODE_SKIP_REV 2 +#define CEC_OP_DECK_CTL_MODE_STOP 3 +#define CEC_OP_DECK_CTL_MODE_EJECT 4 + +#define CEC_MSG_DECK_STATUS 0x1b +/* Deck Info Operand (deck_info) */ +#define CEC_OP_DECK_INFO_PLAY 0x11 +#define CEC_OP_DECK_INFO_RECORD 0x12 +#define CEC_OP_DECK_INFO_PLAY_REV 0x13 +#define CEC_OP_DECK_INFO_STILL 0x14 +#define CEC_OP_DECK_INFO_SLOW 0x15 +#define CEC_OP_DECK_INFO_SLOW_REV 0x16 +#define CEC_OP_DECK_INFO_FAST_FWD 0x17 +#define CEC_OP_DECK_INFO_FAST_REV 0x18 +#define CEC_OP_DECK_INFO_NO_MEDIA 0x19 +#define CEC_OP_DECK_INFO_STOP 0x1a +#define CEC_OP_DECK_INFO_SKIP_FWD 0x1b +#define CEC_OP_DECK_INFO_SKIP_REV 0x1c +#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d +#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e +#define
CEC_OP_DECK_INFO_OTHER 0x1f + +#define CEC_MSG_GIVE_DECK_STATUS 0x1a +/* Status Request Operand (status_req) */ +#define CEC_OP_STATUS_REQ_ON 1 +#define CEC_OP_STATUS_REQ_OFF 2 +#define CEC_OP_STATUS_REQ_ONCE 3 + +#define CEC_MSG_PLAY 0x41 +/* Play Mode Operand (play_mode) */ +#define CEC_OP_PLAY_MODE_PLAY_FWD 0x24 +#define CEC_OP_PLAY_MODE_PLAY_REV 0x20 +#define CEC_OP_PLAY_MODE_PLAY_STILL 0x25 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06 +#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09 +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a +#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19 +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a +#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b + + +/* Tuner Control Feature */ +#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08 +#define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92 +#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 +#define CEC_MSG_TUNER_DEVICE_STATUS 0x07 +/* Recording Flag Operand (rec_flag) */ +#define CEC_OP_REC_FLAG_USED 0 +#define CEC_OP_REC_FLAG_NOT_USED 1 +/* Tuner Display Info Operand (tuner_display_info) */ +#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 +#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 +#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2 + +#define CEC_MSG_TUNER_STEP_DECREMENT 0x06 +#define CEC_MSG_TUNER_STEP_INCREMENT 0x05 + + +/* Vendor Specific Commands Feature */ + +/* + * Has also: + * CEC_MSG_CEC_VERSION + * CEC_MSG_GET_CEC_VERSION + */ +#define CEC_MSG_DEVICE_VENDOR_ID 0x87 +#define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c +#define CEC_MSG_VENDOR_COMMAND 0x89 +#define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0 +#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a +#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b + + +/* OSD Display Feature */ +#define CEC_MSG_SET_OSD_STRING 0x64 +/* Display Control Operand (disp_ctl) */ +#define CEC_OP_DISP_CTL_DEFAULT 0x00 +#define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40 +#define CEC_OP_DISP_CTL_CLEAR 0x80 + + +/* Device OSD Transfer Feature */ +#define CEC_MSG_GIVE_OSD_NAME 0x46 +#define CEC_MSG_SET_OSD_NAME 0x47 + + +/* Device Menu Control Feature */ +#define CEC_MSG_MENU_REQUEST 0x8d +/* Menu Request Type Operand (menu_req) */ +#define CEC_OP_MENU_REQUEST_ACTIVATE 0x00 +#define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01 +#define CEC_OP_MENU_REQUEST_QUERY 0x02 + +#define CEC_MSG_MENU_STATUS 0x8e +/* Menu State Operand (menu_state) */ +#define CEC_OP_MENU_STATE_ACTIVATED 0x00 +#define CEC_OP_MENU_STATE_DEACTIVATED 0x01 + +#define CEC_MSG_USER_CONTROL_PRESSED 0x44 +/* UI Broadcast Type Operand (ui_bcast_type) */ +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00 +#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30 +#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90 +#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91 +#define CEC_OP_UI_BCAST_TYPE_IP 0xa0 +/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */ +#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10 +#define 
CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20 +#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80 +#define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90 +#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0 +#define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1 +#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2 +#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2 +#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3 + +#define CEC_MSG_USER_CONTROL_RELEASED 0x45 + + +/* Remote Control Passthrough Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ + + +/* Power Status Feature */ +#define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f +#define CEC_MSG_REPORT_POWER_STATUS 0x90 +/* Power Status Operand (pwr_state) */ +#define CEC_OP_POWER_STATUS_ON 0 +#define CEC_OP_POWER_STATUS_STANDBY 1 +#define CEC_OP_POWER_STATUS_TO_ON 2 +#define CEC_OP_POWER_STATUS_TO_STANDBY 3 + + +/* General Protocol Messages */ +#define CEC_MSG_FEATURE_ABORT 0x00 +/* Abort Reason Operand (reason) */ +#define CEC_OP_ABORT_UNRECOGNIZED_OP 0 +#define CEC_OP_ABORT_INCORRECT_MODE 1 +#define CEC_OP_ABORT_NO_SOURCE 2 +#define CEC_OP_ABORT_INVALID_OP 3 +#define CEC_OP_ABORT_REFUSED 4 +#define CEC_OP_ABORT_UNDETERMINED 5 + +#define CEC_MSG_ABORT 0xff + + +/* System Audio Control Feature */ + +/* + * Has also: + * CEC_MSG_USER_CONTROL_PRESSED + * CEC_MSG_USER_CONTROL_RELEASED + */ +#define CEC_MSG_GIVE_AUDIO_STATUS 0x71 +#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d +#define CEC_MSG_REPORT_AUDIO_STATUS 0x7a +/* Audio Mute Status Operand (aud_mute_status) */ +#define CEC_OP_AUD_MUTE_STATUS_OFF 0 +#define CEC_OP_AUD_MUTE_STATUS_ON 1 + +#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3 +#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4 +#define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72 +/* System Audio Status Operand (sys_aud_status) */ +#define CEC_OP_SYS_AUD_STATUS_OFF 0 +#define CEC_OP_SYS_AUD_STATUS_ON 1 + +#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70 +#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e +/* Audio Format ID Operand (audio_format_id) */ +#define CEC_OP_AUD_FMT_ID_CEA861 0 +#define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 + + +/* Audio Rate Control Feature */ +#define CEC_MSG_SET_AUDIO_RATE 0x9a +/* Audio Rate Operand (audio_rate) */ +#define CEC_OP_AUD_RATE_OFF 0 +#define CEC_OP_AUD_RATE_WIDE_STD 1 +#define CEC_OP_AUD_RATE_WIDE_FAST 2 +#define CEC_OP_AUD_RATE_WIDE_SLOW 3 +#define CEC_OP_AUD_RATE_NARROW_STD 4 +#define CEC_OP_AUD_RATE_NARROW_FAST 5 +#define CEC_OP_AUD_RATE_NARROW_SLOW 6 + + +/* Audio Return Channel Control Feature */ +#define CEC_MSG_INITIATE_ARC 0xc0 +#define CEC_MSG_REPORT_ARC_INITIATED 0xc1 +#define CEC_MSG_REPORT_ARC_TERMINATED 0xc2 +#define CEC_MSG_REQUEST_ARC_INITIATION 0xc3 +#define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4 +#define CEC_MSG_TERMINATE_ARC 0xc5 + + +/* Dynamic Audio Lipsync Feature */ +/* Only for CEC 2.0 and up */ +#define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7 +#define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8 +/* Low Latency Mode Operand (low_latency_mode) */ +#define CEC_OP_LOW_LATENCY_MODE_OFF 0 +#define CEC_OP_LOW_LATENCY_MODE_ON 1 +/* Audio Output Compensated Operand (audio_out_compensated) */ +#define CEC_OP_AUD_OUT_COMPENSATED_NA 0 +#define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1 +#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2 +#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3 + + +/* Capability Discovery and Control Feature */ +#define CEC_MSG_CDC_MESSAGE 0xf8 +/* Ethernet-over-HDMI: nobody ever does this... 
*/ +#define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00 +#define CEC_MSG_CDC_HEC_REPORT_STATE 0x01 +/* HEC Functionality State Operand (hec_func_state) */ +#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0 +#define CEC_OP_HEC_FUNC_STATE_INACTIVE 1 +#define CEC_OP_HEC_FUNC_STATE_ACTIVE 2 +#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3 +/* Host Functionality State Operand (host_func_state) */ +#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0 +#define CEC_OP_HOST_FUNC_STATE_INACTIVE 1 +#define CEC_OP_HOST_FUNC_STATE_ACTIVE 2 +/* ENC Functionality State Operand (enc_func_state) */ +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0 +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1 +#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2 +/* CDC Error Code Operand (cdc_errcode) */ +#define CEC_OP_CDC_ERROR_CODE_NONE 0 +#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1 +#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2 +#define CEC_OP_CDC_ERROR_CODE_OTHER 3 +/* HEC Support Operand (hec_support) */ +#define CEC_OP_HEC_SUPPORT_NO 0 +#define CEC_OP_HEC_SUPPORT_YES 1 +/* HEC Activation Operand (hec_activation) */ +#define CEC_OP_HEC_ACTIVATION_ON 0 +#define CEC_OP_HEC_ACTIVATION_OFF 1 + +#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02 +#define CEC_MSG_CDC_HEC_SET_STATE 0x03 +/* HEC Set State Operand (hec_set_state) */ +#define CEC_OP_HEC_SET_STATE_DEACTIVATE 0 +#define CEC_OP_HEC_SET_STATE_ACTIVATE 1 + +#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04 +#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05 +#define CEC_MSG_CDC_HEC_DISCOVER 0x06 +/* Hotplug Detect messages */ +#define CEC_MSG_CDC_HPD_SET_STATE 0x10 +/* HPD State Operand (hpd_state) */ +#define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0 +#define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1 +#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2 +#define CEC_OP_HPD_STATE_EDID_DISABLE 3 +#define CEC_OP_HPD_STATE_EDID_ENABLE 4 +#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5 +#define CEC_MSG_CDC_HPD_REPORT_STATE 0x11 +/* HPD Error Code Operand (hpd_error) */ +#define CEC_OP_HPD_ERROR_NONE 0 +#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1 +#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2 +#define CEC_OP_HPD_ERROR_OTHER 3 +#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4 + +#endif diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a20320c666fd..984f73b719a9 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -87,6 +87,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); struct cgroup *cgroup_get_from_path(const char *path); +struct cgroup *cgroup_get_from_fd(int fd); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); diff --git a/include/linux/clk.h b/include/linux/clk.h index 0df4a51e1a78..834179f3fa72 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk) return NULL; } +static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) +{ + return NULL; +} #endif /* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
*/ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 44a1aff22566..08398182f56e 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *, extern int clocksource_i8253_init(void); #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - OF_DECLARE_1(clksrc, name, compat, fn) + OF_DECLARE_1_RET(clksrc, name, compat, fn) #ifdef CONFIG_CLKSRC_PROBE extern void clocksource_probe(void); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a58c852a268f..d4e106b5dc27 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -1,6 +1,18 @@ #ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H +/* + * Determines how hard direct compaction should try to succeed. + * Lower value means higher priority, analogously to reclaim priority. + */ +enum compact_priority { + COMPACT_PRIO_SYNC_LIGHT, + MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + COMPACT_PRIO_ASYNC, + INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC +}; + /* Return values for compact_zone() and try_to_compact_pages() */ /* When adding new states, please adjust include/trace/events/compaction.h */ enum compact_result { @@ -43,14 +55,6 @@ enum compact_result { COMPACT_PARTIAL, }; -/* Used to signal whether compaction detected need_sched() or lock contention */ -/* No contention detected */ -#define COMPACT_CONTENDED_NONE 0 -/* Either need_sched() was true or fatal signal pending */ -#define COMPACT_CONTENDED_SCHED 1 -/* Zone lock or lru_lock was contended in async compaction */ -#define COMPACT_CONTENDED_LOCK 2 - struct alloc_context; /* in mm/internal.h */ #ifdef CONFIG_COMPACTION @@ -64,9 +68,8 @@ extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, - unsigned int order, - unsigned int alloc_flags, const struct alloc_context *ac, - enum migrate_mode mode, int *contended); + unsigned int order, unsigned int alloc_flags, + const struct alloc_context *ac, enum compact_priority prio); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, @@ -151,14 +154,6 @@ extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else -static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask, - unsigned int order, int alloc_flags, - const struct alloc_context *ac, - enum migrate_mode mode, int *contended) -{ - return COMPACT_CONTINUE; -} - static inline void compact_pgdat(pg_data_t *pgdat, int order) { } @@ -212,6 +207,7 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 793c0829e3a3..1bb954842725 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -17,7 +17,6 @@ # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ?
({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) -# define __pmem __attribute__((noderef, address_space(5))) #ifdef CONFIG_SPARSE_RCU_POINTER # define __rcu __attribute__((noderef, address_space(4))) #else /* CONFIG_SPARSE_RCU_POINTER */ @@ -45,7 +44,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __cond_lock(x,c) (c) # define __percpu # define __rcu -# define __pmem # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) #endif /* __CHECKER__ */ @@ -304,23 +302,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering - * @cond: boolean expression to wait for - * - * Equivalent to using smp_load_acquire() on the condition variable but employs - * the control dependency of the wait to reduce the barrier on many platforms. - * - * The control dependency provides a LOAD->STORE order, the additional RMB - * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, - * aka. ACQUIRE. - */ -#define smp_cond_acquire(cond) do { \ - while (!(cond)) \ - cpu_relax(); \ - smp_rmb(); /* ctrl + rmb := acquire */ \ -} while (0) - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -545,10 +526,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * Similar to rcu_dereference(), but for situations where the pointed-to * object's lifetime is managed by something other than RCU. That * "something other" might be reference counting or simple immortality. + * + * The seemingly unused void * variable is to validate @p is indeed a pointer + * type. All pointer types silently cast to void *. */ #define lockless_dereference(p) \ ({ \ typeof(p) _________p1 = READ_ONCE(p); \ + __maybe_unused const void * const _________p2 = _________p1; \ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ (_________p1); \ }) diff --git a/include/linux/console.h b/include/linux/console.h index 98c8615dc300..d530c4627e54 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -28,6 +28,13 @@ struct tty_struct; #define VT100ID "\033[?1;2c" #define VT102ID "\033[?6c" +/** + * struct consw - callbacks for consoles + * + * @con_set_palette: sets the palette of the console to @table (optional) + * @con_scrolldelta: the contents of the console should be scrolled by @lines. + * Invoked by user. 
(optional) + */ struct consw { struct module *owner; const char *(*con_startup)(void); @@ -38,7 +45,6 @@ struct consw { void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); void (*con_cursor)(struct vc_data *, int); int (*con_scroll)(struct vc_data *, int, int, int, int); - void (*con_bmove)(struct vc_data *, int, int, int, int, int, int); int (*con_switch)(struct vc_data *); int (*con_blank)(struct vc_data *, int, int); int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); @@ -47,8 +53,9 @@ struct consw { int (*con_font_copy)(struct vc_data *, int); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); - int (*con_set_palette)(struct vc_data *, const unsigned char *); - int (*con_scrolldelta)(struct vc_data *, int); + void (*con_set_palette)(struct vc_data *, + const unsigned char *table); + void (*con_scrolldelta)(struct vc_data *, int lines); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index e329ee2667e1..6fd3c908a340 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,38 @@ struct uni_pagedir; #define NPAR 16 +/* + * Example: vc_data of a console that was scrolled 3 lines down. + * + * Console buffer + * vc_screenbuf ---------> +----------------------+-. + * | initializing W | \ + * | initializing X | | + * | initializing Y | > scroll-back area + * | initializing Z | | + * | | / + * vc_visible_origin ---> ^+----------------------+-: + * (changes by scroll) || Welcome to linux | \ + * || | | + * vc_rows --->< | login: root | | visible on console + * || password: | > (vc_screenbuf_size is + * vc_origin -----------> || | | vc_size_row * vc_rows) + * (start when no scroll) || Last login: 12:28 | / + * v+----------------------+-: + * | Have a lot of fun... | \ + * vc_pos -----------------|--------v | > scroll-front area + * | ~ # cat_ | / + * vc_scr_end -----------> +----------------------+-: + * (vc_origin + | | \ EMPTY, to be filled by + * vc_screenbuf_size) | | / vc_video_erase_char + * +----------------------+-' + * <---- 2 * vc_cols -----> + * <---- vc_size_row -----> + * + * Note that every character in the console buffer is accompanied with an + * attribute in the buffer right after the character. This is not depicted + * in the figure. + */ struct vc_data { struct tty_port port; /* Upper level data */ @@ -74,7 +106,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ /* attribute flags */ unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ unsigned int vc_italic:1; @@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work); #define CUR_DEFAULT CUR_UNDERLINE -#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp) +static inline bool con_is_visible(const struct vc_data *vc) +{ + return *vc->vc_display_fg == vc; +} #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d259274238db..d9aef2a0ec8e 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -31,6 +31,19 @@ static inline void user_exit(void) context_tracking_exit(CONTEXT_USER); } +/* Called with interrupts disabled. 
*/ +static inline void user_enter_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_USER); +} + +static inline void user_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_USER); +} + static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void) #else static inline void user_enter(void) { } static inline void user_exit(void) { } +static inline void user_enter_irqoff(void) { } +static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 4e81e08db752..631ba33bbe9f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -36,6 +36,12 @@ struct cpufreq_governor; +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED, + CPUFREQ_TABLE_SORTED_ASCENDING, + CPUFREQ_TABLE_SORTED_DESCENDING +}; + struct cpufreq_freqs { unsigned int cpu; /* cpu nr */ unsigned int old; @@ -87,6 +93,7 @@ struct cpufreq_policy { struct cpufreq_user_policy user_policy; struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; struct list_head policy_list; struct kobject kobj; @@ -113,6 +120,10 @@ struct cpufreq_policy { bool fast_switch_possible; bool fast_switch_enabled; + /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ + unsigned int cached_target_freq; + int cached_resolved_idx; + /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; @@ -185,6 +196,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) static inline void disable_cpufreq(void) { } #endif +#ifdef CONFIG_CPU_FREQ_STAT +void cpufreq_stats_create_table(struct cpufreq_policy *policy); +void cpufreq_stats_free_table(struct cpufreq_policy *policy); +void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq); +#else +static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq) { } +#endif /* CONFIG_CPU_FREQ_STAT */ + /********************************************************************* * CPUFREQ DRIVER INTERFACE * *********************************************************************/ @@ -251,6 +274,16 @@ struct cpufreq_driver { unsigned int index); unsigned int (*fast_switch)(struct cpufreq_policy *policy, unsigned int target_freq); + + /* + * Caches and returns the lowest driver-supported frequency greater than + * or equal to the target frequency, subject to any driver limitations. + * Does not set the frequency. Only to be implemented for drivers with + * target(). + */ + unsigned int (*resolve_freq)(struct cpufreq_policy *policy, + unsigned int target_freq); + /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset.
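The new ->resolve_freq() callback pairs with the cached_target_freq and cached_resolved_idx fields added to struct cpufreq_policy above. A sketch of what an implementation might look like for a ->target() driver with an ascending frequency table, using cpufreq_table_find_index_al() from later in this patch (the foo_ prefix is a placeholder driver name, not part of the patch):

	static unsigned int foo_resolve_freq(struct cpufreq_policy *policy,
					     unsigned int target_freq)
	{
		int idx;

		/* Honour the policy limits before the table lookup */
		target_freq = clamp_val(target_freq, policy->min, policy->max);
		idx = cpufreq_table_find_index_al(policy, target_freq);

		/* Cache the result so a following target() can reuse it */
		policy->cached_target_freq = target_freq;
		policy->cached_resolved_idx = idx;

		return policy->freq_table[idx].frequency;
	}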
@@ -455,18 +488,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define MIN_LATENCY_MULTIPLIER (20) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) -/* Governor Events */ -#define CPUFREQ_GOV_START 1 -#define CPUFREQ_GOV_STOP 2 -#define CPUFREQ_GOV_LIMITS 3 -#define CPUFREQ_GOV_POLICY_INIT 4 -#define CPUFREQ_GOV_POLICY_EXIT 5 - struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; - int initialized; - int (*governor) (struct cpufreq_policy *policy, - unsigned int event); + int (*init)(struct cpufreq_policy *policy); + void (*exit)(struct cpufreq_policy *policy); + int (*start)(struct cpufreq_policy *policy); + void (*stop)(struct cpufreq_policy *policy); + void (*limits)(struct cpufreq_policy *policy); ssize_t (*show_setspeed) (struct cpufreq_policy *policy, char *buf); int (*store_setspeed) (struct cpufreq_policy *policy, @@ -487,12 +515,22 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) +{ + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); +} + /* Governor attribute set */ struct gov_attr_set { struct kobject kobj; @@ -582,11 +620,9 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); -int cpufreq_frequency_table_target(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table, - unsigned int target_freq, - unsigned int relation, - unsigned int *index); +int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq); @@ -597,6 +633,227 @@ int cpufreq_boost_trigger_state(int state); int cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); + +/* Find lowest freq at or above target in a table in ascending order */ +static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq >= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Find lowest freq at or above target in a table in descending order */ +static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if 
(best == -1) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_al(policy, target_freq); + else + return cpufreq_table_find_index_dl(policy, target_freq); +} + +/* Find highest freq at or below target in a table in ascending order */ +static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + return best; + } + + return best; +} + +/* Find highest freq at or below target in a table in descending order */ +static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq <= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ah(policy, target_freq); + else + return cpufreq_table_find_index_dh(policy, target_freq); +} + +/* Find closest freq to target in a table in ascending order */ +static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return i; + + return best; + } + + return best; +} + +/* Find closest freq to target in a table in descending order */ +static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + 
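/* + * target_freq is now clamped into [policy->min, policy->max]; pick the + * walker that matches the table's sort order. + */ +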
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ac(policy, target_freq); + else + return cpufreq_table_find_index_dc(policy, target_freq); +} + +static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) + return cpufreq_table_index_unsorted(policy, target_freq, + relation); + + switch (relation) { + case CPUFREQ_RELATION_L: + return cpufreq_table_find_index_l(policy, target_freq); + case CPUFREQ_RELATION_H: + return cpufreq_table_find_index_h(policy, target_freq); + case CPUFREQ_RELATION_C: + return cpufreq_table_find_index_c(policy, target_freq); + default: + pr_err("%s: Invalid relation: %d\n", __func__, relation); + return -EINVAL; + } +} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -617,8 +874,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } #endif -/* the following funtion is for cpufreq core use only */ -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 07b83d32f66c..bb31373c3478 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -252,4 +252,22 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) #define CPUIDLE_DRIVER_STATE_START 0 #endif +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ +({ \ + int __ret; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ +}) + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 6e28c895c376..7cee5551625b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -47,16 +47,18 @@ #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 -#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 -#define CRYPTO_ALG_TYPE_HASH 0x00000008 -#define CRYPTO_ALG_TYPE_SHASH 0x00000009 -#define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d +#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e +#define CRYPTO_ALG_TYPE_HASH 0x0000000e +#define CRYPTO_ALG_TYPE_SHASH 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 @@ -486,8 +488,6 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); struct crypto_ablkcipher *base; @@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask) * state information is unused by the kernel crypto API. 
*/ -/** - * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * ablkcipher cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for an ablkcipher. The returned struct - * crypto_ablkcipher is the cipher handle that is required for any subsequent - * API invocation for that ablkcipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask); - static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { diff --git a/include/linux/dax.h b/include/linux/dax.h index 43d5f0b799c7..9c6dc7704043 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -14,7 +14,6 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); -int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, pgoff_t index, bool wake_all); @@ -46,19 +45,15 @@ static inline int __dax_zero_page_range(struct block_device *bdev, #if defined(CONFIG_TRANSPARENT_HUGEPAGE) int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, unsigned int flags, get_block_t); -int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t); #else static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, get_block_t gb) { return VM_FAULT_FALLBACK; } -#define __dax_pmd_fault dax_pmd_fault #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) -#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f53fa055021a..107d9abe7166 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -130,17 +130,18 @@ struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); - int (*d_compare)(const struct dentry *, const struct dentry *, + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool); - struct inode *(*d_select_inode)(struct dentry *, unsigned); - struct dentry *(*d_real)(struct dentry *, struct inode *); + struct dentry *(*d_real)(struct dentry *, const struct inode *, + unsigned int); } ____cacheline_aligned; /* @@ -206,10 +207,8 @@ struct dentry_operations { #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ -#define 
DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ - -#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */ -#define DCACHE_OP_REAL 0x08000000 +#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */ +#define DCACHE_OP_REAL 0x04000000 #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ #define DCACHE_DENTRY_CURSOR 0x20000000 @@ -557,25 +556,27 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) return upper; } -static inline struct dentry *d_real(struct dentry *dentry) +/** + * d_real - Return the real dentry + * @dentry: the dentry to query + * @inode: inode to select the dentry from multiple layers (can be NULL) + * @flags: open flags to control copy-up behavior + * + * If dentry is on a union/overlay, then return the underlying, real dentry. + * Otherwise return the dentry itself. + * + * See also: Documentation/filesystems/vfs.txt + */ +static inline struct dentry *d_real(struct dentry *dentry, + const struct inode *inode, + unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) - return dentry->d_op->d_real(dentry, NULL); + return dentry->d_op->d_real(dentry, inode, flags); else return dentry; } -static inline struct inode *vfs_select_inode(struct dentry *dentry, - unsigned open_flags) -{ - struct inode *inode = d_inode(dentry); - - if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE)) - inode = dentry->d_op->d_select_inode(dentry, open_flags); - - return inode; -} - /** * d_real_inode - Return the real inode * @dentry: The dentry to query @@ -585,7 +586,7 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry, */ static inline struct inode *d_real_inode(struct dentry *dentry) { - return d_backing_inode(d_real(dentry)); + return d_backing_inode(d_real(dentry, NULL, 0)); } diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 46056cb161fc..d82bf1994485 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -38,7 +38,7 @@ struct debug_obj { * @name: name of the object type * @debug_hint: function returning address, which has an associated * kernel symbol, to allow identifying the object - * @is_static_object return true if the obj is static, otherwise return false + * @is_static_object: return true if the obj is static, otherwise return false * @fixup_init: fixup function, which is called when the init check * fails.
All fixup functions must return true if fixup * was successful, otherwise return false diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 0830c9e86f0d..91acfce74a22 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -19,6 +19,15 @@ struct dm_table; struct mapped_device; struct bio_vec; +/* + * Type of table, mapped_device's mempool and request_queue + */ +#define DM_TYPE_NONE 0 +#define DM_TYPE_BIO_BASED 1 +#define DM_TYPE_REQUEST_BASED 2 +#define DM_TYPE_MQ_REQUEST_BASED 3 +#define DM_TYPE_DAX_BIO_BASED 4 + typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { @@ -116,6 +125,14 @@ typedef void (*dm_io_hints_fn) (struct dm_target *ti, */ typedef int (*dm_busy_fn) (struct dm_target *ti); +/* + * Returns: + * < 0 : error + * >= 0 : the number of bytes accessible at the address + */ +typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector, + void **kaddr, pfn_t *pfn, long size); + void dm_error(const char *message); struct dm_dev { @@ -162,6 +179,7 @@ struct target_type { dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; + dm_direct_access_fn direct_access; /* For internal device-mapper use. */ struct list_head list; @@ -444,6 +462,14 @@ int dm_table_add_target(struct dm_table *t, const char *type, void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); /* + * Target can use this to set the table's type. + * Can only ever be called from a target's ctr. + * Useful for "hybrid" target (supports both bio-based + * and request-based). + */ +void dm_table_set_type(struct dm_table *t, unsigned type); + +/* * Finally call this to make the table ready for use. */ int dm_table_complete(struct dm_table *t); diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a68cbe59e6ad..b91b023deffb 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -57,7 +57,8 @@ struct dm_io_notify { */ struct dm_io_client; struct dm_io_request { - int bi_rw; /* READ|WRITE - not READA */ + int bi_op; /* REQ_OP */ + int bi_op_flags; /* rq_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 79df69dc629c..aaff68efba5d 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -39,14 +39,22 @@ struct hsu_dma_chip { #if IS_ENABLED(CONFIG_HSU_DMA) /* Export to the internal users */ -irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status); +irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); int hsu_dma_remove(struct hsu_dma_chip *chip); #else -static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, - unsigned short nr) +static inline int hsu_dma_get_status(struct hsu_dma_chip *chip, + unsigned short nr, u32 *status) +{ + return 0; +} +static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, + unsigned short nr, u32 status) { return IRQ_NONE; } diff --git a/include/linux/drbd.h b/include/linux/drbd.h index d6b3c9943a2c..002611c85318 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -51,7 +51,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.6" +#define 
REL_VERSION "8.4.7" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 @@ -370,6 +370,14 @@ enum drbd_notification_type { NOTIFY_FLAGS = NOTIFY_CONTINUES, }; +enum drbd_peer_state { + P_INCONSISTENT = 3, + P_OUTDATED = 4, + P_DOWN = 5, + P_PRIMARY = 6, + P_FENCING = 7, +}; + #define UUID_JUST_CREATED ((__u64)4) enum write_ordering_e { diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index 2d0e5ad5de9d..c934d3a96b5e 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -123,15 +123,16 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf, __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF) __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF) __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF) + __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) + __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) + __u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF) __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF) __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF) __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) - __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) - __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) - /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */ __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF) + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED) ) GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts, diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 8ac8c5d9a3ad..ddac68422a96 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -126,8 +126,7 @@ #define DRBD_RESYNC_RATE_DEF 250 #define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */ - /* less than 7 would hit performance unnecessarily. */ -#define DRBD_AL_EXTENTS_MIN 7 +#define DRBD_AL_EXTENTS_MIN 67 /* we use u16 as "slot number", (u16)~0 is "FREE". * If you use >= 292 kB on-disk ring buffer, * this is the maximum you can use: */ @@ -210,6 +209,12 @@ #define DRBD_MD_FLUSHES_DEF 1 #define DRBD_TCP_CORK_DEF 1 #define DRBD_AL_UPDATES_DEF 1 +/* We used to ignore the discard_zeroes_data setting. + * To not change established (and expected) behaviour, + * by default assume that, for discard_zeroes_data=0, + * we can make that an effective discard_zeroes_data=1, + * if we only explicitly zero-out unaligned partial chunks. 
*/ +#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 #define DRBD_ALWAYS_ASBP_DEF 0 @@ -230,4 +235,10 @@ #define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX #define DRBD_SOCKET_CHECK_TIMEO_DEF 0 #define DRBD_SOCKET_CHECK_TIMEO_SCALE '1' + +#define DRBD_RS_DISCARD_GRANULARITY_MIN 0 +#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */ +#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */ +#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */ + #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index f196dd0b0f2f..7f80a75ee9e3 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -536,116 +536,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, void efi_native_runtime_setup(void); /* - * EFI Configuration Table and GUID definitions + * EFI Configuration Table and GUID definitions + * + * These are all defined in a single line to make them easier to + * grep for and to see them at a glance - while still having a + * similar structure to the definitions in the spec. + * + * Here's how they are structured: + * + * GUID: 12345678-1234-1234-1234-123456789012 + * Spec: + * #define EFI_SOME_PROTOCOL_GUID \ + * {0x12345678,0x1234,0x1234,\ + * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}} + * Here: + * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12) + * ^ tabs ^extra space + * + * Note that the 'extra space' separates the values at the same place + * where the UEFI SPEC breaks the line. */ -#define NULL_GUID \ - EFI_GUID(0x00000000, 0x0000, 0x0000, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) - -#define MPS_TABLE_GUID \ - EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_TABLE_GUID \ - EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_20_TABLE_GUID \ - EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \ - 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) - -#define SMBIOS_TABLE_GUID \ - EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define SMBIOS3_TABLE_GUID \ - EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \ - 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) - -#define SAL_SYSTEM_TABLE_GUID \ - EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define HCDP_TABLE_GUID \ - EFI_GUID(0xf951938d, 0x620b, 0x42ef, \ - 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) - -#define UGA_IO_PROTOCOL_GUID \ - EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \ - 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) - -#define EFI_GLOBAL_VARIABLE_GUID \ - EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \ - 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) - -#define UV_SYSTEM_TABLE_GUID \ - EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \ - 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) - -#define LINUX_EFI_CRASH_GUID \ - EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \ - 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) - -#define LOADED_IMAGE_PROTOCOL_GUID \ - EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \ - 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ - EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \ - 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) - -#define EFI_UGA_PROTOCOL_GUID \ - EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \ - 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) - -#define EFI_PCI_IO_PROTOCOL_GUID \ - EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \ - 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 
0x9a) - -#define EFI_FILE_INFO_ID \ - EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_SYSTEM_RESOURCE_TABLE_GUID \ - EFI_GUID(0xb122a263, 0x3661, 0x4f68, \ - 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) - -#define EFI_FILE_SYSTEM_GUID \ - EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define DEVICE_TREE_GUID \ - EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \ - 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) - -#define EFI_PROPERTIES_TABLE_GUID \ - EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \ - 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) - -#define EFI_RNG_PROTOCOL_GUID \ - EFI_GUID(0x3152bca5, 0xeade, 0x433d, \ - 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) - -#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \ - EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \ - 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) - -#define EFI_CONSOLE_OUT_DEVICE_GUID \ - EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \ - 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) +#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) +#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) +#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) +#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) +#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) +#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) +#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) +#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) +#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) +#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) +#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) +#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) +#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 
0x1f, 0x44) +#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) +#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) /* * This GUID is used to pass to the kernel proper the struct screen_info * structure that was populated by the stub based on the GOP protocol instance * associated with ConOut */ -#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \ - EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \ - 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) - -#define LINUX_EFI_LOADER_ENTRY_GUID \ - EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \ - 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) +#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) typedef struct { efi_guid_t guid; @@ -975,7 +917,6 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern void efi_get_time(struct timespec *now); extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; @@ -1465,4 +1406,55 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, unsigned long size); bool efi_runtime_disabled(void); +extern void efi_call_virt_check_flags(unsigned long flags, const char *call); + +/* + * Arch code can implement the following three template macros, avoiding + * repetition for the void/non-void return cases of {__,}efi_call_virt(): + * + * * arch_efi_call_virt_setup() + * + * Sets up the environment for the call (e.g. switching page tables, + * allowing kernel-mode use of floating point, if required). + * + * * arch_efi_call_virt() + * + * Performs the call. The last expression in the macro must be the call + * itself, allowing the logic to be shared by the void and non-void + * cases. + * + * * arch_efi_call_virt_teardown() + * + * Restores the usual kernel environment once the call has returned. + */ + +#define efi_call_virt_pointer(p, f, args...) \ +({ \ + efi_status_t __s; \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + __s = arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ + \ + __s; \ +}) + +#define __efi_call_virt_pointer(p, f, args...)
\ +({ \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ +}) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 638b324f0291..e7f358d2e5fc 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); -typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); +typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, + struct request *, struct request *); typedef void (elevator_bio_merged_fn) (struct request_queue *, struct request *, struct bio *); @@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, int); +typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); @@ -46,7 +50,8 @@ struct elevator_ops elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; - elevator_allow_merge_fn *elevator_allow_merge_fn; + elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; + elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; @@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); -extern int elv_may_queue(struct request_queue *, int); +extern int elv_may_queue(struct request_queue *, int, int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask); @@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); -extern bool elv_rq_merge_ok(struct request *, struct bio *); +extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, struct elevator_type *); diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 37ff4a6faa9a..6fec9e81bd70 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -374,6 +374,29 @@ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) } /** + * ether_addr_equal_masked - Compare two Ethernet addresses with a mask + * @addr1: Pointer to a six-byte array containing the 1st 
Ethernet address + * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address + * @mask: Pointer to a six-byte array containing the Ethernet address bitmask + * + * Compare two Ethernet addresses with a mask, returns true if for every bit + * set in the bitmask the equivalent bits in the ethernet addresses are equal. + * Using a mask with all bits set is a slower ether_addr_equal. + */ +static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, + const u8 *mask) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) { + if ((addr1[i] ^ addr2[i]) & mask[i]) + return false; + } + + return true; +} + +/** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. * @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d8414502edb4..b03c0625fa6e 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -6,6 +6,7 @@ struct dentry; struct iattr; struct inode; +struct iomap; struct super_block; struct vfsmount; @@ -187,21 +188,6 @@ struct fid { * get_name is not (which is possibly inconsistent) */ -/* types of block ranges for multipage write mappings. */ -#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ -#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ -#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ -#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ - -#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ - -struct iomap { - sector_t blkno; /* first sector of mapping */ - loff_t offset; /* file offset of mapping, bytes */ - u64 length; /* length of mapping, bytes */ - int type; /* type of mapping */ -}; - struct export_operations { int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent); diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 7abf674c388c..61004413dc64 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -126,42 +126,6 @@ struct extcon_dev { struct device_attribute *d_attrs_muex; }; -/** - * struct extcon_cable - An internal data for each cable of extcon device. - * @edev: The extcon device - * @cable_index: Index of this cable in the edev - * @attr_g: Attribute group for the cable - * @attr_name: "name" sysfs entry - * @attr_state: "state" sysfs entry - * @attrs: Array pointing to attr_name and attr_state for attr_g - */ -struct extcon_cable { - struct extcon_dev *edev; - int cable_index; - - struct attribute_group attr_g; - struct device_attribute attr_name; - struct device_attribute attr_state; - - struct attribute *attrs[3]; /* to be fed to attr_g.attrs */ -}; - -/** - * struct extcon_specific_cable_nb - An internal data for - * extcon_register_interest(). - * @user_nb: user provided notifier block for events from - * a specific cable. - * @cable_index: the target cable. - * @edev: the target extcon device. - * @previous_value: the saved previous event value. - */ -struct extcon_specific_cable_nb { - struct notifier_block *user_nb; - int cable_index; - struct extcon_dev *edev; - unsigned long previous_value; -}; - #if IS_ENABLED(CONFIG_EXTCON) /* @@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state); /* * get/set_cable_state access each bit of the 32b encoded state value. - * They are used to access the status of each cable based on the cable_name. + * They are used to access the status of each cable based on the cable id. 
*/ extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id); extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, bool cable_state); -extern int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name); -extern int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, bool cable_state); - -/* - * Following APIs are for notifiees (those who want to be notified) - * to register a callback for events from a specific cable of the extcon. - * Notifiees are the connected device drivers wanting to get notified by - * a specific external port of a connection device. - */ -extern int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb); -extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb); - /* * Following APIs are to monitor every action of a notifier. * Registrar gets notified for every external port of a connection device. @@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); +extern int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); /* * Following API get the extcon device from devicetree. @@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, /* Following API to get information of extcon device */ extern const char *extcon_get_edev_name(struct extcon_dev *edev); + #else /* CONFIG_EXTCON */ static inline int extcon_dev_register(struct extcon_dev *edev) { @@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev, return 0; } -static inline int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name) -{ - return 0; -} - -static inline int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, int state) -{ - return 0; -} - static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { return NULL; @@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev, return 0; } -static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb) +static inline int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { - return 0; + return -ENOSYS; } -static inline int extcon_unregister_interest(struct extcon_specific_cable_nb - *obj) -{ - return 0; -} +static inline void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { } static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) @@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, return ERR_PTR(-ENODEV); } #endif /* CONFIG_EXTCON */ + +/* + * The following structure and APIs are deprecated. Extcon keeps these + * definitions only to avoid breaking the build.
+ */ +struct extcon_specific_cable_nb { + struct notifier_block *user_nb; + int cable_index; + struct extcon_dev *edev; + unsigned long previous_value; +}; + +static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, + const char *extcon_name, const char *cable_name, + struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int extcon_unregister_interest(struct extcon_specific_cable_nb + *obj) +{ + return -EINVAL; +} #endif /* __LINUX_EXTCON_H__ */ diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index 53c60806bcfb..ac85f2061351 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h @@ -53,6 +53,7 @@ struct adc_jack_cond { * milli-seconds after the interrupt occurs. You may * describe such delays with @handling_delay_ms, which * is rounded-off by jiffies. + * @wakeup_source: flag to wake up the system for extcon events. */ struct adc_jack_pdata { const char *name; @@ -65,6 +66,7 @@ struct adc_jack_pdata { unsigned long irq_flags; unsigned long handling_delay_ms; /* in ms */ + bool wakeup_source; }; #endif /* _EXTCON_ADC_JACK_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h index 2056e9fd0138..1de1b3f6fb76 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -81,8 +81,6 @@ struct fence { unsigned long flags; ktime_t timestamp; int status; - struct list_head child_list; - struct list_head active_list; }; enum fence_flag_bits { diff --git a/include/linux/filter.h b/include/linux/filter.h index 8f74f3d61894..a16439b99fd9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -368,6 +368,11 @@ struct bpf_skb_data_end { void *data_end; }; +struct xdp_buff { + void *data; + void *data_end; +}; + /* compute the linear packet data range [data, data_end) which * will be accessed by cls_bpf and act_bpf programs */ @@ -429,6 +434,18 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, return BPF_PROG_RUN(prog, skb); } +static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 ret; + + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, (void *)xdp); + rcu_read_unlock(); + + return ret; +} + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), @@ -513,6 +530,7 @@ bool bpf_helper_changes_skb_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index e65ef959546c..c46d2aa16d81 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -4,6 +4,7 @@ #include <linux/swap.h> #include <linux/mm.h> #include <linux/bitops.h> +#include <linux/jump_label.h> struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ @@ -14,7 +15,6 @@ struct frontswap_ops { struct frontswap_ops *next; /* private pointer to next ops */ }; -extern bool frontswap_enabled; extern void frontswap_register_ops(struct frontswap_ops *ops); extern void frontswap_shrink(unsigned long); extern unsigned long frontswap_curr_pages(void); @@ -30,7 +30,12 @@ extern void __frontswap_invalidate_page(unsigned, pgoff_t); extern void __frontswap_invalidate_area(unsigned); #ifdef CONFIG_FRONTSWAP -#define frontswap_enabled (1) +extern struct static_key_false frontswap_enabled_key; + +static inline bool frontswap_enabled(void) 
+{ + return static_branch_unlikely(&frontswap_enabled_key); +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -50,7 +55,10 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) #else /* all inline routines become no-ops and all externs are ignored */ -#define frontswap_enabled (0) +static inline bool frontswap_enabled(void) +{ + return false; +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -70,37 +78,35 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) static inline int frontswap_store(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_store(page); - if (frontswap_enabled) - ret = __frontswap_store(page); - return ret; + return -1; } static inline int frontswap_load(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_load(page); - if (frontswap_enabled) - ret = __frontswap_load(page); - return ret; + return -1; } static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_page(type, offset); } static inline void frontswap_invalidate_area(unsigned type) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_area(type); } static inline void frontswap_init(unsigned type, unsigned long *map) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_init(type, map); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 448641bcafc5..3f8167a49675 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -152,9 +152,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define CHECK_IOVEC_ONLY -1 /* - * The below are the various read and write types that we support. Some of + * The below are the various read and write flags that we support. Some of * them include behavioral modifiers that send information down to the - * block layer and IO scheduler. Terminology: + * block layer and IO scheduler. They should be used along with a req_op. + * Terminology: * * The block layer uses device plugging to defer IO a little bit, in * the hope that we will see more IO very shortly. This increases @@ -177,9 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * READ_SYNC A synchronous read. Device is not plugged, caller can * immediately wait on this read without caring about * unplugging. - * READA Used for read-ahead operations. Lower priority, and the - * block layer could (in theory) choose to ignore this - * request if it runs into resource problems. * WRITE A normal async write. Device will be plugged. * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down * the hint that someone will be waiting on this IO @@ -193,19 +191,17 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * non-volatile media on completion. 
* */ -#define RW_MASK REQ_WRITE -#define RWA_MASK REQ_RAHEAD +#define RW_MASK REQ_OP_WRITE -#define READ 0 -#define WRITE RW_MASK -#define READA RWA_MASK +#define READ REQ_OP_READ +#define WRITE REQ_OP_WRITE -#define READ_SYNC (READ | REQ_SYNC) -#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) -#define WRITE_ODIRECT (WRITE | REQ_SYNC) -#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) -#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) -#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) +#define READ_SYNC REQ_SYNC +#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) +#define WRITE_ODIRECT REQ_SYNC +#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) +#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) +#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) /* * Attribute flags. These should be or-ed together to figure out what @@ -402,6 +398,8 @@ struct address_space_operations { */ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); @@ -459,7 +457,6 @@ struct block_device { struct inode * bd_inode; /* will die */ struct super_block * bd_super; struct mutex bd_mutex; /* open/close mutex */ - struct list_head bd_inodes; void * bd_claiming; void * bd_holder; int bd_holders; @@ -665,6 +662,7 @@ struct inode { #endif struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ union { struct hlist_head i_dentry; struct rcu_head i_rcu; @@ -1272,12 +1270,7 @@ static inline struct inode *file_inode(const struct file *f) static inline struct dentry *file_dentry(const struct file *file) { - struct dentry *dentry = file->f_path.dentry; - - if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) - return dentry->d_op->d_real(dentry, file_inode(file)); - else - return dentry; + return d_real(file->f_path.dentry, file_inode(file), 0); } static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) @@ -1448,6 +1441,9 @@ struct super_block { /* s_inode_list_lock protects s_inodes */ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; struct list_head s_inodes; /* all inodes */ + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; /* writeback inodes */ }; extern struct timespec current_fs_time(struct super_block *sb); @@ -2464,15 +2460,18 @@ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK -/* - * return READ, READA, or WRITE - */ -#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) +static inline bool op_is_write(unsigned int op) +{ + return op == REQ_OP_READ ? false : true; +} /* * return data direction, READ or WRITE */ -#define bio_data_dir(bio) ((bio)->bi_rw & 1) +static inline int bio_data_dir(struct bio *bio) +{ + return op_is_write(bio_op(bio)) ? 
WRITE : READ; +} extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); @@ -2507,6 +2506,7 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); +extern int filemap_check_errors(struct address_space *mapping); extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); @@ -2742,7 +2742,7 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK -extern blk_qc_t submit_bio(int, struct bio *); +extern blk_qc_t submit_bio(struct bio *); extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); @@ -2797,7 +2797,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); #ifdef CONFIG_BLOCK -typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, +typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, loff_t file_offset); enum { diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 0141f257d67b..eed9e853a06f 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -52,18 +52,6 @@ static inline int fsnotify_perm(struct file *file, int mask) } /* - * fsnotify_d_move - dentry has been moved - */ -static inline void fsnotify_d_move(struct dentry *dentry) -{ - /* - * On move we need to update dentry->d_flags to indicate if the new parent - * cares about events from this dentry. - */ - __fsnotify_update_dcache_flags(dentry); -} - -/* * fsnotify_link_count - inode's link count changed */ static inline void fsnotify_link_count(struct inode *inode) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 29f917517299..58205f33af02 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -267,10 +267,8 @@ static inline int fsnotify_inode_watches_children(struct inode *inode) * Update the dentry with a flag indicating the interest of its parent to receive * filesystem events when those events happens to this dentry->d_inode. */ -static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +static inline void fsnotify_update_flags(struct dentry *dentry) { - struct dentry *parent; - assert_spin_locked(&dentry->d_lock); /* @@ -280,21 +278,12 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) * find our entry, so it will spin until we complete here, and update * us with the new state. 
*/ - parent = dentry->d_parent; - if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) + if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; } -/* - * fsnotify_d_instantiate - instantiate a dentry for inode - */ -static inline void __fsnotify_d_instantiate(struct dentry *dentry) -{ - __fsnotify_update_dcache_flags(dentry); -} - /* called from fsnotify listeners, such as fanotify or dnotify */ /* create a new group */ @@ -386,10 +375,7 @@ static inline void __fsnotify_inode_delete(struct inode *inode) static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} -static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) -{} - -static inline void __fsnotify_d_instantiate(struct dentry *dentry) +static inline void fsnotify_update_flags(struct dentry *dentry) {} static inline u32 fsnotify_get_cookie(void) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 66a36a815f0a..7d565afe35d2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -754,23 +754,27 @@ static inline void ftrace_init(void) { } /* * Structure that defines an entry function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. */ struct ftrace_graph_ent { unsigned long func; /* Current function */ int depth; -}; +} __packed; /* * Structure that defines a return function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. */ struct ftrace_graph_ret { unsigned long func; /* Current function */ - unsigned long long calltime; - unsigned long long rettime; /* Number of functions that overran the depth limit for current task */ unsigned long overrun; + unsigned long long calltime; + unsigned long long rettime; int depth; -}; +} __packed; /* Type of the callback handlers for tracing function graph*/ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 359a8e4bd44d..1dbf52f9c24b 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -205,7 +205,6 @@ struct gendisk { void *private_data; int flags; - struct device *driverfs_dev; // FIXME: remove struct kobject *slave_dir; struct timer_rand_state *random; @@ -414,7 +413,12 @@ static inline void free_part_info(struct hd_struct *part) extern void part_round_stats(int cpu, struct hd_struct *part); /* block/genhd.c */ -extern void add_disk(struct gendisk *disk); +extern void device_add_disk(struct device *parent, struct gendisk *disk); +static inline void add_disk(struct gendisk *disk) +{ + device_add_disk(NULL, disk); +} + extern void del_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 570383a41853..f8041f9de31e 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -78,8 +78,7 @@ struct vm_area_struct; * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. * - * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant - * to kmem allocations). + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 
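+ * Users normally request kmemcg accounting via GFP_KERNEL_ACCOUNT, which is + * (GFP_KERNEL | __GFP_ACCOUNT).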
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) @@ -238,9 +237,11 @@ struct vm_area_struct; * are expected to be movable via page reclaim or page migration. Typically, * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. * - * GFP_TRANSHUGE is used for THP allocations. They are compound allocations - * that will fail quickly if memory is not available and will not wake - * kswapd on failure. + * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are + * compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is used by default + * in the page fault path, while the non-light variant is used by khugepaged. */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) @@ -255,9 +256,9 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) -#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ - ~__GFP_RECLAIM) +#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) +#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) @@ -486,10 +487,6 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) -extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); -extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, - unsigned int order); - extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -513,9 +510,6 @@ extern void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void __free_page_frag(void *addr); -extern void __free_kmem_pages(struct page *page, unsigned int order); -extern void free_kmem_pages(unsigned long addr, unsigned int order); - #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0)
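To make the split concrete: both flag sets describe compound THP allocations, but only GFP_TRANSHUGE may enter direct reclaim/compaction. A minimal sketch (not from this patch; my_alloc_thp is a made-up helper) of how a caller would choose between them:

#include <linux/gfp.h>
#include <linux/huge_mm.h>	/* HPAGE_PMD_ORDER, with CONFIG_TRANSPARENT_HUGEPAGE */

/* Hypothetical helper: allocate a PMD-sized compound page. The _LIGHT
 * variant (fault path) fails fast without reclaim or compaction, while
 * GFP_TRANSHUGE (khugepaged) adds __GFP_DIRECT_RECLAIM on top of it. */
static struct page *my_alloc_thp(bool in_fault_path)
{
	gfp_t gfp = in_fault_path ? GFP_TRANSHUGE_LIGHT : GFP_TRANSHUGE;

	return alloc_pages(gfp, HPAGE_PMD_ORDER);
}

diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 2790591c77cf..57402544b53f 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -246,7 +246,7 @@ struct hsi_port { int (*stop_tx)(struct hsi_client *cl); int (*release)(struct hsi_client *cl); /* private */ - struct atomic_notifier_head n_head; + struct blocking_notifier_head n_head; }; #define to_hsi_port(dev) container_of(dev, struct hsi_port, device) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f0a7a0320300..6f14de45b5ce 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,25 +1,17 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - unsigned int flags); +extern int do_huge_pmd_anonymous_page(struct fault_env *fe); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void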
huge_pmd_set_accessed(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd, int dirty); -extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); -extern int madvise_free_huge_pmd(struct mmu_gather *tlb, +extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next); extern int zap_huge_pmd(struct mmu_gather *tlb, @@ -49,6 +41,18 @@ enum transparent_hugepage_flag { #endif }; +struct kobject; +struct kobj_attribute; + +extern ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); +extern struct kobj_attribute shmem_enabled_attr; + #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) @@ -134,8 +138,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp); +extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -152,6 +155,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) struct page *get_huge_zero_page(void); void put_huge_zero_page(void); +#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) @@ -161,6 +166,8 @@ void put_huge_zero_page(void); #define transparent_hugepage_enabled(__vma) 0 +static inline void prep_transhuge_page(struct page *page) {} + #define transparent_hugepage_flags 0UL static inline int split_huge_page_to_list(struct page *page, struct list_head *list) @@ -196,8 +203,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp) +static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8f1b086ca5bc..c2e3324f9468 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -23,6 +23,8 @@ #define _LINUX_I2C_SMBUS_H #include <linux/i2c.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> /** @@ -48,4 +50,31 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); +/** + * smbus_host_notify - internal structure used by the Host Notify mechanism. 
+ * @adapter: the I2C adapter associated with this struct + * @work: worker used to schedule the IRQ in the slave device + * @lock: spinlock to check if a notification is already pending + * @pending: flag set when a notification is pending (any new notification will + * be rejected if pending is true) + * @payload: the actual payload of the Host Notify event + * @addr: the address of the slave device which raised the notification + * + * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does + * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a + * managed resource to clean this up when the adapter gets released. + */ +struct smbus_host_notify { + struct i2c_adapter *adapter; + struct work_struct work; + spinlock_t lock; + bool pending; + u16 payload; + u8 addr; +}; + +struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap); +int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, + unsigned short addr, unsigned int data); + #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 96a25ae14494..fffdc270ca18 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -126,6 +126,11 @@ i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, u8 command, u8 length, u8 *values); #endif /* I2C */ +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY, +}; + /** * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) @@ -180,8 +185,11 @@ struct i2c_driver { * The format and meaning of the data value depends on the protocol. * For the SMBus alert protocol, there is a single bit of data passed * as the alert response's low bit ("event flag"). + * For the SMBus Host Notify protocol, the data corresponds to the + * 16-bit payload data reported by the slave device acting as master. */ - void (*alert)(struct i2c_client *, unsigned int data); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, + unsigned int data); /* an ioctl-like command that can be used to perform specific functions * with the device.
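Since both protocols now funnel into the same callback, a client driver is expected to switch on the new protocol argument. A hypothetical handler (the mydrv_* names are invented for illustration):

#include <linux/i2c.h>

/* Sketch of a client driver's .alert callback: one handler serves both
 * SMBus Alert (low bit of 'data' is the event flag) and SMBus Host
 * Notify (where 'data' is the 16-bit payload from the slave device). */
static void mydrv_alert(struct i2c_client *client,
			enum i2c_alert_protocol protocol, unsigned int data)
{
	switch (protocol) {
	case I2C_PROTOCOL_SMBUS_ALERT:
		dev_info(&client->dev, "SMBus alert, event flag %u\n",
			 data & 1);
		break;
	case I2C_PROTOCOL_SMBUS_HOST_NOTIFY:
		dev_info(&client->dev, "Host Notify, payload 0x%04x\n", data);
		break;
	}
}

static struct i2c_driver mydrv_driver = {
	.driver	= { .name = "mydrv" },
	.alert	= mydrv_alert,
};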
@@ -349,6 +357,11 @@ extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); extern struct i2c_client * i2c_new_dummy(struct i2c_adapter *adap, u16 address); +extern struct i2c_client * +i2c_new_secondary_device(struct i2c_client *client, + const char *name, + u16 default_addr); + extern void i2c_unregister_device(struct i2c_client *); #endif /* I2C */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 630f45335c73..57086e9fc64c 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -14,9 +14,12 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_IPV6) extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); -typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info); +typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len); #else diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b118744d3382..a80516fd65c8 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <linux/etherdevice.h> #include <asm/byteorder.h> #include <asm/unaligned.h> @@ -2464,7 +2465,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) */ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) { - if (skb->len < 25) + if (skb->len < IEEE80211_MIN_ACTION_SIZE) return false; return _ieee80211_is_robust_mgmt_frame((void *)skb->data); } @@ -2487,6 +2488,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, } /** + * _ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @hdr: the frame + */ +static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (!ieee80211_is_action(hdr->frame_control) || + !is_multicast_ether_addr(hdr->addr1)) + return false; + + return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION || + mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION; +} + +/** + * ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @skb: the skb containing the frame, length will be checked + */ +static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) +{ + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return false; + return _ieee80211_is_group_privacy_action((void *)skb->data); +} + +/** * ieee80211_tu_to_usec - convert time units (TU) to microseconds * @tu: the TUs */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index acedbb68a5a3..ddb890174a0e 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -31,6 +31,8 @@ #define IEEE802154_MIN_PSDU_LEN 9 #define IEEE802154_FCS_LEN 2 #define IEEE802154_MAX_AUTH_TAG_LEN 16 +#define IEEE802154_FC_LEN 2 +#define IEEE802154_SEQ_LEN 1 /* General MAC frame format: * 2 bytes: Frame Control @@ -48,6 +50,7 @@ #define IEEE802154_EXTENDED_ADDR_LEN 8 #define IEEE802154_SHORT_ADDR_LEN 2 +#define IEEE802154_PAN_ID_LEN 2 #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 @@ -221,9 +224,17 @@ enum { #define IEEE802154_FCTL_ACKREQ 0x0020 #define 
IEEE802154_FCTL_SECEN 0x0004 #define IEEE802154_FCTL_INTRA_PAN 0x0040 +#define IEEE802154_FCTL_DADDR 0x0c00 +#define IEEE802154_FCTL_SADDR 0xc000 #define IEEE802154_FTYPE_DATA 0x0001 +#define IEEE802154_FCTL_ADDR_NONE 0x0000 +#define IEEE802154_FCTL_DADDR_SHORT 0x0800 +#define IEEE802154_FCTL_DADDR_EXTENDED 0x0c00 +#define IEEE802154_FCTL_SADDR_SHORT 0x8000 +#define IEEE802154_FCTL_SADDR_EXTENDED 0xc000 + /* * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder @@ -261,6 +272,24 @@ static inline bool ieee802154_is_intra_pan(__le16 fc) return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); } +/* + * ieee802154_daddr_mode - get daddr mode from fc + * @fc: frame control bytes in little-endian byteorder + */ +static inline __le16 ieee802154_daddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_DADDR); +} + +/* + * ieee802154_saddr_mode - get saddr mode from fc + * @fc: frame control bytes in little-endian byteorder + */ +static inline __le16 ieee802154_saddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SADDR); +} + /** * ieee802154_is_valid_psdu_len - check if psdu len is valid * available lengths:
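The new FCTL masks and mode helpers let MAC code decode a frame's addressing layout straight from the frame control word. An illustrative sketch (not part of the patch; my_daddr_len is a made-up helper):

#include <linux/errno.h>
#include <linux/ieee802154.h>

/* Sketch: destination address length implied by the frame control
 * field 'fc' (little-endian, as found in the MAC header). */
static int my_daddr_len(__le16 fc)
{
	switch (le16_to_cpu(ieee802154_daddr_mode(fc))) {
	case IEEE802154_FCTL_ADDR_NONE:
		return 0;
	case IEEE802154_FCTL_DADDR_SHORT:
		return IEEE802154_SHORT_ADDR_LEN;	/* 2 bytes */
	case IEEE802154_FCTL_DADDR_EXTENDED:
		return IEEE802154_EXTENDED_ADDR_LEN;	/* 8 bytes */
	default:
		return -EINVAL;				/* reserved mode */
	}
}

diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 99403b19092f..228bd44efa4c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -223,6 +223,7 @@ struct st_sensor_settings { * @get_irq_data_ready: Function to get the IRQ used for data ready signal. * @tf: Transfer function structure used by I/O operations. * @tb: Transfer buffers and mutex used by I/O operations. + * @edge_irq: the IRQ triggers on edges and needs special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. */ @@ -250,14 +251,13 @@ struct st_sensor_data { const struct st_sensor_transfer_function *tf; struct st_sensor_transfer_buffer tb; + bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; }; #ifdef CONFIG_IIO_BUFFER irqreturn_t st_sensors_trigger_handler(int irq, void *p); - -int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); #endif #ifdef CONFIG_IIO_TRIGGER @@ -287,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); -void st_sensors_power_enable(struct iio_dev *indio_dev); +int st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 7c29cb0124ae..854e2dad1e0d 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -312,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, }, \ } -/** - * iio_get_time_ns() - utility function to get a time stamp for events etc - **/ -static inline s64 iio_get_time_ns(void) -{ - return ktime_get_real_ns(); -} +s64 iio_get_time_ns(const struct iio_dev *indio_dev); +unsigned int iio_get_time_res(const struct iio_dev *indio_dev); /* Device operating modes */ #define INDIO_DIRECT_MODE 0x01 @@ -497,6 +492,7 @@ struct iio_buffer_setup_ops { * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device.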
* @info: [DRIVER] callbacks and constant info from driver + * @clock_id: [INTERN] timestamping clock posix identifier * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable @@ -537,6 +533,7 @@ struct iio_dev { struct attribute_group chan_attr_group; const char *name; const struct iio_info *info; + clockid_t clock_id; struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; @@ -565,7 +562,7 @@ extern struct bus_type iio_bus_type; /** * iio_device_put() - reference counted deallocation of struct device - * @indio_dev: IIO device structure containing the device + * @indio_dev: IIO device structure containing the device **/ static inline void iio_device_put(struct iio_dev *indio_dev) { @@ -574,6 +571,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev) } /** + * iio_device_get_clock() - Retrieve current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + */ +static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) +{ + return indio_dev->clock_id; +} + +/** * dev_to_iio_dev() - Get IIO device struct from a device struct * @dev: The device embedded in the IIO device * diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h new file mode 100644 index 000000000000..23ca41515527 --- /dev/null +++ b/include/linux/iio/sw_device.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software device interface + * + * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __IIO_SW_DEVICE +#define __IIO_SW_DEVICE + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/configfs.h> + +#define module_iio_sw_device_driver(__iio_sw_device_type) \ + module_driver(__iio_sw_device_type, iio_register_sw_device_type, \ + iio_unregister_sw_device_type) + +struct iio_sw_device_ops; + +struct iio_sw_device_type { + const char *name; + struct module *owner; + const struct iio_sw_device_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_device { + struct iio_dev *device; + struct iio_sw_device_type *device_type; + struct config_group group; +}; + +struct iio_sw_device_ops { + struct iio_sw_device* (*probe)(const char *); + int (*remove)(struct iio_sw_device *); +}; + +static inline +struct iio_sw_device *to_iio_sw_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_device, + group); +} + +int iio_register_sw_device_type(struct iio_sw_device_type *dt); +void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); + +struct iio_sw_device *iio_sw_device_create(const char *, const char *); +void iio_sw_device_destroy(struct iio_sw_device *); + +int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); +void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); + +static inline +void iio_swd_group_init_type_name(struct iio_sw_device *d, + const char *name, + struct config_item_type *type) +{ +#ifdef CONFIG_CONFIGFS_FS + config_group_init_type_name(&d->group, name, type); +#endif +} + +#endif /* __IIO_SW_DEVICE */ diff --git a/include/linux/input.h b/include/linux/input.h index 1e967694e9a5..a65e3b24fb18 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ 
-95,7 +95,7 @@ struct input_value { * @grab: input handle that currently has the device grabbed (via * EVIOCGRAB ioctl). When a handle grabs a device it becomes sole * recipient for all input events coming from the device - * @event_lock: this spinlock is is taken when input core receives + * @event_lock: this spinlock is taken when input core receives * and processes a new event for the device (in input_event()). * Code that accesses and/or modifies parameters of a device * (such as keymap or absmin, absmax, absfuzz, etc.) after device diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h index c91e1376132b..09d22ccb9e41 100644 --- a/include/linux/input/touchscreen.h +++ b/include/linux/input/touchscreen.h @@ -10,7 +10,26 @@ #define _TOUCHSCREEN_H struct input_dev; +struct input_mt_pos; -void touchscreen_parse_properties(struct input_dev *dev, bool multitouch); +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + struct touchscreen_properties *prop); + +void touchscreen_set_mt_pos(struct input_mt_pos *pos, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y); + +void touchscreen_report_pos(struct input_dev *input, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y, + bool multitouch); #endif
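The flow implied by the new interface: parse the firmware-described properties once into a struct touchscreen_properties, then route every coordinate through the transform helper. A hypothetical driver fragment (sketch, not from the patch; mydrv_* names invented):

#include <linux/input.h>
#include <linux/input/touchscreen.h>

struct mydrv_ts {
	struct input_dev *input;
	struct touchscreen_properties prop;
};

/* At probe time: fills max_x/max_y and the invert/swap flags from the
 * generic touchscreen device properties. */
static void mydrv_ts_init(struct mydrv_ts *ts)
{
	touchscreen_parse_properties(ts->input, true, &ts->prop);
}

/* Per-event path: the helper applies inversion/swapping before the
 * coordinates are reported as multitouch positions. */
static void mydrv_ts_report(struct mydrv_ts *ts, unsigned int x,
			    unsigned int y)
{
	touchscreen_report_pos(ts->input, &ts->prop, x, y, true);
	input_sync(ts->input);
}

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9fcabeb07787..b6683f0ffc9f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); +struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); + #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) @@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { return 0; } + +static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) +{ + *nr_vecs = 1; + return NULL; +} #endif /* CONFIG_SMP */ /* diff --git a/include/linux/iomap.h b/include/linux/iomap.h new file mode 100644 index 000000000000..3267df461012 --- /dev/null +++ b/include/linux/iomap.h @@ -0,0 +1,70 @@ +#ifndef LINUX_IOMAP_H +#define LINUX_IOMAP_H 1 + +#include <linux/types.h> + +struct fiemap_extent_info; +struct inode; +struct iov_iter; +struct kiocb; +struct vm_area_struct; +struct vm_fault; + +/* + * Types of block ranges for iomap mappings: + */ +#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ +#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ +#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */ +#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ + +/* + * Magic value for blkno: + */ +#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ + +struct iomap { + sector_t blkno; /* 1st sector of mapping, 512b units */ + loff_t offset; /* file offset of mapping, bytes */ + u64 length; /* length of mapping, bytes */ + int type; /* type of mapping */ + struct block_device *bdev; /* block device for I/O */ +}; + +/* + * Flags for iomap_begin / iomap_end. No flag implies a read.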
+ */ +#define IOMAP_WRITE (1 << 0) +#define IOMAP_ZERO (1 << 1) + +struct iomap_ops { + /* + * Return the existing mapping at pos, or reserve space starting at + * pos for up to length, as long as we can do it as a single mapping. + * The actual length is returned in iomap->length. + */ + int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length, + unsigned flags, struct iomap *iomap); + + /* + * Commit and/or unreserve space previously allocated using iomap_begin. + * Written indicates the length of the successful write operation which + * needs to be committed, while the rest needs to be unreserved. + * Written might be zero if no data was written. + */ + int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length, + ssize_t written, unsigned flags, struct iomap *iomap); +}; + +ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, + struct iomap_ops *ops); +int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, + bool *did_zero, struct iomap_ops *ops); +int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, + struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + struct iomap_ops *ops); +int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, struct iomap_ops *ops); + +#endif /* LINUX_IOMAP_H */
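A toy pair of callbacks shows the contract: ->iomap_begin translates a file range into a single mapping, and ->iomap_end undoes whatever was reserved. The filesystem below is imaginary and myfs_data_start() is a made-up per-inode lookup; a real implementation would consult its block map and reserve space for IOMAP_WRITE/IOMAP_ZERO requests:

#include <linux/fs.h>
#include <linux/iomap.h>

/* Made-up helper: first data sector of the inode's single extent,
 * standing in for a real block-map lookup. */
static sector_t myfs_data_start(struct inode *inode)
{
	return 0;
}

static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap)
{
	if (pos >= i_size_read(inode) && !(flags & (IOMAP_WRITE | IOMAP_ZERO))) {
		iomap->type = IOMAP_HOLE;
		iomap->blkno = IOMAP_NULL_BLOCK;
	} else {
		iomap->type = IOMAP_MAPPED;
		/* blkno is in 512-byte units, hence the byte-to-sector shift */
		iomap->blkno = myfs_data_start(inode) + (pos >> 9);
	}
	iomap->offset = pos;
	iomap->length = length;
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static int myfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	/* nothing was reserved in ->iomap_begin, so nothing to undo */
	return 0;
}

static struct iomap_ops myfs_iomap_ops = {
	.iomap_begin	= myfs_iomap_begin,
	.iomap_end	= myfs_iomap_end,
};

diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 838dbfa3c331..78c5d5ae3857 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -277,7 +277,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len); */ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, - SI_PCI, SI_DEVICETREE, SI_DEFAULT + SI_PCI, SI_DEVICETREE, SI_LAST }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5c91b0b055d4..c6dbcd84a2c7 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -283,6 +283,8 @@ struct tcp6_timewait_sock { }; #if IS_ENABLED(CONFIG_IPV6) +bool ipv6_mod_enabled(void); + static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) { return sk_fullsock(__sk) ?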
inet_sk(__sk)->pinet6 : NULL; @@ -326,6 +328,11 @@ static inline int inet_v6_ipv6only(const struct sock *sk) #define ipv6_only_sock(sk) 0 #define ipv6_sk_rxinfo(sk) 0 +static inline bool ipv6_mod_enabled(void) +{ + return false; +} + static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) { return NULL; diff --git a/include/linux/irq.h b/include/linux/irq.h index 4d758a7c604a..b52424eaa0ed 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -197,6 +197,7 @@ struct irq_data { * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU + * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -212,6 +213,7 @@ enum { IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), IRQD_FORWARDED_TO_VCPU = (1 << 20), + IRQD_AFFINITY_MANAGED = (1 << 21), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } +static inline bool irqd_affinity_is_managed(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) @@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) /** * struct irq_chip - hardware interrupt chip descriptor * + * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) @@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @flags: chip specific flags */ struct irq_chip { + struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); @@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_untracked_irq(struct irq_desc *desc); extern void handle_percpu_irq(struct irq_desc *desc); extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +extern int irq_chip_pm_get(struct irq_data *data); +extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); @@ -701,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner); + struct module *owner, const struct cpumask *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ - __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ irq_alloc_descs(-1, 0, 1, node) diff --git a/include/linux/irqchip/arm-gic-v3.h 
b/include/linux/irqchip/arm-gic-v3.h index dc493e0f0ff7..107eed475b94 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -204,6 +204,7 @@ #define GITS_BASER_NR_REGS 8 #define GITS_BASER_VALID (1UL << 63) +#define GITS_BASER_INDIRECT (1UL << 62) #define GITS_BASER_nCnB (0UL << 59) #define GITS_BASER_nC (1UL << 59) #define GITS_BASER_RaWt (2UL << 59) @@ -228,6 +229,7 @@ #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -238,6 +240,8 @@ #define GITS_BASER_TYPE_RESERVED6 6 #define GITS_BASER_TYPE_RESERVED7 7 +#define GITS_LVL1_ENTRY_SIZE (8UL) + /* * ITS commands */ diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index fd051855539b..eafc965b3eb8 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -101,9 +101,14 @@ #include <linux/irqdomain.h> struct device_node; +struct gic_chip_data; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); +void gic_cpu_save(struct gic_chip_data *gic); +void gic_cpu_restore(struct gic_chip_data *gic); +void gic_dist_save(struct gic_chip_data *gic); +void gic_dist_restore(struct gic_chip_data *gic); /* * Subdrivers that need some preparatory work can initialize their @@ -112,6 +117,12 @@ int gic_cpu_if_down(unsigned int gic_nr); int gic_of_init(struct device_node *node, struct device_node *parent); /* + * Initialises and registers a non-root or child GIC chip. Memory for + * the gic_chip_data structure is dynamically allocated. + */ +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); + +/* * Legacy platforms not converted to DT yet must use this to init * their GIC */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index f1f36e04d885..ffb84604c1de 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -39,6 +39,7 @@ struct irq_domain; struct of_device_id; struct irq_chip; struct irq_data; +struct cpumask; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, - irq_hw_number_t hwirq, int node); + irq_hw_number_t hwirq, int node, + const struct cpumask *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc); + bool realloc, const struct cpumask *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern void irq_domain_activate_irq(struct irq_data *irq_data); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); @@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { - return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); + return __irq_domain_alloc_irqs(domain, -1, 
nr_irqs, node, arg, false, + NULL); } extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, @@ -452,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, return -1; } +static inline void irq_domain_free_irqs(unsigned int virq, + unsigned int nr_irqs) { } + static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index efb232c5f668..dfaa1f4dcb0c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -491,10 +491,6 @@ struct jbd2_journal_handle unsigned long h_start_jiffies; unsigned int h_requested_credits; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map h_lockdep_map; -#endif }; @@ -793,6 +789,7 @@ jbd2_time_diff(unsigned long start, unsigned long end) * @j_proc_entry: procfs entry for the jbd statistics directory * @j_stats: Overall statistics * @j_private: An opaque pointer to fs-private information. + * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ struct journal_s @@ -1035,8 +1032,26 @@ struct journal_s /* Precomputed journal UUID checksum for seeding other checksums */ __u32 j_csum_seed; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Lockdep entity to track transaction commit dependencies. Handles + * hold this "lock" for read; when we wait for commit, we acquire the + * "lock" for writing. This matches the properties of jbd2 journalling + * where the running transaction has to wait for all handles to be + * dropped to commit that transaction and also acquiring a handle may + * require transaction commit to finish. + */ + struct lockdep_map j_trans_commit_map; +#endif }; +#define jbd2_might_wait_for_commit(j) \ + do { \ + rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ + } while (0) + /* journal feature predicate functions */ #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \
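The macro pairs a read-side acquire (taken while a handle is held) with a write-side acquire/release at every point that can wait for a commit, so lockdep can flag handle-vs-commit deadlocks. A sketch of the annotation at a wait site (illustrative only; my_wait_for_commit is a made-up wrapper around the existing jbd2_log_wait_commit() primitive):

#include <linux/jbd2.h>

/* Any path that may block until a transaction commits should run the
 * annotation before sleeping, so lockdep can cross-check it against
 * contexts that still hold a handle "for read". */
static int my_wait_for_commit(journal_t *journal, tid_t tid)
{
	jbd2_might_wait_for_commit(journal);

	return jbd2_log_wait_commit(journal, tid);
}

diff --git a/include/linux/kasan.h b/include/linux/kasan.h index ac4b3c46a84d..c9cf374445d8 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -77,6 +77,7 @@ void kasan_free_shadow(const struct vm_struct *vm); size_t ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t kasan_metadata_size(struct kmem_cache *cache); #else /* CONFIG_KASAN */ @@ -121,6 +122,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} static inline void kasan_unpoison_slab(const void *ptr) { } +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index a19bcf9e762e..410decacff8f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void); static inline int kdb_process_cpu(const struct task_struct *p) { - unsigned int cpu = task_thread_info(p)->cpu; + unsigned int cpu = task_cpu(p); if (cpu > num_possible_cpus()) cpu = 0; return cpu; } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 94aa10ffe156..c42082112ec8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -451,6 +451,7 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; +extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; extern bool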
crash_kexec_post_notifiers; diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 25a822f6f000..44fda64ad434 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -92,7 +92,6 @@ static inline void account_process_tick(struct task_struct *tsk, int user) extern void account_process_tick(struct task_struct *, int user); #endif -extern void account_steal_ticks(unsigned long ticks); extern void account_idle_ticks(unsigned long ticks); #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index eeb307985715..1e032a1ddb3e 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -4,6 +4,11 @@ #include <linux/sched.h> /* MMF_VM_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern struct attribute_group khugepaged_attr_group; + +extern int khugepaged_init(void); +extern void khugepaged_destroy(void); +extern int start_stop_khugepaged(void); extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7ae216a39c9e..481c8c4627ca 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page) static inline void set_page_stable_node(struct page *page, struct stable_node *stable_node) { - page->mapping = (void *)stable_node + - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } /* diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h new file mode 100644 index 000000000000..49b37ed8d456 --- /dev/null +++ b/include/linux/leds-lp3952.h @@ -0,0 +1,125 @@ +/* + * LED driver for TI lp3952 controller + * + * Copyright (C) 2016, DAQRI, LLC. + * Author: Tony Makkiel <tony.makkiel@daqri.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef LEDS_LP3952_H_ +#define LEDS_LP3952_H_ + +#define LP3952_NAME "lp3952" +#define LP3952_CMD_REG_COUNT 8 +#define LP3952_BRIGHT_MAX 4 +#define LP3952_LABEL_MAX_LEN 15 + +#define LP3952_REG_LED_CTRL 0x00 +#define LP3952_REG_R1_BLNK_TIME_CTRL 0x01 +#define LP3952_REG_R1_BLNK_CYCLE_CTRL 0x02 +#define LP3952_REG_G1_BLNK_TIME_CTRL 0x03 +#define LP3952_REG_G1_BLNK_CYCLE_CTRL 0x04 +#define LP3952_REG_B1_BLNK_TIME_CTRL 0x05 +#define LP3952_REG_B1_BLNK_CYCLE_CTRL 0x06 +#define LP3952_REG_ENABLES 0x0B +#define LP3952_REG_PAT_GEN_CTRL 0x11 +#define LP3952_REG_RGB1_MAX_I_CTRL 0x12 +#define LP3952_REG_RGB2_MAX_I_CTRL 0x13 +#define LP3952_REG_CMD_0 0x50 +#define LP3952_REG_RESET 0x60 +#define REG_MAX LP3952_REG_RESET + +#define LP3952_PATRN_LOOP BIT(1) +#define LP3952_PATRN_GEN_EN BIT(2) +#define LP3952_INT_B00ST_LDR BIT(2) +#define LP3952_ACTIVE_MODE BIT(6) +#define LP3952_LED_MASK_ALL 0x3f + +/* Transition Time in ms */ +enum lp3952_tt { + TT0, + TT55, + TT110, + TT221, + TT422, + TT885, + TT1770, + TT3539 +}; + +/* Command Execution Time in ms */ +enum lp3952_cet { + CET197, + CET393, + CET590, + CET786, + CET1180, + CET1376, + CET1573, + CET1769, + CET1966, + CET2163, + CET2359, + CET2556, + CET2763, + CET2949, + CET3146 +}; + +/* Max Current in % */ +enum lp3952_colour_I_log_0 { + I0, + I7, + I14, + I21, + I32, + I46, + I71, + I100 +}; + +enum lp3952_leds { + LP3952_BLUE_2, + LP3952_GREEN_2, + LP3952_RED_2, + LP3952_BLUE_1, + LP3952_GREEN_1, + LP3952_RED_1, + LP3952_LED_ALL +}; + +struct lp3952_ctrl_hdl { + struct led_classdev cdev; + char name[LP3952_LABEL_MAX_LEN]; + enum lp3952_leds channel; + void *priv; +}; + +struct ptrn_gen_cmd { + union { + struct { + u16 tt:3; + u16 b:3; + u16 cet:4; + u16 g:3; + u16 r:3; + }; + struct { + u8 lsb; + u8 msb; + } bytes; + }; +} __packed; + +struct lp3952_led_array { + struct regmap *regmap; + struct i2c_client *client; + struct gpio_desc *enable_gpio; + struct lp3952_ctrl_hdl leds[LP3952_LED_ALL]; +}; + +#endif /* LEDS_LP3952_H_ */ diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h index b8d6fffed4d8..d215b4561180 100644 --- a/include/linux/leds-pca9532.h +++ b/include/linux/leds-pca9532.h @@ -16,6 +16,7 @@ #include <linux/leds.h> #include <linux/workqueue.h> +#include <dt-bindings/leds/leds-pca9532.h> enum pca9532_state { PCA9532_OFF = 0x0, @@ -24,16 +25,14 @@ enum pca9532_state { PCA9532_PWM1 = 0x3 }; -enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, - PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO }; - struct pca9532_led { u8 id; struct i2c_client *client; - char *name; + const char *name; + const char *default_trigger; struct led_classdev ldev; struct work_struct work; - enum pca9532_type type; + u32 type; enum pca9532_state state; }; diff --git a/include/linux/leds.h b/include/linux/leds.h index e5e7f2e80a54..8a3b5d29602f 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -325,10 +325,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) #endif /* CONFIG_LEDS_TRIGGERS */ /* Trigger specific functions */ -#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK -extern void ledtrig_ide_activity(void); +#ifdef CONFIG_LEDS_TRIGGER_DISK +extern void ledtrig_disk_activity(void); #else -static inline void ledtrig_ide_activity(void) {} +static inline void ledtrig_disk_activity(void) {} #endif #ifdef CONFIG_LEDS_TRIGGER_MTD @@ -387,8 +387,16 @@ struct gpio_led_platform_data { unsigned long *delay_off); }; +#ifdef CONFIG_NEW_LEDS struct platform_device *gpio_led_register_device( int id, const struct 
gpio_led_platform_data *pdata); +#else +static inline struct platform_device *gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata) +{ + return 0; +} +#endif enum cpu_led_event { CPU_LED_IDLE_START, /* CPU enters idle */ diff --git a/include/linux/libata.h b/include/linux/libata.h index d15c19e331d1..e37d4f99f510 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -146,13 +146,6 @@ enum { ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ - /* protocol flags */ - ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ - ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ - ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA, - ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ - ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ - /* struct ata_device stuff */ ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ @@ -1039,58 +1032,29 @@ extern const unsigned long sata_deb_timing_long[]; extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; -/* - * protocol tests - */ -static inline unsigned int ata_prot_flags(u8 prot) -{ - switch (prot) { - case ATA_PROT_NODATA: - return 0; - case ATA_PROT_PIO: - return ATA_PROT_FLAG_PIO; - case ATA_PROT_DMA: - return ATA_PROT_FLAG_DMA; - case ATA_PROT_NCQ: - return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ; - case ATAPI_PROT_NODATA: - return ATA_PROT_FLAG_ATAPI; - case ATAPI_PROT_PIO: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO; - case ATAPI_PROT_DMA: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA; - } - return 0; -} - -static inline int ata_is_atapi(u8 prot) -{ - return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI; -} - -static inline int ata_is_nodata(u8 prot) +static inline bool ata_is_atapi(u8 prot) { - return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA); + return prot & ATA_PROT_FLAG_ATAPI; } -static inline int ata_is_pio(u8 prot) +static inline bool ata_is_pio(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO; + return prot & ATA_PROT_FLAG_PIO; } -static inline int ata_is_dma(u8 prot) +static inline bool ata_is_dma(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA; + return prot & ATA_PROT_FLAG_DMA; } -static inline int ata_is_ncq(u8 prot) +static inline bool ata_is_ncq(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ; + return prot & ATA_PROT_FLAG_NCQ; } -static inline int ata_is_data(u8 prot) +static inline bool ata_is_data(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA; + return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA); } static inline int is_multi_taskfile(struct ata_taskfile *tf) @@ -1407,7 +1371,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) return ap->nr_pmp_links != 0; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return link == &link->ap->link || link == link->ap->slave_link; } @@ -1422,7 +1386,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) return false; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return 1; } diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 0c3c30cbbea5..b519e137b9b7 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -52,6 +52,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nd_namespace_label; struct 
nvdimm_drvdata; + struct nd_mapping { struct nvdimm *nvdimm; struct nd_namespace_label **labels; @@ -69,6 +70,7 @@ struct nd_mapping { struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long cmd_mask; + struct module *module; char *provider_name; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); @@ -99,13 +101,21 @@ struct nd_region_desc { unsigned long flags; }; +struct device; +void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset, + size_t size, unsigned long flags); +static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, + resource_size_t offset, size_t size) +{ + return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0); +} + struct nvdimm_bus; struct module; struct device; struct nd_blk_region; struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); struct nd_region_desc ndr_desc; @@ -119,22 +129,22 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); -struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, - struct nvdimm_bus_descriptor *nfit_desc, struct module *module); -#define nvdimm_bus_register(parent, desc) \ - __nvdimm_bus_register(parent, desc, THIS_MODULE) +struct nvdimm_bus *nvdimm_bus_register(struct device *parent, + struct nvdimm_bus_descriptor *nfit_desc); void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); +struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, - unsigned long cmd_mask); + unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, @@ -156,4 +166,6 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); +void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_has_flush(struct nd_region *nd_region); #endif /* __LIBNVDIMM_H__ */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index ef2c7d2e76c4..ba78b8306674 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,7 +1,9 @@ #ifndef NVM_H #define NVM_H +#include <linux/blkdev.h> #include <linux/types.h> +#include <uapi/linux/lightnvm.h> enum { NVM_IO_OK = 0, @@ -269,24 +271,15 @@ struct nvm_lun { int lun_id; int chnl_id; - /* It is up to the target to mark blocks as closed. 
If the target does - * not do it, all blocks are marked as open, and nr_open_blocks - * represents the number of blocks in use - */ - unsigned int nr_open_blocks; /* Number of used, writable blocks */ - unsigned int nr_closed_blocks; /* Number of used, read-only blocks */ - unsigned int nr_free_blocks; /* Number of unused blocks */ - unsigned int nr_bad_blocks; /* Number of bad blocks */ - spinlock_t lock; + unsigned int nr_free_blocks; /* Number of unused blocks */ struct nvm_block *blocks; }; enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */ - NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */ + NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; @@ -385,6 +378,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, { struct ppa_addr l; + l.ppa = 0; /* * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. */ @@ -455,6 +449,8 @@ struct nvm_tgt_type { struct list_head list; }; +extern struct nvm_tgt_type *nvm_find_target_type(const char *, int); + extern int nvm_register_tgt_type(struct nvm_tgt_type *); extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); @@ -463,6 +459,9 @@ extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); typedef int (nvmm_register_fn)(struct nvm_dev *); typedef void (nvmm_unregister_fn)(struct nvm_dev *); + +typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); +typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, struct nvm_lun *, unsigned long); typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); @@ -488,9 +487,10 @@ struct nvmm_type { nvmm_register_fn *register_mgr; nvmm_unregister_fn *unregister_mgr; + nvmm_create_tgt_fn *create_tgt; + nvmm_remove_tgt_fn *remove_tgt; + /* Block administration callbacks */ - nvmm_get_blk_fn *get_blk_unlocked; - nvmm_put_blk_fn *put_blk_unlocked; nvmm_get_blk_fn *get_blk; nvmm_put_blk_fn *put_blk; nvmm_open_blk_fn *open_blk; @@ -520,10 +520,6 @@ struct nvmm_type { extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); -extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, - struct nvm_lun *, unsigned long); -extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); - extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, unsigned long); extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); @@ -532,11 +528,13 @@ extern int nvm_register(struct request_queue *, char *, struct nvm_dev_ops *); extern void nvm_unregister(char *); +void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); + extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, - struct ppa_addr *, int, int); + const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); diff --git a/include/linux/list.h b/include/linux/list.h index 5356f4d661a7..5183138aa932 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h) } /* + * Check 
whether the node is the only node of the head without + accessing head: + */ +static inline bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} + +/* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */
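A short usage sketch (not from the patch; my_remove_and_check_empty is invented): with only the node and head pointers in hand, a caller can tell whether unlinking the node will leave the list empty, without reading head->first:

#include <linux/list.h>

/* Hypothetical helper: remove 'n' from 'h' and report whether it was
 * the sole member. The check relies on the first node's pprev pointing
 * back at &h->first and the last node's next being NULL. */
static bool my_remove_and_check_empty(struct hlist_node *n,
				      struct hlist_head *h)
{
	bool was_last = hlist_is_singular_node(n, h);

	hlist_del_init(n);
	return was_last;
}

diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h index a243dbba8659..61f5b21b31c7 100644 --- a/include/linux/mdio-mux.h +++ b/include/linux/mdio-mux.h @@ -10,11 +10,13 @@ #ifndef __LINUX_MDIO_MUX_H #define __LINUX_MDIO_MUX_H #include <linux/device.h> +#include <linux/phy.h> int mdio_mux_init(struct device *dev, int (*switch_fn) (int cur, int desired, void *data), void **mux_handle, - void *data); + void *data, + struct mii_bus *mux_bus); void mdio_mux_uninit(void *mux_handle); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 3106ac1c895e..2925da23505d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -73,8 +73,8 @@ extern bool movable_node_enabled; if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end, - int nid, ulong flags); + phys_addr_t start, phys_addr_t end, + int nid, ulong flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); @@ -110,7 +110,7 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, phys_addr_t *out_end, int *out_nid); void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, - phys_addr_t *out_end); + phys_addr_t *out_end); /** * for_each_mem_range - iterate through memblock areas from type_a and not @@ -148,7 +148,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, p_start, p_end, p_nid) \ for (i = (u64)ULLONG_MAX, \ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ - p_start, p_end, p_nid); \ + p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) @@ -163,8 +163,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, * is initialized.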
*/ #define for_each_reserved_mem_region(i, p_start, p_end) \ - for (i = 0UL, \ - __next_reserved_mem_region(&i, p_start, p_end); \ + for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ i != (u64)ULLONG_MAX; \ __next_reserved_mem_region(&i, p_start, p_end)) @@ -333,6 +332,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); +void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); int memblock_is_map_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); @@ -403,15 +403,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo } #define for_each_memblock(memblock_type, region) \ - for (region = memblock.memblock_type.regions; \ + for (region = memblock.memblock_type.regions; \ region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_type(memblock_type, rgn) \ - idx = 0; \ - rgn = &memblock_type->regions[idx]; \ - for (idx = 0; idx < memblock_type->cnt; \ - idx++,rgn = &memblock_type->regions[idx]) + for (idx = 0, rgn = &memblock_type->regions[0]; \ + idx < memblock_type->cnt; \ + idx++, rgn = &memblock_type->regions[idx]) #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 56e6069d2452..5d8ca6e02e39 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -52,7 +52,7 @@ enum mem_cgroup_stat_index { MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ MEM_CGROUP_STAT_NSTATS, /* default hierarchy stats */ - MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS, + MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS, MEMCG_SLAB_RECLAIMABLE, MEMCG_SLAB_UNRECLAIMABLE, MEMCG_SOCK, @@ -60,7 +60,7 @@ enum mem_cgroup_stat_index { }; struct mem_cgroup_reclaim_cookie { - struct zone *zone; + pg_data_t *pgdat; int priority; unsigned int generation; }; @@ -118,7 +118,7 @@ struct mem_cgroup_reclaim_iter { /* * per-zone information in memory controller. */ -struct mem_cgroup_per_zone { +struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[NR_LRU_LISTS]; @@ -132,10 +132,6 @@ struct mem_cgroup_per_zone { /* use container_of */ }; -struct mem_cgroup_per_node { - struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; -}; - struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; @@ -314,8 +310,46 @@ void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); -struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); -struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); +static struct mem_cgroup_per_node * +mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) +{ + return memcg->nodeinfo[nid]; +} + +/** + * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone + * @node: node of the wanted lruvec + * @memcg: memcg of the wanted lruvec + * + * Returns the lru list vector holding pages for a given @node or a given + * @memcg and @zone. This can be the node lruvec, if the memory controller + * is disabled. 
+ */ +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) +{ + struct mem_cgroup_per_node *mz; + struct lruvec *lruvec; + + if (mem_cgroup_disabled()) { + lruvec = node_lruvec(pgdat); + goto out; + } + + mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->pgdat here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->pgdat != pgdat)) + lruvec->pgdat = pgdat; + return lruvec; +} + +struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -404,9 +438,9 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { - struct mem_cgroup_per_zone *mz; + struct mem_cgroup_per_node *mz; - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); return mz->lru_size[lru]; } @@ -477,7 +511,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, mem_cgroup_update_page_stat(page, idx, -1); } -unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); @@ -568,16 +602,16 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new) { } -static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, - struct mem_cgroup *memcg) +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) { - return &zone->lruvec; + return node_lruvec(pgdat); } static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, - struct zone *zone) + struct pglist_data *pgdat) { - return &zone->lruvec; + return &pgdat->lruvec; } static inline bool mm_match_cgroup(struct mm_struct *mm, @@ -681,7 +715,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, } static inline -unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) { @@ -749,6 +783,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } #endif +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void memcg_kmem_uncharge(struct page *page, int order); + #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; @@ -770,22 +811,6 @@ static inline bool memcg_kmem_enabled(void) } /* - * In general, we'll do everything in our power to not incur in any overhead - * for non-memcg users for the kmem functions. Not even a function call, if we - * can avoid it. - * - * Therefore, we'll inline all those functions so that in the best case, we'll - * see that kmemcg is off for everybody and proceed quickly. If it is on, - * we'll still do most of the flag checking inline. 
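/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * With the LRU moved from the zone to the node, a reclaim-style caller
 * resolves a page's lruvec via its pgdat; page_pgdat() is added to mm.h
 * by this same series. A hypothetical helper (ex_ names are invented):
 */
#include <linux/memcontrol.h>
#include <linux/mm.h>

static struct lruvec *ex_lruvec_of(struct page *page, struct mem_cgroup *memcg)
{
	/* falls back to the node lruvec when the controller is disabled */
	return mem_cgroup_lruvec(page_pgdat(page), memcg);
}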
We check a lot of - * conditions, but because they are pretty simple, they are expected to be - * fast. - */ -int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, - struct mem_cgroup *memcg); -int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); -void __memcg_kmem_uncharge(struct page *page, int order); - -/* * helper for accessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. This function * will return -1 when this is not a kmem-limited memcg. @@ -795,67 +820,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); -void __memcg_kmem_put_cache(struct kmem_cache *cachep); - -static inline bool __memcg_kmem_bypass(void) -{ - if (!memcg_kmem_enabled()) - return true; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return true; - return false; -} - -/** - * memcg_kmem_charge: charge a kmem page - * @page: page to charge - * @gfp: reclaim mode - * @order: allocation order - * - * Returns 0 on success, an error code on failure. - */ -static __always_inline int memcg_kmem_charge(struct page *page, - gfp_t gfp, int order) -{ - if (__memcg_kmem_bypass()) - return 0; - if (!(gfp & __GFP_ACCOUNT)) - return 0; - return __memcg_kmem_charge(page, gfp, order); -} - -/** - * memcg_kmem_uncharge: uncharge a kmem page - * @page: page to uncharge - * @order: allocation order - */ -static __always_inline void memcg_kmem_uncharge(struct page *page, int order) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge(page, order); -} - -/** - * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation - * @cachep: the original global kmem cache - * - * All memory allocated from a per-memcg cache is charged to the owner memcg. 
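/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * The __always_inline wrappers deleted above move out of line into
 * mm/memcontrol.c; the declarations earlier in this hunk are now the
 * whole public API. Callers (primarily the generic page-allocator
 * paths) keep roughly the pattern below -- a condensed, hypothetical
 * sketch, not the in-tree implementation:
 */
#include <linux/gfp.h>
#include <linux/memcontrol.h>

static struct page *ex_alloc_accounted(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp | __GFP_ACCOUNT, order);

	/* cheap static-key check kept on the fast path, as the allocator does */
	if (page && memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
	    memcg_kmem_charge(page, gfp, order)) {
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}

static void ex_free_accounted(struct page *page, int order)
{
	/* the in-tree free path also checks the page's charged state */
	if (memcg_kmem_enabled())
		memcg_kmem_uncharge(page, order);
	__free_pages(page, order);
}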
- */ -static __always_inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - if (__memcg_kmem_bypass()) - return cachep; - return __memcg_kmem_get_cache(cachep, gfp); -} - -static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_put_cache(cachep); -} - /** * memcg_kmem_update_page_stat - update kmem page state statistics * @page: the page @@ -878,15 +842,6 @@ static inline bool memcg_kmem_enabled(void) return false; } -static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) -{ - return 0; -} - -static inline void memcg_kmem_uncharge(struct page *page, int order) -{ -} - static inline int memcg_cache_id(struct mem_cgroup *memcg) { return -1; @@ -900,16 +855,6 @@ static inline void memcg_put_cache_ids(void) { } -static inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - return cachep; -} - -static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ -} - static inline void memcg_kmem_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int val) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 5145620ba48a..01033fadea47 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -284,5 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, unsigned long map_offset); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); +extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, + enum zone_type target); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index bcaa634139a9..93416196ba64 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,7 +26,7 @@ struct vmem_altmap { unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); -#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE) +#ifdef CONFIG_ZONE_DEVICE struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); #else static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index c72d5344f3b3..cadc6543909d 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -20,6 +20,7 @@ #define RN5T618_OTPVER 0x01 #define RN5T618_IODAC 0x02 #define RN5T618_VINDAC 0x03 +#define RN5T618_OUT32KEN 0x05 #define RN5T618_CPUCNT 0x06 #define RN5T618_PSWR 0x07 #define RN5T618_PONHIS 0x09 @@ -38,6 +39,7 @@ #define RN5T618_DC1_SLOT 0x16 #define RN5T618_DC2_SLOT 0x17 #define RN5T618_DC3_SLOT 0x18 +#define RN5T618_DC4_SLOT 0x19 #define RN5T618_LDO1_SLOT 0x1b #define RN5T618_LDO2_SLOT 0x1c #define RN5T618_LDO3_SLOT 0x1d @@ -54,12 +56,16 @@ #define RN5T618_DC2CTL2 0x2f #define RN5T618_DC3CTL 0x30 #define RN5T618_DC3CTL2 0x31 +#define RN5T618_DC4CTL 0x32 +#define RN5T618_DC4CTL2 0x33 #define RN5T618_DC1DAC 0x36 #define RN5T618_DC2DAC 0x37 #define RN5T618_DC3DAC 0x38 +#define RN5T618_DC4DAC 0x39 #define RN5T618_DC1DAC_SLP 0x3b #define RN5T618_DC2DAC_SLP 0x3c #define RN5T618_DC3DAC_SLP 0x3d +#define RN5T618_DC4DAC_SLP 0x3e #define RN5T618_DCIREN 0x40 #define RN5T618_DCIRQ 0x41 #define RN5T618_DCIRMON 0x42 @@ -211,6 +217,7 @@ enum { RN5T618_DCDC1, RN5T618_DCDC2, RN5T618_DCDC3, + RN5T618_DCDC4, RN5T618_LDO1, RN5T618_LDO2, RN5T618_LDO3, @@ -221,8 +228,14 @@ enum { 
RN5T618_REG_NUM, }; +enum { + RN5T567 = 0, + RN5T618, +}; + struct rn5t618 { struct regmap *regmap; + long variant; }; #endif /* __LINUX_MFD_RN5T618_H */ diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index ac7fba44d7e4..1c88231496d3 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -257,6 +257,7 @@ struct tps65217 { unsigned long id; struct regulator_desc desc[TPS65217_NUM_REGULATOR]; struct regmap *regmap; + u8 *strobes; }; static inline struct tps65217 *dev_to_tps65217(struct device *dev) diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index d58f3b5f585a..7fdf5326f34e 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -246,6 +246,7 @@ enum tps65218_irqs { * @name: Voltage regulator name * @min_uV: minimum micro volts * @max_uV: minimum micro volts + * @strobe: sequencing strobe value for the regulator * * This data is used to check the regualtor voltage limits while setting. */ @@ -254,6 +255,7 @@ struct tps_info { const char *name; int min_uV; int max_uV; + int strobe; }; /** diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 2e5b194b9b19..257173e0095e 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,7 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 +#define MICREL_PHY_FXEN 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9b50325e4ddf..ae8d475a9385 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -37,6 +37,8 @@ extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern void putback_movable_page(struct page *page); extern int migrate_prep(void); extern int migrate_prep_local(void); @@ -69,6 +71,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ +#ifdef CONFIG_COMPACTION +extern int PageMovable(struct page *page); +extern void __SetPageMovable(struct page *page, struct address_space *mapping); +extern void __ClearPageMovable(struct page *page); +#else +static inline int PageMovable(struct page *page) { return 0; }; +static inline void __SetPageMovable(struct page *page, + struct address_space *mapping) +{ +} +static inline void __ClearPageMovable(struct page *page) +{ +} +#endif + #ifdef CONFIG_NUMA_BALANCING extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index d46a0e7f144d..e6f6910278f3 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -536,6 +536,7 @@ struct mlx4_caps { int max_rq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; + int max_tc_eth; u32 *qp0_qkey; u32 *qp0_proxy; u32 *qp1_proxy; @@ -1495,6 +1496,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset, u16 size, u8 *data); +int mlx4_max_tc(struct mlx4_dev *dev); /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) diff --git a/include/linux/mlx4/qp.h 
b/include/linux/mlx4/qp.h index 587cdf943b52..deaa2217214d 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -291,16 +291,18 @@ enum { MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0, }; +union mlx4_wqe_qpn_vlan { + struct { + __be16 vlan_tag; + u8 ins_vlan; + u8 fence_size; + }; + __be32 bf_qpn; +}; + struct mlx4_wqe_ctrl_seg { __be32 owner_opcode; - union { - struct { - __be16 vlan_tag; - u8 ins_vlan; - u8 fence_size; - }; - __be32 bf_qpn; - }; + union mlx4_wqe_qpn_vlan qpn_vlan; /* * High 24 bits are SRC remote buffer; low 8 bits are flags: * [7] SO (strong ordering) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 73a48479892d..0b6d15cddb2f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -129,6 +129,13 @@ __mlx5_mask(typ, fld)) tmp; \ }) +enum mlx5_inline_modes { + MLX5_INLINE_MODE_NONE, + MLX5_INLINE_MODE_L2, + MLX5_INLINE_MODE_IP, + MLX5_INLINE_MODE_TCP_UDP, +}; + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -1330,6 +1337,7 @@ enum mlx5_cap_type { MLX5_CAP_ESWITCH, MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1414,6 +1422,9 @@ enum mlx5_cap_type { MLX5_GET(vector_calc_cap, \ mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) +#define MLX5_CAP_QOS(mdev, cap)\ + MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index fd72ecf0ce9f..a041b99fceac 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -469,7 +469,7 @@ struct mlx5_irq_info { }; struct mlx5_fc_stats { - struct list_head list; + struct rb_root counters; struct list_head addlist; /* protect addlist add/splice operations */ spinlock_t addlist_lock; @@ -481,6 +481,21 @@ struct mlx5_fc_stats { struct mlx5_eswitch; +struct mlx5_rl_entry { + u32 rate; + u16 index; + u16 refcount; +}; + +struct mlx5_rl_table { + /* protect rate limit table */ + struct mutex rl_lock; + u16 max_size; + u32 max_rate; + u32 min_rate; + struct mlx5_rl_entry *rl_entry; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -535,15 +550,12 @@ struct mlx5_priv { struct list_head ctx_list; spinlock_t ctx_lock; + struct mlx5_flow_steering *steering; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; unsigned long pci_dev_data; - struct mlx5_flow_root_namespace *root_ns; - struct mlx5_flow_root_namespace *fdb_root_ns; - struct mlx5_flow_root_namespace *esw_egress_root_ns; - struct mlx5_flow_root_namespace *esw_ingress_root_ns; - struct mlx5_fc_stats fc_stats; + struct mlx5_rl_table rl_table; }; enum mlx5_device_state { @@ -562,6 +574,18 @@ enum mlx5_pci_status { MLX5_PCI_STATUS_ENABLED, }; +struct mlx5_td { + struct list_head tirs_list; + u32 tdn; +}; + +struct mlx5e_resources { + struct mlx5_uar cq_uar; + u32 pdn; + struct mlx5_td td; + struct mlx5_core_mkey mkey; +}; + struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ @@ -586,6 +610,7 @@ struct mlx5_core_dev { struct mlx5_profile *profile; atomic_t num_qps; u32 issi; + struct mlx5e_resources mlx5e_res; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif @@ -862,6 +887,12 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); +int mlx5_init_rl_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); +int mlx5_rl_add_rate(struct 
mlx5_core_dev *dev, u32 rate, u16 *index); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); + static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; @@ -939,6 +970,11 @@ static inline int mlx5_get_gid_table_len(u16 param) return 8 * (1 << param); } +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) +{ + return !!(dev->priv.rl_table.max_size); +} + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 4b7a107d9c19..e036d6030867 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -54,6 +54,8 @@ static inline void build_leftovers_ft_param(int *priority, enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_OFFLOADS, + MLX5_FLOW_NAMESPACE_ETHTOOL, MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_ANCHOR, @@ -67,6 +69,12 @@ struct mlx5_flow_group; struct mlx5_flow_rule; struct mlx5_flow_namespace; +struct mlx5_flow_spec { + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; + u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; +}; + struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { @@ -115,9 +123,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); */ struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index e955a2859009..21bc4557b67a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -123,6 +123,10 @@ enum { MLX5_CMD_OP_DRAIN_DCT = 0x712, MLX5_CMD_OP_QUERY_DCT = 0x713, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, + MLX5_CMD_OP_CREATE_XRQ = 0x717, + MLX5_CMD_OP_DESTROY_XRQ = 0x718, + MLX5_CMD_OP_QUERY_XRQ = 0x719, + MLX5_CMD_OP_ARM_XRQ = 0x71a, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -139,6 +143,8 @@ enum { MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, + MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, @@ -362,7 +368,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { }; struct mlx5_ifc_fte_match_set_misc_bits { - u8 reserved_at_0[0x20]; + u8 reserved_at_0[0x8]; + u8 source_sqn[0x18]; u8 reserved_at_20[0x10]; u8 source_port[0x10]; @@ -508,6 +515,17 @@ struct mlx5_ifc_e_switch_cap_bits { u8 reserved_at_20[0x7e0]; }; +struct mlx5_ifc_qos_cap_bits { + u8 packet_pacing[0x1]; + u8 reserved_0[0x1f]; + u8 reserved_1[0x20]; + u8 packet_pacing_max_rate[0x20]; + u8 packet_pacing_min_rate[0x20]; + u8 reserved_2[0x10]; + u8 packet_pacing_rate_table_size[0x10]; + u8 reserved_3[0x760]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ -518,7 +536,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 reserved_at_10[0x4]; + u8 reserved_at_10[0x2]; + u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; u8 scatter_fcs[0x1]; @@ -747,7 +766,8 @@ 
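/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * mlx5_add_flow_rule() now takes the match state bundled in a single
 * mlx5_flow_spec instead of three separate arguments. A hypothetical
 * rule steering IPv4 frames to a TIR; tir_num is one arm of the
 * destination union in the full header, and MLX5_SET/MLX5_SET_TO_ONES
 * plus the fte_match_param layout come from mlx5_ifc.h:
 */
#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_rule *ex_add_ipv4_rule(struct mlx5_flow_table *ft,
					       u32 tirn)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn,
	};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_rule *rule;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.ethertype, ETH_P_IP);

	rule = mlx5_add_flow_rule(ft, spec,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	kfree(spec);
	return rule;	/* ERR_PTR-encoded on failure */
}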
struct mlx5_ifc_cmd_hca_cap_bits { u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; - u8 reserved_at_182[0x4]; + u8 retransmission_q_counters[0x1]; + u8 reserved_at_183[0x3]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -774,7 +794,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1d0[0x6]; + u8 reserved_at_1d0[0x1]; + u8 dcbx[0x1]; + u8 reserved_at_1d2[0x4]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; @@ -806,7 +828,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; - u8 reserved_at_21b[0x1]; + u8 qos[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; @@ -872,7 +894,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_340[0x20]; + u8 reserved_at_340[0x8]; + u8 log_max_flow_counter_bulk[0x8]; + u8 max_flow_counter[0x10]; + u8 reserved_at_360[0x3]; u8 log_max_rq[0x5]; @@ -932,7 +957,15 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; - u8 reserved_at_5e0[0x220]; + u8 reserved_at_5e0[0x10]; + u8 tag_matching[0x1]; + u8 rndv_offload_rc[0x1]; + u8 rndv_offload_dc[0x1]; + u8 log_tag_matching_list_sz[0x5]; + u8 reserved_at_5e8[0x3]; + u8 log_max_xrq[0x5]; + + u8 reserved_at_5f0[0x200]; }; enum mlx5_flow_destination_type { @@ -951,7 +984,8 @@ struct mlx5_ifc_dest_format_struct_bits { }; struct mlx5_ifc_flow_counter_list_bits { - u8 reserved_at_0[0x10]; + u8 clear[0x1]; + u8 num_of_counters[0xf]; u8 flow_counter_id[0x10]; u8 reserved_at_20[0x20]; @@ -1970,7 +2004,7 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_560[0x5]; u8 rq_type[0x3]; - u8 srqn_rmpn[0x18]; + u8 srqn_rmpn_xrqn[0x18]; u8 reserved_at_580[0x8]; u8 rmsn[0x18]; @@ -2021,6 +2055,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; + struct mlx5_ifc_qos_cap_bits qos_cap; u8 reserved_at_0[0x8000]; }; @@ -2236,7 +2271,8 @@ struct mlx5_ifc_sqc_bits { u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_4[0x4]; + u8 reserved_at_4[0x1]; + u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; u8 reserved_at_d[0x13]; @@ -2247,8 +2283,9 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_at_60[0xa0]; + u8 reserved_at_60[0x90]; + u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 reserved_at_110[0x10]; @@ -2332,7 +2369,9 @@ struct mlx5_ifc_rmpc_bits { }; struct mlx5_ifc_nic_vport_context_bits { - u8 reserved_at_0[0x1f]; + u8 reserved_at_0[0x5]; + u8 min_wqe_inline_mode[0x3]; + u8 reserved_at_8[0x17]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; @@ -2596,7 +2635,7 @@ struct mlx5_ifc_dctc_bits { u8 reserved_at_98[0x8]; u8 reserved_at_a0[0x8]; - u8 srqn[0x18]; + u8 srqn_xrqn[0x18]; u8 reserved_at_c0[0x8]; u8 pd[0x18]; @@ -2648,6 +2687,7 @@ enum { enum { MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + MLX5_CQ_PERIOD_NUM_MODES }; struct mlx5_ifc_cqc_bits { @@ -2725,6 +2765,54 @@ struct mlx5_ifc_query_adapter_param_block_bits { u8 vsd_contd_psid[16][0x8]; }; +enum { + MLX5_XRQC_STATE_GOOD = 0x0, + MLX5_XRQC_STATE_ERROR = 0x1, +}; + +enum { + MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0, + MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1, +}; + +enum { + MLX5_XRQC_OFFLOAD_RNDV = 0x1, +}; + +struct mlx5_ifc_tag_matching_topology_context_bits { + u8 log_matching_list_sz[0x4]; + 
u8 reserved_at_4[0xc]; + u8 append_next_index[0x10]; + + u8 sw_phase_cnt[0x10]; + u8 hw_phase_cnt[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_xrqc_bits { + u8 state[0x4]; + u8 rlkey[0x1]; + u8 reserved_at_5[0xf]; + u8 topology[0x4]; + u8 reserved_at_18[0x4]; + u8 offload[0x4]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; + + u8 reserved_at_180[0x180]; + + struct mlx5_ifc_wq_bits wq; +}; + union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { struct mlx5_ifc_modify_field_select_bits modify_field_select; struct mlx5_ifc_resize_field_select_bits resize_field_select; @@ -3147,6 +3235,30 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 reserved_at_800[0x80]; }; +struct mlx5_ifc_query_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + +struct mlx5_ifc_query_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_query_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -3550,7 +3662,27 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 out_of_sequence[0x20]; - u8 reserved_at_1e0[0x620]; + u8 reserved_at_1e0[0x20]; + + u8 duplicate_request[0x20]; + + u8 reserved_at_220[0x20]; + + u8 rnr_nak_retry_err[0x20]; + + u8 reserved_at_260[0x20]; + + u8 packet_seq_err[0x20]; + + u8 reserved_at_2a0[0x20]; + + u8 implied_nak_seq_err[0x20]; + + u8 reserved_at_2e0[0x20]; + + u8 local_ack_timeout_err[0x20]; + + u8 reserved_at_320[0x4e0]; }; struct mlx5_ifc_query_q_counter_in_bits { @@ -5004,6 +5136,28 @@ struct mlx5_ifc_detach_from_mcg_in_bits { u8 multicast_gid[16][0x8]; }; +struct mlx5_ifc_destroy_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_destroy_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5589,6 +5743,30 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_create_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6130,6 +6308,29 @@ struct mlx5_ifc_attach_to_mcg_in_bits { u8 multicast_gid[16][0x8]; }; +struct mlx5_ifc_arm_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_arm_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x10]; + u8 lwm[0x10]; +}; + struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6167,7 +6368,8 @@ struct mlx5_ifc_arm_rq_out_bits { }; enum { - 
MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, + MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, + MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2, }; struct mlx5_ifc_arm_rq_in_bits { @@ -6360,6 +6562,30 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 vxlan_udp_port[0x10]; }; +struct mlx5_ifc_set_rate_limit_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_rate_limit_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 rate_limit_index[0x10]; + + u8 reserved_at_60[0x20]; + + u8 rate_limit[0x20]; +}; + struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -6484,12 +6710,15 @@ struct mlx5_ifc_pude_reg_bits { }; struct mlx5_ifc_ptys_reg_bits { - u8 reserved_at_0[0x8]; + u8 an_disable_cap[0x1]; + u8 an_disable_admin[0x1]; + u8 reserved_at_2[0x6]; u8 local_port[0x8]; u8 reserved_at_10[0xd]; u8 proto_mask[0x3]; - u8 reserved_at_20[0x40]; + u8 an_status[0x4]; + u8 reserved_at_24[0x3c]; u8 eth_proto_capability[0x20]; @@ -7450,4 +7679,34 @@ struct mlx5_ifc_mcia_reg_bits { u8 dword_11[0x20]; }; +struct mlx5_ifc_dcbx_param_bits { + u8 dcbx_cee_cap[0x1]; + u8 dcbx_ieee_cap[0x1]; + u8 dcbx_standby_cap[0x1]; + u8 reserved_at_0[0x5]; + u8 port_number[0x8]; + u8 reserved_at_10[0xa]; + u8 max_application_table_size[6]; + u8 reserved_at_20[0x15]; + u8 version_oper[0x3]; + u8 reserved_at_38[5]; + u8 version_admin[0x3]; + u8 willing_admin[0x1]; + u8 reserved_at_41[0x3]; + u8 pfc_cap_oper[0x4]; + u8 reserved_at_48[0x4]; + u8 pfc_cap_admin[0x4]; + u8 reserved_at_50[0x4]; + u8 num_of_tc_oper[0x4]; + u8 reserved_at_58[0x4]; + u8 num_of_tc_admin[0x4]; + u8 remote_willing[0x1]; + u8 reserved_at_61[3]; + u8 remote_pfc_cap[4]; + u8 reserved_at_68[0x14]; + u8 remote_num_of_tc[0x4]; + u8 reserved_at_80[0x18]; + u8 error[0x8]; + u8 reserved_at_a0[0x160]; +}; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 9851862c0ec5..e3012cc64b8a 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -47,6 +47,14 @@ enum mlx5_module_id { MLX5_MODULE_ID_QSFP28 = 0x11, }; +enum mlx5_an_status { + MLX5_AN_UNAVAILABLE = 0, + MLX5_AN_COMPLETE = 1, + MLX5_AN_FAILED = 2, + MLX5_AN_LINK_UP = 3, + MLX5_AN_LINK_DOWN = 4, +}; + #define MLX5_EEPROM_MAX_BYTES 32 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 @@ -65,13 +73,17 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, int proto_mask, u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask); +void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 6c16c198f680..e087b7d047ac 100644 --- 
a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -43,6 +43,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); +void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); diff --git a/include/linux/mm.h b/include/linux/mm.h index ece042dfe23c..08ed53eeedd5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -309,10 +309,34 @@ struct vm_fault { * VM_FAULT_DAX_LOCKED and fill in * entry here. */ - /* for ->map_pages() only */ - pgoff_t max_pgoff; /* map pages for offset from pgoff till - * max_pgoff inclusive */ - pte_t *pte; /* pte entry associated with ->pgoff */ +}; + +/* + * Page fault context: passes through page fault handler instead of endless list + * of function arguments. + */ +struct fault_env { + struct vm_area_struct *vma; /* Target VMA */ + unsigned long address; /* Faulting virtual address */ + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' + */ + pte_t *pte; /* Pointer to pte entry matching + * the 'address'. NULL if the page + * table hasn't been allocated. + */ + spinlock_t *ptl; /* Page table lock. + * Protects pte page table if 'pte' + * is not NULL, otherwise pmd. + */ + pgtable_t prealloc_pte; /* Pre-allocated pte page table. + * vm_ops->map_pages() calls + * alloc_set_pte() from atomic context. + * do_fault_around() pre-allocates + * page table to avoid allocation from + * atomic context. + */ }; /* @@ -327,7 +351,8 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); + void (*map_pages)(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ @@ -537,7 +562,6 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -int split_free_page(struct page *page); /* * Compound pages have a destructor function.
Provide a @@ -601,8 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon); +int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, + struct page *page); #endif /* @@ -909,6 +933,11 @@ static inline struct zone *page_zone(const struct page *page) return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; } +static inline pg_data_t *page_pgdat(const struct page *page) +{ + return NODE_DATA(page_to_nid(page)); +} + #ifdef SECTION_IN_PAGE_FLAGS static inline void set_page_section(struct page *page, unsigned long section) { @@ -949,11 +978,21 @@ static inline struct mem_cgroup *page_memcg(struct page *page) { return page->mem_cgroup; } +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(page->mem_cgroup); +} #else static inline struct mem_cgroup *page_memcg(struct page *page) { return NULL; } +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return NULL; +} #endif /* @@ -1035,6 +1074,7 @@ static inline pgoff_t page_file_index(struct page *page) } bool page_mapped(struct page *page); +struct address_space *page_mapping(struct page *page); /* * Return true only if the page has been allocated with @@ -1215,15 +1255,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU -extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, unsigned int flags); +extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); #else -static inline int handle_mm_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - unsigned int flags) +static inline int handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) { /* should never happen if there's no MMU */ BUG(); @@ -2063,7 +2102,8 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); +extern void filemap_map_pages(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ @@ -2259,6 +2299,8 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) } #endif /* __HAVE_ARCH_GATE_AREA */ +extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); + #ifdef CONFIG_SYSCTL extern int sysctl_drop_caches; int drop_caches_sysctl_handler(struct ctl_table *, int, diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 5bd29ba4f174..71613e8a720f 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -23,25 +23,30 @@ static inline int page_is_file_cache(struct page *page) } static __always_inline void __update_lru_size(struct lruvec *lruvec, - enum lru_list lru, int nr_pages) + enum lru_list lru, enum zone_type zid, + int nr_pages) { - __mod_zone_page_state(lruvec_zone(lruvec), 
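/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * handle_mm_fault() drops its mm argument (now taken from vma->vm_mm),
 * so a fault-in helper shrinks to the form below. ex_fault_in() is a
 * hypothetical caller; the FAULT_FLAG_ and VM_FAULT_ bits are the
 * pre-existing ones, and the caller is assumed to hold mmap_sem for read.
 */
#include <linux/mm.h>

static int ex_fault_in(struct vm_area_struct *vma, unsigned long address,
		       bool write)
{
	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
	int ret = handle_mm_fault(vma, address, flags);

	return (ret & VM_FAULT_ERROR) ? -EFAULT : 0;
}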
NR_LRU_BASE + lru, nr_pages); + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + + __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); + __mod_zone_page_state(&pgdat->node_zones[zid], + NR_ZONE_LRU_BASE + lru, nr_pages); } static __always_inline void update_lru_size(struct lruvec *lruvec, - enum lru_list lru, int nr_pages) + enum lru_list lru, enum zone_type zid, + int nr_pages) { + __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, nr_pages); -#else - __update_lru_size(lruvec, lru, nr_pages); #endif } static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } @@ -49,7 +54,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { list_del(&page->lru); - update_lru_size(lruvec, lru, -hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); } /** diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ca3e517980a0..903200f4ec41 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -60,51 +60,52 @@ struct page { }; /* Second double word */ - struct { - union { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* sl[aou]b first free object */ - /* page_deferred_list().prev -- second tail page */ - }; + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* sl[aou]b first free object */ + /* page_deferred_list().prev -- second tail page */ + }; - union { + union { #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - /* Used for cmpxchg_double in slub */ - unsigned long counters; + /* Used for cmpxchg_double in slub */ + unsigned long counters; #else - /* - * Keep _refcount separate from slub cmpxchg_double - * data. As the rest of the double word is protected by - * slab_lock but _refcount is not. - */ - unsigned counters; + /* + * Keep _refcount separate from slub cmpxchg_double data. + * As the rest of the double word is protected by slab_lock + * but _refcount is not. + */ + unsigned counters; #endif + struct { - struct { - - union { - /* - * Count of ptes mapped in mms, to show - * when page is mapped & limit reverse - * map searches. - */ - atomic_t _mapcount; - - struct { /* SLUB */ - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; - }; - int units; /* SLOB */ - }; + union { /* - * Usage count, *USE WRAPPER FUNCTION* - * when manual accounting. See page_ref.h + * Count of ptes mapped in mms, to show when + * page is mapped & limit reverse map searches. + * + * Extra information about page type may be + * stored here for pages that are never mapped, + * in which case the value MUST BE <= -2. + * See page-flags.h for more details. */ - atomic_t _refcount; + atomic_t _mapcount; + + unsigned int active; /* SLAB */ + struct { /* SLUB */ + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + int units; /* SLOB */ }; - unsigned int active; /* SLAB */ + /* + * Usage count, *USE WRAPPER FUNCTION* when manual + * accounting. See page_ref.h + */ + atomic_t _refcount; }; }; @@ -117,7 +118,7 @@ struct page { */ union { struct list_head lru; /* Pageout list, eg. active_list - * protected by zone->lru_lock ! + * protected by zone_lru_lock ! 
* Can be used as a generic list * by the page owner. */ @@ -594,6 +595,9 @@ struct vm_special_mapping { int (*fault)(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf); + + int (*mremap)(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma); }; enum tlb_flush_reason { diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index de7be78c6f0e..451a811f48f2 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -39,6 +39,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) @@ -47,6 +48,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 02069c23486d..f2e4e90621ec 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -93,7 +93,7 @@ struct free_area { struct pglist_data; /* - * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. + * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. * So add a wild amount of padding here to ensure that they fall into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here. @@ -110,36 +110,23 @@ struct zone_padding { enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, - NR_ALLOC_BATCH, - NR_LRU_BASE, - NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ - NR_ACTIVE_ANON, /* " " " " " */ - NR_INACTIVE_FILE, /* " " " " " */ - NR_ACTIVE_FILE, /* " " " " " */ - NR_UNEVICTABLE, /* " " " " " */ + NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ + NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, + NR_ZONE_ACTIVE_ANON, + NR_ZONE_INACTIVE_FILE, + NR_ZONE_ACTIVE_FILE, + NR_ZONE_UNEVICTABLE, + NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ - NR_ANON_PAGES, /* Mapped anonymous pages */ - NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
- only modified from process context */ - NR_FILE_PAGES, - NR_FILE_DIRTY, - NR_WRITEBACK, NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, /* used for pagetables */ - NR_KERNEL_STACK, + NR_KERNEL_STACK_KB, /* measured in KiB */ /* Second 128 byte cacheline */ - NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_BOUNCE, - NR_VMSCAN_WRITE, - NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ - NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ - NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ - NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ - NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ - NR_DIRTIED, /* page dirtyings since bootup */ - NR_WRITTEN, /* page writings since bootup */ - NR_PAGES_SCANNED, /* pages scanned since last reclaim */ +#if IS_ENABLED(CONFIG_ZSMALLOC) + NR_ZSPAGES, /* allocated in zsmalloc */ +#endif #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ @@ -148,12 +135,40 @@ enum zone_stat_item { NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation from other node */ #endif + NR_FREE_CMA_PAGES, + NR_VM_ZONE_STAT_ITEMS }; + +enum node_stat_item { + NR_LRU_BASE, + NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ + NR_ACTIVE_ANON, /* " " " " " */ + NR_INACTIVE_FILE, /* " " " " " */ + NR_ACTIVE_FILE, /* " " " " " */ + NR_UNEVICTABLE, /* " " " " " */ + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + NR_PAGES_SCANNED, /* pages scanned since last reclaim */ WORKINGSET_REFAULT, WORKINGSET_ACTIVATE, WORKINGSET_NODERECLAIM, - NR_ANON_TRANSPARENT_HUGEPAGES, - NR_FREE_CMA_PAGES, - NR_VM_ZONE_STAT_ITEMS }; + NR_ANON_MAPPED, /* Mapped anonymous pages */ + NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. + only modified from process context */ + NR_FILE_PAGES, + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_SHMEM_THPS, + NR_SHMEM_PMDMAPPED, + NR_ANON_THPS, + NR_UNSTABLE_NFS, /* NFS unstable pages */ + NR_VMSCAN_WRITE, + NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ + NR_VM_NODE_STAT_ITEMS +}; /* * We do arithmetic on the LRU lists in various places in the code, @@ -210,7 +225,7 @@ struct lruvec { /* Evictions & activations on the inactive file list */ atomic_long_t inactive_age; #ifdef CONFIG_MEMCG - struct zone *zone; + struct pglist_data *pgdat; #endif }; @@ -262,6 +277,11 @@ struct per_cpu_pageset { #endif }; +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; +}; + #endif /* !__GENERATING_BOUNDS.H */ enum zone_type { @@ -343,22 +363,9 @@ struct zone { #ifdef CONFIG_NUMA int node; #endif - - /* - * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on - * this zone's LRU. Maintained by the pageout code. - */ - unsigned int inactive_ratio; - struct pglist_data *zone_pgdat; struct per_cpu_pageset __percpu *pageset; - /* - * This is a per-zone reserve of pages that are not available - * to userspace allocations. - */ - unsigned long totalreserve_pages; - #ifndef CONFIG_SPARSEMEM /* * Flags for a pageblock_nr_pages block. See pageblock-flags.h. 
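/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * vmstat accounting becomes two-level: reclaim-related items (the new
 * enum node_stat_item) are tracked per node, while zones keep only the
 * NR_ZONE_* counters the allocator and compaction still need. A
 * hypothetical dirty-page accounting helper touching both levels:
 */
#include <linux/mm.h>
#include <linux/vmstat.h>

static void ex_account_dirty(struct page *page, int nr)
{
	/* node-wide counter, from enum node_stat_item */
	__mod_node_page_state(page_pgdat(page), NR_FILE_DIRTY, nr);
	/* zone counter retained for allocator/reclaim-retry heuristics */
	__mod_zone_page_state(page_zone(page), NR_ZONE_WRITE_PENDING, nr);
}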
@@ -367,14 +374,6 @@ struct zone { unsigned long *pageblock_flags; #endif /* CONFIG_SPARSEMEM */ -#ifdef CONFIG_NUMA - /* - * zone reclaim becomes active if more unmapped pages exist. - */ - unsigned long min_unmapped_pages; - unsigned long min_slab_pages; -#endif /* CONFIG_NUMA */ - /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ unsigned long zone_start_pfn; @@ -467,24 +466,21 @@ struct zone { unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; + /* Write-intensive fields used from the page allocator */ ZONE_PADDING(_pad1_) + /* free areas of different sizes */ struct free_area free_area[MAX_ORDER]; /* zone flags, see below */ unsigned long flags; - /* Write-intensive fields used from the page allocator */ + /* Primarily protects free_area */ spinlock_t lock; + /* Write-intensive fields used by compaction and vmstats. */ ZONE_PADDING(_pad2_) - /* Write-intensive fields used by page reclaim */ - - /* Fields commonly accessed by the page reclaim scanner */ - spinlock_t lru_lock; - struct lruvec lruvec; - /* * When free pages are below this point, additional steps are taken * when reading the number of free pages to avoid per-cpu counter @@ -522,20 +518,18 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -enum zone_flags { - ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ - ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ - ZONE_CONGESTED, /* zone has many dirty pages backed by +enum pgdat_flags { + PGDAT_CONGESTED, /* pgdat has many dirty pages backed by * a congested BDI */ - ZONE_DIRTY, /* reclaim scanning has recently found + PGDAT_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. */ - ZONE_WRITEBACK, /* reclaim scanning has recently found + PGDAT_WRITEBACK, /* reclaim scanning has recently found * many pages under writeback */ - ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ + PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; static inline unsigned long zone_end_pfn(const struct zone *zone) @@ -659,8 +653,9 @@ typedef struct pglist_data { wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ - int kswapd_max_order; - enum zone_type classzone_idx; + int kswapd_order; + enum zone_type kswapd_classzone_idx; + #ifdef CONFIG_COMPACTION int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; @@ -677,6 +672,23 @@ typedef struct pglist_data { /* Number of pages migrated during the rate limiting time interval */ unsigned long numabalancing_migrate_nr_pages; #endif + /* + * This is a per-node reserve of pages that are not available + * to userspace allocations. + */ + unsigned long totalreserve_pages; + +#ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; +#endif /* CONFIG_NUMA */ + + /* Write-intensive fields used by page reclaim */ + ZONE_PADDING(_pad1_) + spinlock_t lru_lock; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* @@ -691,6 +703,23 @@ typedef struct pglist_data { struct list_head split_queue; unsigned long split_queue_len; #endif + + /* Fields commonly accessed by the page reclaim scanner */ + struct lruvec lruvec; + + /* + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on + * this node's LRU. Maintained by the pageout code. 
+ */ + unsigned int inactive_ratio; + + unsigned long flags; + + ZONE_PADDING(_pad2_) + + /* Per-node vmstats */ + struct per_cpu_nodestat __percpu *per_cpu_nodestats; + atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) @@ -704,6 +733,15 @@ typedef struct pglist_data { #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) +static inline spinlock_t *zone_lru_lock(struct zone *zone) +{ + return &zone->zone_pgdat->lru_lock; +} + +static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) +{ + return &pgdat->lruvec; +} static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) { @@ -756,12 +794,12 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, extern void lruvec_init(struct lruvec *lruvec); -static inline struct zone *lruvec_zone(struct lruvec *lruvec) +static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) { #ifdef CONFIG_MEMCG - return lruvec->zone; + return lruvec->pgdat; #else - return container_of(lruvec, struct zone, lruvec); + return container_of(lruvec, struct pglist_data, lruvec); #endif } diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6e4c645e1c0d..ed84c07f6a51 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -657,4 +657,20 @@ struct ulpi_device_id { kernel_ulong_t driver_data; }; +/** + * struct fsl_mc_device_id - MC object device identifier + * @vendor: vendor ID + * @obj_type: MC object type + * @ver_major: MC object version major number + * @ver_minor: MC object version minor number + * + * Type of entries in the "device Id" table for MC object devices supported by + * a MC object device driver. 
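/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * zone_lru_lock() keeps zone-based callers compiling while the lock
 * itself now lives in the node. A hypothetical putback path combining
 * the helpers added by this series; page_lru() is the existing
 * mm_inline.h helper:
 */
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

static void ex_putback_lru(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(zone_lru_lock(zone));
	lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
	add_page_to_lru_list(page, lruvec, page_lru(page));
	spin_unlock_irq(zone_lru_lock(zone));
}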
The last entry of the table has vendor set to 0x0 + */ +struct fsl_mc_device_id { + __u16 vendor; + const char obj_type[16]; +}; + + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3a5abe95affd..1cc5ffb769af 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); -int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); -int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); #define log_mpidump g10_log_mpidump diff --git a/include/linux/mroute.h b/include/linux/mroute.h index bf9b322cb0b0..d351fd3e1049 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -104,6 +104,7 @@ struct mfc_cache { unsigned long bytes; unsigned long pkt; unsigned long wrong_if; + unsigned long lastuse; unsigned char ttls[MAXVIFS]; /* TTL thresholds */ } res; } mfc_un; diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 66982e764051..3987b64040c5 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -92,6 +92,7 @@ struct mfc6_cache { unsigned long bytes; unsigned long pkt; unsigned long wrong_if; + unsigned long lastuse; unsigned char ttls[MAXMIFS]; /* TTL thresholds */ } res; } mfc_un; diff --git a/include/linux/msi.h b/include/linux/msi.h index 8b425c66305a..4f0bfe5912b2 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -47,6 +47,7 @@ struct fsl_mc_msi_desc { * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse + * @affinity: Optional pointer to a cpu affinity mask for this descriptor * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X @@ -67,6 +68,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; + const struct cpumask *affinity; union { /* PCI MSI/X specific data */ @@ -264,12 +266,10 @@ enum { * callbacks. 
*/ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), - /* Build identity map between hwirq and irq */ - MSI_FLAG_IDENTITY_MAP = (1 << 2), /* Support multiple PCI MSI interrupts */ - MSI_FLAG_MULTI_PCI_MSI = (1 << 3), + MSI_FLAG_MULTI_PCI_MSI = (1 << 2), /* Support PCI MSIX interrupts */ - MSI_FLAG_PCI_MSIX = (1 << 4), + MSI_FLAG_PCI_MSIX = (1 << 3), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, diff --git a/include/linux/namei.h b/include/linux/namei.h index d3d0398f2a1b..f29abda31e6d 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -81,8 +81,6 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); -struct qstr; -extern struct dentry *lookup_hash(const struct qstr *, struct dentry *); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/nd.h b/include/linux/nd.h index aee2761d294c..f1ea426d6a5e 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -26,6 +26,7 @@ struct nd_device_driver { unsigned long type; int (*probe)(struct device *dev); int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); void (*notify)(struct device *dev, enum nvdimm_event event); }; @@ -67,7 +68,7 @@ struct nd_namespace_io { struct nd_namespace_common common; struct resource res; resource_size_t size; - void __pmem *addr; + void *addr; struct badblocks bb; }; diff --git a/include/linux/net.h b/include/linux/net.h index 25aa03b51c4e..b9f0ff4d489c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -185,6 +185,7 @@ struct proto_ops { ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); + int (*peek_len)(struct socket *sock); }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index aa7b2400f98c..9c6c8ef2e9e7 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -53,8 +53,9 @@ enum { * headers in software. */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ + NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_TUNNEL_REMCSUM_BIT, + NETIF_F_GSO_SCTP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -128,6 +129,7 @@ enum { #define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) #define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) +#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -166,7 +168,8 @@ enum { NETIF_F_FSO) /* List of features with software fallbacks. 
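/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * Each GSO feature bit maps to a flag through __NETIF_F(); with SCTP
 * segmentation folded into NETIF_F_GSO_SOFTWARE (updated just below), a
 * virtual driver can advertise it and rely entirely on the software
 * fallback. A hypothetical feature setup:
 */
#include <linux/netdevice.h>

static void ex_set_features(struct net_device *dev)
{
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= dev->features;
}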
*/ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO) +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \ + NETIF_F_GSO_SCTP) /* * If one device supports one of these features, then enable them diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index da4b33bea982..076df5360ba5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -61,6 +61,9 @@ struct wireless_dev; /* 802.15.4 specific */ struct wpan_dev; struct mpls_dev; +/* UDP Tunnel offloads */ +struct udp_tunnel_info; +struct bpf_prog; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -90,7 +93,6 @@ void netdev_set_default_ethtool_ops(struct net_device *dev, #define NET_XMIT_SUCCESS 0x00 #define NET_XMIT_DROP 0x01 /* skb dropped */ #define NET_XMIT_CN 0x02 /* congestion notification */ -#define NET_XMIT_POLICED 0x03 /* skb is shot by police */ #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It @@ -785,6 +787,7 @@ enum { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, + TC_SETUP_MATCHALL, }; struct tc_cls_u32_offload; @@ -795,9 +798,37 @@ struct tc_to_netdev { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; + struct tc_cls_matchall_offload *cls_mall; }; }; +/* These structures hold the attributes of xdp state that are being passed + * to the netdevice through the xdp op. + */ +enum xdp_netdev_command { + /* Set or clear a bpf program used in the earliest stages of packet + * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee + * is responsible for calling bpf_prog_put on any old progs that are + * stored. In case of error, the callee need not release the new prog + * reference, but on success it takes ownership and must bpf_prog_put + * when it is no longer used. + */ + XDP_SETUP_PROG, + /* Check if a bpf program is set on the device. The callee should + * return true if a program is currently attached and running. + */ + XDP_QUERY_PROG, +}; + +struct netdev_xdp { + enum xdp_netdev_command command; + union { + /* XDP_SETUP_PROG */ + struct bpf_prog *prog; + /* XDP_QUERY_PROG */ + bool prog_attached; + }; +}; /* * This structure defines the management hooks for network devices. @@ -1025,31 +1056,18 @@ struct tc_to_netdev { * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. * - * void (*ndo_add_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify a driver about the UDP port and socket - * address family that vxlan is listening to. It is called only when - * a new port starts listening. The operation is protected by the - * vxlan_net->sock_lock. - * - * void (*ndo_add_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify a driver about the UDP port and socket - * address family that geneve is listnening to. It is called only when - * a new port starts listening. The operation is protected by the - * geneve_net->sock_lock. - * - * void (*ndo_del_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify the driver about a UDP port and socket - * address family that geneve is not listening to anymore. The operation - * is protected by the geneve_net->sock_lock. 
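/*
 * [Editor's note -- illustrative sketch, not part of the diff above]
 * A hypothetical driver ndo_xdp handler following the ownership rules
 * documented in enum xdp_netdev_command: on XDP_SETUP_PROG the callee
 * takes the new prog reference and must bpf_prog_put() any old one; on
 * XDP_QUERY_PROG it just reports attachment state. Real drivers also
 * synchronize the swap against the rx datapath, which is omitted here.
 */
#include <linux/bpf.h>
#include <linux/netdevice.h>

struct ex_priv {
	struct bpf_prog *xdp_prog;	/* hypothetical private field */
};

static int ex_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct ex_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;
		priv->xdp_prog = xdp->prog;	/* take ownership */
		if (old)
			bpf_prog_put(old);
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}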
- * - * void (*ndo_del_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify the driver about a UDP port and socket - * address family that vxlan is not listening to anymore. The operation - * is protected by the vxlan_net->sock_lock. + * void (*ndo_udp_tunnel_add)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify a driver about the UDP port and socket + * address family that a UDP tunnel is listnening to. It is called only + * when a new port starts listening. The operation is protected by the + * RTNL. + * + * void (*ndo_udp_tunnel_del)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify the driver about a UDP port and socket + * address family that the UDP tunnel is not listening to anymore. The + * operation is protected by the RTNL. * * void* (*ndo_dfwd_add_station)(struct net_device *pdev, * struct net_device *dev) @@ -1099,6 +1117,9 @@ struct tc_to_netdev { * appropriate rx headroom value allows avoiding skb head copy on * forward. Setting a negative value resets the rx headroom to the * default value. + * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); + * This function is used to set or query state related to XDP on the + * netdevice. See definition of enum xdp_netdev_command for details. * */ struct net_device_ops { @@ -1221,8 +1242,10 @@ struct net_device_ops { netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); - int (*ndo_neigh_construct)(struct neighbour *n); - void (*ndo_neigh_destroy)(struct neighbour *n); + int (*ndo_neigh_construct)(struct net_device *dev, + struct neighbour *n); + void (*ndo_neigh_destroy)(struct net_device *dev, + struct neighbour *n); int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], @@ -1258,18 +1281,10 @@ struct net_device_ops { struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); - void (*ndo_add_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_add_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@ -1289,6 +1304,8 @@ struct net_device_ops { struct sk_buff *skb); void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + int (*ndo_xdp)(struct net_device *dev, + struct netdev_xdp *xdp); }; /** @@ -1457,6 +1474,8 @@ enum netdev_priv_flags { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations + * @ndisc_ops: Includes callbacks for different IPv6 neighbour + * discovery handling. Necessary for e.g. 6LoWPAN. * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. 
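
The two consolidated tunnel-offload hooks replace four protocol-specific ones, so a single callback now serves both VXLAN and Geneve. A driver-side sketch, assuming the udp_tunnel_info layout (type/sa_family/port) and the UDP_TUNNEL_TYPE_* constants from the include/net/udp_tunnel.h side of this series; the mydrv_* helpers are hypothetical:

static void mydrv_udp_tunnel_add(struct net_device *dev,
                                 struct udp_tunnel_info *ti)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        ASSERT_RTNL();          /* the op is documented to run under RTNL */

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                mydrv_program_vxlan_port(priv, ti->sa_family, ti->port);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                mydrv_program_geneve_port(priv, ti->sa_family, ti->port);
                break;
        default:
                break;          /* unknown tunnel type: ignore */
        }
}

ndo_udp_tunnel_del would mirror this, releasing the port from the packet parser instead of programming it.
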
* @@ -1484,8 +1503,7 @@ enum netdev_priv_flags { * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length - * @neigh_priv_len; Used in neigh_alloc(), - * initialized only in atm/clip.c + * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address * @dev_port: Used to differentiate devices that share @@ -1594,7 +1612,8 @@ enum netdev_priv_flags { * @phydev: Physical device may attach itself * for hardware timestamping * - * @qdisc_tx_busylock: XXX: need comments on this one + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -1673,6 +1692,9 @@ struct net_device { #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; #endif +#if IS_ENABLED(CONFIG_IPV6) + const struct ndisc_ops *ndisc_ops; +#endif const struct header_ops *header_ops; @@ -1862,6 +1884,7 @@ struct net_device { #endif struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; bool proto_down; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1944,6 +1967,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); @@ -2233,8 +2273,8 @@ struct netdev_lag_lower_state_info { #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B -#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C -#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D +#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C +#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2370,6 +2410,8 @@ void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); DECLARE_PER_CPU(int, xmit_recursion); +#define XMIT_RECURSION_LIMIT 10 + static inline int dev_recursion_level(void) { return this_cpu_read(xmit_recursion); @@ -3250,6 +3292,7 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); +int dev_change_xdp_fd(struct net_device *dev, int fd); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3799,12 +3842,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter); + #define 
netdev_for_each_lower_dev(dev, ldev, iter) \ for (iter = (dev)->adj_list.lower.next, \ ldev = netdev_lower_get_next(dev, &(iter)); \ ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) +struct net_device *netdev_all_lower_get_next(struct net_device *dev, + struct list_head **iter); +struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, + struct list_head **iter); + +#define netdev_for_each_all_lower_dev(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next(dev, &(iter))) + +#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) + void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); @@ -3820,6 +3881,10 @@ void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); +int netdev_default_l2upper_neigh_construct(struct net_device *dev, + struct neighbour *n); +void netdev_default_l2upper_neigh_destroy(struct net_device *dev, + struct neighbour *n); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 @@ -4012,6 +4077,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index dc4f58a3cdcc..2ad1a2b289b5 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -6,6 +6,10 @@ #include <linux/static_key.h> #include <uapi/linux/netfilter/x_tables.h> +/* Test a struct->invflags and a boolean for inequality */ +#define NF_INVF(ptr, flag, boolean) \ + ((boolean) ^ !!((ptr)->invflags & (flag))) + /** * struct xt_action_param - parameters for matches/targets * @@ -246,6 +250,10 @@ int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); +unsigned int *xt_alloc_entry_offsets(unsigned int size); +bool xt_find_jump_offset(const unsigned int *offsets, + unsigned int target, unsigned int size); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 2ea517c7c6b9..984b2112c77b 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -115,8 +115,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); -/* Used in the kernel match() functions */ -#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ #define BASE_CHAIN 
(par->hook_mask & (1 << NF_BR_NUMHOOKS)) diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h new file mode 100644 index 000000000000..bf240a3cbf99 --- /dev/null +++ b/include/linux/nvme-rdma.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _LINUX_NVME_RDMA_H +#define _LINUX_NVME_RDMA_H + +enum nvme_rdma_cm_fmt { + NVME_RDMA_CM_FMT_1_0 = 0x0, +}; + +enum nvme_rdma_cm_status { + NVME_RDMA_CM_INVALID_LEN = 0x01, + NVME_RDMA_CM_INVALID_RECFMT = 0x02, + NVME_RDMA_CM_INVALID_QID = 0x03, + NVME_RDMA_CM_INVALID_HSQSIZE = 0x04, + NVME_RDMA_CM_INVALID_HRQSIZE = 0x05, + NVME_RDMA_CM_NO_RSC = 0x06, + NVME_RDMA_CM_INVALID_IRD = 0x07, + NVME_RDMA_CM_INVALID_ORD = 0x08, +}; + +/** + * struct nvme_rdma_cm_req - rdma connect request + * + * @recfmt: format of the RDMA Private Data + * @qid: queue Identifier for the Admin or I/O Queue + * @hrqsize: host receive queue size to be created + * @hsqsize: host send queue size to be created + */ +struct nvme_rdma_cm_req { + __le16 recfmt; + __le16 qid; + __le16 hrqsize; + __le16 hsqsize; + u8 rsvd[24]; +}; + +/** + * struct nvme_rdma_cm_rep - rdma connect reply + * + * @recfmt: format of the RDMA Private Data + * @crqsize: controller receive queue size + */ +struct nvme_rdma_cm_rep { + __le16 recfmt; + __le16 crqsize; + u8 rsvd[28]; +}; + +/** + * struct nvme_rdma_cm_rej - rdma connect reject + * + * @recfmt: format of the RDMA Private Data + * @fsts: error status for the associated connect request + */ +struct nvme_rdma_cm_rej { + __le16 recfmt; + __le16 sts; +}; + +#endif /* _LINUX_NVME_RDMA_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 7d51b2904cb7..d8b37bab2887 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -16,6 +16,78 @@ #define _LINUX_NVME_H #include <linux/types.h> +#include <linux/uuid.h> + +/* NQN names in commands fields specified one size */ +#define NVMF_NQN_FIELD_LEN 256 + +/* However the max length of a qualified name is another size */ +#define NVMF_NQN_SIZE 223 + +#define NVMF_TRSVCID_SIZE 32 +#define NVMF_TRADDR_SIZE 256 +#define NVMF_TSAS_SIZE 256 + +#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" + +#define NVME_RDMA_IP_PORT 4420 + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, /* Discovery type target subsystem */ + NVME_NQN_NVME = 2, /* NVME type target subsystem */ +}; + +/* Address Family codes for Discovery Log Page entry ADRFAM field */ +enum { + NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ + NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */ + NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ + NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ + NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ +}; + +/* Transport Type codes for Discovery Log Page entry TRTYPE field */ +enum { + NVMF_TRTYPE_RDMA = 1, /* RDMA */ + NVMF_TRTYPE_FC = 2, /* Fibre Channel */ + NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ + NVMF_TRTYPE_MAX, +}; + +/* Transport Requirements codes for Discovery Log Page entry TREQ field */ +enum { + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + 
NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ +}; + +/* RDMA Connection Management Service Type codes for Discovery Log Page + * entry TSAS RDMA_CMS field + */ +enum { + NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ +}; + +#define NVMF_AQ_DEPTH 32 enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ @@ -50,6 +122,13 @@ enum { #define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) #define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) +/* + * Submission and Completion Queue Entry Sizes for the NVM command set. + * (In bytes and specified as a power of two (2^n)). + */ +#define NVME_NVM_IOSQES 6 +#define NVME_NVM_IOCQES 4 + enum { NVME_CC_ENABLE = 1 << 0, NVME_CC_CSS_NVM = 0 << 4, @@ -61,8 +140,8 @@ enum { NVME_CC_SHN_NORMAL = 1 << 14, NVME_CC_SHN_ABRUPT = 2 << 14, NVME_CC_SHN_MASK = 3 << 14, - NVME_CC_IOSQES = 6 << 16, - NVME_CC_IOCQES = 4 << 20, + NVME_CC_IOSQES = NVME_NVM_IOSQES << 16, + NVME_CC_IOCQES = NVME_NVM_IOCQES << 20, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, @@ -107,7 +186,11 @@ struct nvme_id_ctrl { __u8 mdts; __le16 cntlid; __le32 ver; - __u8 rsvd84[172]; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[156]; __le16 oacs; __u8 acl; __u8 aerl; @@ -119,10 +202,12 @@ struct nvme_id_ctrl { __u8 apsta; __le16 wctemp; __le16 cctemp; - __u8 rsvd270[242]; + __u8 rsvd270[50]; + __le16 kas; + __u8 rsvd322[190]; __u8 sqes; __u8 cqes; - __u8 rsvd514[2]; + __le16 maxcmd; __le32 nn; __le16 oncs; __le16 fuses; @@ -135,7 +220,15 @@ struct nvme_id_ctrl { __le16 acwu; __u8 rsvd534[2]; __le32 sgls; - __u8 rsvd540[1508]; + __u8 rsvd540[228]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[244]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; @@ -274,6 +367,12 @@ struct nvme_reservation_status { } regctl_ds[]; }; +enum nvme_async_event_type { + NVME_AER_TYPE_ERROR = 0, + NVME_AER_TYPE_SMART = 1, + NVME_AER_TYPE_NOTICE = 2, +}; + /* I/O commands */ enum nvme_opcode { @@ -290,6 +389,84 @@ enum nvme_opcode { nvme_cmd_resv_release = 0x15, }; +/* + * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier + * + * @NVME_SGL_FMT_ADDRESS: absolute address of the data block + * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block + * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation + * request subtype + */ +enum { + NVME_SGL_FMT_ADDRESS = 0x00, + NVME_SGL_FMT_OFFSET = 0x01, + NVME_SGL_FMT_INVALIDATE = 0x0f, +}; + +/* + * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier + * + * For struct nvme_sgl_desc: + * @NVME_SGL_FMT_DATA_DESC: data block descriptor + * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor + * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor + * + * For struct nvme_keyed_sgl_desc: + * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor + */ +enum { + NVME_SGL_FMT_DATA_DESC = 
0x00, + NVME_SGL_FMT_SEG_DESC = 0x02, + NVME_SGL_FMT_LAST_SEG_DESC = 0x03, + NVME_KEY_SGL_FMT_DATA_DESC = 0x04, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +/* + * Lowest two bits of our flags field (FUSE field in the spec): + * + * @NVME_CMD_FUSE_FIRST: Fused Operation, first command + * @NVME_CMD_FUSE_SECOND: Fused Operation, second command + * + * Highest two bits in our flags field (PSDT field in the spec): + * + * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer, + * If used, MPTR contains addr of single physical buffer (byte aligned). + * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer, + * If used, MPTR contains an address of an SGL segment containing + * exactly 1 SGL descriptor (qword aligned). + */ +enum { + NVME_CMD_FUSE_FIRST = (1 << 0), + NVME_CMD_FUSE_SECOND = (1 << 1), + + NVME_CMD_SGL_METABUF = (1 << 6), + NVME_CMD_SGL_METASEG = (1 << 7), + NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG, +}; + struct nvme_common_command { __u8 opcode; __u8 flags; @@ -297,8 +474,7 @@ struct nvme_common_command { __le32 nsid; __le32 cdw2[2]; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cdw10[6]; }; @@ -309,8 +485,7 @@ struct nvme_rw_command { __le32 nsid; __u64 rsvd2; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; @@ -350,8 +525,7 @@ struct nvme_dsm_cmd { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 nr; __le32 attributes; __u32 rsvd12[4]; @@ -384,6 +558,7 @@ enum nvme_admin_opcode { nvme_admin_async_event = 0x0c, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, + nvme_admin_keep_alive = 0x18, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, @@ -408,6 +583,7 @@ enum { NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_KATO = 0x0f, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, @@ -415,6 +591,7 @@ enum { NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), NVME_FWACT_REPL_ACTV = (1 << 3), @@ -427,8 +604,7 @@ struct nvme_identify { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cns; __u32 rsvd11[5]; }; @@ -439,8 +615,7 @@ struct nvme_features { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 fid; __le32 dword11; __u32 rsvd12[4]; @@ -499,8 +674,7 @@ struct nvme_download_firmware { __u8 flags; __u16 command_id; __u32 rsvd1[5]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 numd; __le32 offset; __u32 rsvd12[4]; @@ -516,6 +690,143 @@ struct nvme_format_cmd { __u32 rsvd11[5]; }; +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 rsvd10; + __le16 numdl; + __le16 numdu; + __u16 rsvd11; + __le32 lpol; + __le32 lpou; + __u32 rsvd14[2]; +}; + +/* + * Fabrics subcommands. 
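
Putting the two nibbles together: the identifier byte of an SGL descriptor carries the descriptor type in its upper four bits and the subtype in its lower four. A small helper showing how a plain host-memory data block descriptor would be filled in under that encoding (the function name is illustrative, not from this patch):

static void nvme_fill_data_sgl(struct nvme_sgl_desc *sgl,
                               u64 dma_addr, u32 len)
{
        sgl->addr = cpu_to_le64(dma_addr);
        sgl->length = cpu_to_le32(len);
        /* upper nibble: descriptor type; lower nibble: subtype */
        sgl->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_ADDRESS;
}

For in-capsule data a transport would put NVME_SGL_FMT_OFFSET in the low nibble instead, making addr a relative offset rather than an absolute address.
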
+ */ +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 0x7f, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0x00, + nvme_fabrics_type_connect = 0x01, + nvme_fabrics_type_property_get = 0x04, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +/* + * The legal cntlid range a NVMe Target will provide. + * Note that cntlid of value 0 is considered illegal in the fabrics world. + * Devices based on earlier specs did not have the subsystem concept; + * therefore, those devices had their cntlid value set to 0 as a result. + */ +#define NVME_CNTLID_MIN 1 +#define NVME_CNTLID_MAX 0xffef +#define NVME_CNTLID_DYNAMIC 0xffff + +#define MAX_DISC_LOGS 255 + +/* Discovery log page entry */ +struct nvmf_disc_rsp_page_entry { + __u8 trtype; + __u8 adrfam; + __u8 nqntype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __u8 resv8[22]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 resv64[192]; + char subnqn[NVMF_NQN_FIELD_LEN]; + char traddr[NVMF_TRADDR_SIZE]; + union tsas { + char common[NVMF_TSAS_SIZE]; + struct rdma { + __u8 qptype; + __u8 prtype; + __u8 cms; + __u8 resv3[5]; + __u16 pkey; + __u8 resv10[246]; + } rdma; + } tsas; +}; + +/* Discovery log page header */ +struct nvmf_disc_rsp_page_hdr { + __le64 genctr; + __le64 numrec; + __le16 recfmt; + __u8 resv14[1006]; + struct nvmf_disc_rsp_page_entry entries[0]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_connect_data { + uuid_le hostid; + __le16 cntlid; + char resv4[238]; + char subsysnqn[NVMF_NQN_FIELD_LEN]; + char hostnqn[NVMF_NQN_FIELD_LEN]; + char resv5[256]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + struct nvme_command { union { struct nvme_common_command common; @@ -529,10 +840,30 @@ struct nvme_command { struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; }; }; +static inline bool nvme_is_write(struct nvme_command *cmd) +{ + /* + * What a mess... + * + * Why can't we simply have a Fabrics In and Fabrics out command? 
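
The parity convention the check just below relies on: opcodes that move data from host to controller are odd (nvme_cmd_write = 0x01, nvme_cmd_dsm = 0x09), while controller-to-host and no-data opcodes are even (nvme_cmd_read = 0x02, nvme_cmd_flush = 0x00), so testing bit 0 answers "is this a write?". A standalone restatement with a mock type, runnable in userspace:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct mock_cmd {
        uint8_t opcode;         /* stands in for nvme_common_command.opcode */
};

static bool mock_is_write(const struct mock_cmd *cmd)
{
        return cmd->opcode & 1; /* host-to-controller opcodes are odd */
}

int main(void)
{
        struct mock_cmd write_cmd = { .opcode = 0x01 }; /* nvme_cmd_write */
        struct mock_cmd read_cmd  = { .opcode = 0x02 }; /* nvme_cmd_read */

        assert(mock_is_write(&write_cmd));
        assert(!mock_is_write(&read_cmd));
        return 0;
}
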
+ */ + if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + return cmd->fabrics.opcode & 1; + return cmd->common.opcode & 1; +} + enum { + /* + * Generic Command Status: + */ NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -551,10 +882,18 @@ enum { NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, + + NVME_SC_SGL_INVALID_OFFSET = 0x16, + NVME_SC_SGL_INVALID_SUBTYPE = 0x17, + NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, + + /* + * Command Specific Status: + */ NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -572,9 +911,29 @@ enum { NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, NVME_SC_FEATURE_NOT_PER_NS = 0x10f, NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + + /* + * I/O Command Set Specific - NVM commands: + */ NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, + + /* + * I/O Command Set Specific - Fabrics commands: + */ + NVME_SC_CONNECT_FORMAT = 0x180, + NVME_SC_CONNECT_CTRL_BUSY = 0x181, + NVME_SC_CONNECT_INVALID_PARAM = 0x182, + NVME_SC_CONNECT_RESTART_DISC = 0x183, + NVME_SC_CONNECT_INVALID_HOST = 0x184, + + NVME_SC_DISCOVERY_RESTART = 0x190, + NVME_SC_AUTH_REQUIRED = 0x191, + + /* + * Media and Data Integrity Errors: + */ NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -582,12 +941,19 @@ enum { NVME_SC_REFTAG_CHECK = 0x284, NVME_SC_COMPARE_FAILED = 0x285, NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, }; struct nvme_completion { - __le32 result; /* Used by admin commands to return data */ - __u32 rsvd; + /* + * Used by Admin and Fabrics commands to return data: + */ + union { + __le16 result16; + __le32 result; + __le64 result64; + }; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 9bb77d3ed6e0..c2256d746543 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) { } -static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/of.h b/include/linux/of.h index 74eb28cadbef..15c43f076b23 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); +typedef int (*of_init_fn_1_ret)(struct device_node *); typedef void (*of_init_fn_1)(struct device_node *); #define OF_DECLARE_1(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1) +#define OF_DECLARE_1_RET(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) #define OF_DECLARE_2(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_2) diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 901ec01c9fba..26c3302ae58f 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -53,6 +53,8 @@ extern char __dtb_end[]; extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); +extern int 
of_get_flat_dt_subnode_by_name(unsigned long node, + const char *uname); extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 8f2237eb3485..2ab233661ae5 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -19,12 +19,17 @@ extern struct phy_device *of_phy_connect(struct net_device *dev, struct device_node *phy_np, void (*hndlr)(struct net_device *), u32 flags, phy_interface_t iface); +extern struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)); struct phy_device *of_phy_attach(struct net_device *dev, struct device_node *phy_np, u32 flags, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); +extern int of_phy_register_fixed_link(struct device_node *np); +extern bool of_phy_is_fixed_link(struct device_node *np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) @@ -50,6 +55,13 @@ static inline struct phy_device *of_phy_connect(struct net_device *dev, return NULL; } +static inline struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)) +{ + return NULL; +} + static inline struct phy_device *of_phy_attach(struct net_device *dev, struct device_node *phy_np, u32 flags, phy_interface_t iface) @@ -67,12 +79,6 @@ static inline int of_mdio_parse_addr(struct device *dev, { return -ENOSYS; } -#endif /* CONFIG_OF */ - -#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) -extern int of_phy_register_fixed_link(struct device_node *np); -extern bool of_phy_is_fixed_link(struct device_node *np); -#else static inline int of_phy_register_fixed_link(struct device_node *np) { return -ENOSYS; diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index c201060e0c6d..f8e1992d6423 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -1,7 +1,8 @@ #ifndef __OF_RESERVED_MEM_H #define __OF_RESERVED_MEM_H -struct device; +#include <linux/device.h> + struct of_phandle_args; struct reserved_mem_ops; @@ -28,7 +29,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) #ifdef CONFIG_OF_RESERVED_MEM -int of_reserved_mem_device_init(struct device *dev); + +int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx); void of_reserved_mem_device_release(struct device *dev); int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, @@ -42,7 +45,8 @@ void fdt_init_reserved_mem(void); void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size); #else -static inline int of_reserved_mem_device_init(struct device *dev) +static inline int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx) { return -ENOSYS; } @@ -53,4 +57,19 @@ static inline void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size) { } #endif +/** + * of_reserved_mem_device_init() - assign reserved memory region to given device + * @dev: Pointer to the device to configure + * + * This function assigns respective DMA-mapping operations based 
on the first + * reserved memory region specified by 'memory-region' property in device tree + * node of the given device. + * + * Returns error code or zero on success. + */ +static inline int of_reserved_mem_device_init(struct device *dev) +{ + return of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0); +} + #endif /* __OF_RESERVED_MEM_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index 83469522690a..5bc0457ee3a8 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -23,6 +23,9 @@ struct oom_control { /* Used to determine mempolicy */ nodemask_t *nodemask; + /* Memory cgroup in which oom is invoked, or NULL for global oom */ + struct mem_cgroup *memcg; + /* Used to determine cpuset and node locality requirement */ const gfp_t gfp_mask; @@ -70,9 +73,9 @@ static inline bool oom_task_origin(const struct task_struct *p) extern void mark_oom_victim(struct task_struct *tsk); #ifdef CONFIG_MMU -extern void try_oom_reaper(struct task_struct *tsk); +extern void wake_oom_reaper(struct task_struct *tsk); #else -static inline void try_oom_reaper(struct task_struct *tsk) +static inline void wake_oom_reaper(struct task_struct *tsk) { } #endif @@ -83,14 +86,13 @@ extern unsigned long oom_badness(struct task_struct *p, extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, const char *message); + const char *message); extern void check_panic_on_oom(struct oom_control *oc, - enum oom_constraint constraint, - struct mem_cgroup *memcg); + enum oom_constraint constraint); extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, - struct task_struct *task, unsigned long totalpages); + struct task_struct *task); extern bool out_of_memory(struct oom_control *oc); @@ -105,27 +107,7 @@ extern void oom_killer_enable(void); extern struct task_struct *find_lock_task_mm(struct task_struct *p); -static inline bool task_will_free_mem(struct task_struct *task) -{ - struct signal_struct *sig = task->signal; - - /* - * A coredumping process may sleep for an extended period in exit_mm(), - * so the oom killer cannot assume that the process will promptly exit - * and release memory. - */ - if (sig->flags & SIGNAL_GROUP_COREDUMP) - return false; - - if (!(task->flags & PF_EXITING)) - return false; - - /* Make sure that the whole thread group is going down */ - if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT)) - return false; - - return true; -} +bool task_will_free_mem(struct task_struct *task); /* sysctls */ extern int sysctl_oom_dump_tasks; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e5a32445f930..74e4dda91238 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -129,6 +129,9 @@ enum pageflags { /* Compound pages. 
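
The static inline wrapper above simply forwards to the new by-index variant with index 0. Paired with the release side, a driver might use it as in this sketch (mydrv_* is hypothetical; treating -ENODEV as "no region assigned, carry on" is an assumption about how the by-index call reports a missing memory-region property):

static int mydrv_probe(struct platform_device *pdev)
{
        int ret;

        /* Attach the first "memory-region" of the device's DT node. */
        ret = of_reserved_mem_device_init(&pdev->dev);
        if (ret && ret != -ENODEV)
                return ret;

        /* ... normal probing; DMA allocations now come from the
         * reserved region when one was assigned ...
         */
        return 0;
}

static int mydrv_remove(struct platform_device *pdev)
{
        of_reserved_mem_device_release(&pdev->dev);
        return 0;
}
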
Stored in first tail page's flags */ PG_double_map = PG_private_2, + + /* non-lru isolated movable page */ + PG_isolated = PG_reclaim, }; #ifndef __GENERATING_BOUNDS_H @@ -292,11 +295,11 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) */ TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) -PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ -PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND) - TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND) +PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) + TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) @@ -357,29 +360,37 @@ PAGEFLAG(Idle, idle, PF_ANY) * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. * * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, - * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; - * and then page->mapping points, not to an anon_vma, but to a private + * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON + * bit; and then page->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. * - * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable + * page and then page->mapping points a struct address_space. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. */ -#define PAGE_MAPPING_ANON 1 -#define PAGE_MAPPING_KSM 2 -#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) +#define PAGE_MAPPING_ANON 0x1 +#define PAGE_MAPPING_MOVABLE 0x2 +#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) -static __always_inline int PageAnonHead(struct page *page) +static __always_inline int PageMappingFlags(struct page *page) { - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } static __always_inline int PageAnon(struct page *page) { page = compound_head(page); - return PageAnonHead(page); + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + +static __always_inline int __PageMovable(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_MOVABLE; } #ifdef CONFIG_KSM @@ -393,7 +404,7 @@ static __always_inline int PageKsm(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + PAGE_MAPPING_KSM; } #else TESTPAGEFLAG_FALSE(Ksm) @@ -570,6 +581,17 @@ static inline int PageDoubleMap(struct page *page) return PageHead(page) && test_bit(PG_double_map, &page[1].flags); } +static inline void SetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + set_bit(PG_double_map, &page[1].flags); +} + +static inline void ClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + clear_bit(PG_double_map, &page[1].flags); +} static inline int TestSetPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); @@ -587,59 +609,59 @@ 
TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) -TESTPAGEFLAG_FALSE(DoubleMap) +PAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) TESTCLEARFLAG_FALSE(DoubleMap) #endif /* + * For pages that are never mapped to userspace, page->mapcount may be + * used for storing extra information about page type. Any value used + * for this purpose must be <= -2, but it's better start not too close + * to -2 so that an underflow of the page_mapcount() won't be mistaken + * for a special page. + */ +#define PAGE_MAPCOUNT_OPS(uname, lname) \ +static __always_inline int Page##uname(struct page *page) \ +{ \ + return atomic_read(&page->_mapcount) == \ + PAGE_##lname##_MAPCOUNT_VALUE; \ +} \ +static __always_inline void __SetPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \ + atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \ +} \ +static __always_inline void __ClearPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(!Page##uname(page), page); \ + atomic_set(&page->_mapcount, -1); \ +} + +/* * PageBuddy() indicate that the page is free and in the buddy system * (see mm/page_alloc.c). - * - * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to - * -2 so that an underflow of the page_mapcount() won't be mistaken - * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very - * efficiently by most CPU architectures. */ -#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) - -static inline int PageBuddy(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; -} +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) +PAGE_MAPCOUNT_OPS(Buddy, BUDDY) -static inline void __SetPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); -} +/* + * PageBalloon() is set on pages that are on the balloon page list + * (see mm/balloon_compaction.c). + */ +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) +PAGE_MAPCOUNT_OPS(Balloon, BALLOON) -static inline void __ClearPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBuddy(page), page); - atomic_set(&page->_mapcount, -1); -} +/* + * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on + * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. 
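
For reference, PAGE_MAPCOUNT_OPS(Buddy, BUDDY) above expands to exactly the three open-coded helpers it replaces, reconstructed here by hand from the macro:

static __always_inline int PageBuddy(struct page *page)
{
        return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static __always_inline void __SetPageBuddy(struct page *page)
{
        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static __always_inline void __ClearPageBuddy(struct page *page)
{
        VM_BUG_ON_PAGE(!PageBuddy(page), page);
        atomic_set(&page->_mapcount, -1);
}

The distinct sentinel values (-128 for buddy, -256 for balloon, -512 for kmemcg) keep the special page types distinguishable while staying well below -2, per the comment above.
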
+ */ +#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512) +PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG) extern bool is_free_buddy_page(struct page *page); -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) - -static inline int PageBalloon(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; -} - -static inline void __SetPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); -} - -static inline void __ClearPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBalloon(page), page); - atomic_set(&page->_mapcount, -1); -} +__PAGEFLAG(Isolated, isolated, PF_ANY); /* * If network-based swap is enabled, sl*b must keep track of whether pages diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e1fe7cf5bddf..03f2a3e7d76d 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/stacktrace.h> +#include <linux/stackdepot.h> struct pglist_data; struct page_ext_operations { @@ -44,9 +45,8 @@ struct page_ext { #ifdef CONFIG_PAGE_OWNER unsigned int order; gfp_t gfp_mask; - unsigned int nr_entries; int last_migrate_reason; - unsigned long trace_entries[8]; + depot_stack_handle_t handle; #endif }; diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 46f1b939948c..30583ab0ffb1 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern gfp_t __get_page_owner_gfp(struct page *page); +extern void __split_page_owner(struct page *page, unsigned int order); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(struct page *page); @@ -28,12 +28,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, unsigned int order) { if (static_branch_unlikely(&page_owner_inited)) - return __get_page_owner_gfp(page); - else - return 0; + __split_page_owner(page, order); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { @@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, + unsigned int order) { - return 0; } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 97354102794d..81363b834900 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); } -static inline struct page *page_cache_alloc_readahead(struct address_space *x) +static inline gfp_t readahead_gfp_mask(struct address_space *x) { - return __page_cache_alloc(mapping_gfp_mask(x) | - __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); + return mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(void *, struct page *); diff --git 
a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 84f542df7ff5..1c7eec09e5eb 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * used as a pointer. If the compiler generates a separate fetch * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that - * ACCESS_ONCE() is required when fetching it. - * - * Also, we need a data dependency barrier to be paired with - * smp_store_release() in __percpu_ref_switch_to_percpu(). - * - * Use lockless deref which contains both. + * READ_ONCE() is required when fetching it. */ - percpu_ptr = lockless_dereference(ref->percpu_count_ptr); + percpu_ptr = READ_ONCE(ref->percpu_count_ptr); + + /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */ + smp_read_barrier_depends(); /* * Theoretically, the following could test just ATOMIC; however, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1a827cecd62f..e1f921c2e4e0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -69,9 +69,22 @@ struct perf_callchain_entry_ctx { bool contexts_maxed; }; +typedef unsigned long (*perf_copy_f)(void *dst, const void *src, + unsigned long off, unsigned long len); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + unsigned long pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __packed; + struct perf_raw_record { + struct perf_raw_frag frag; u32 size; - void *data; }; /* @@ -517,6 +530,11 @@ struct swevent_hlist { struct perf_cgroup; struct ring_buffer; +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + /** * struct perf_event - performance event kernel representation: */ @@ -675,6 +693,7 @@ struct perf_event { int cgrp_defer_enabled; #endif + struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ }; @@ -1074,7 +1093,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); -extern int get_callchain_buffers(void); +extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); extern int sysctl_perf_event_max_stack; @@ -1283,6 +1302,11 @@ extern void perf_restore_debug_store(void); static inline void perf_restore_debug_store(void) { } #endif +static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) +{ + return frag->pad < sizeof(u64); +} + #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) /* @@ -1326,6 +1350,13 @@ struct perf_pmu_events_attr { const char *event_str; }; +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 94994810c7c0..a3d90b9da18d 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -28,7 +28,10 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn) return __pfn_to_pfn_t(pfn, 0); } -extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags); +static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) +{ + return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); +} static inline bool pfn_t_has_page(pfn_t pfn) { diff --git 
a/include/linux/phy/phy.h b/include/linux/phy/phy.h index a810f2a18842..f08b67238b58 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -22,12 +22,20 @@ struct phy; +enum phy_mode { + PHY_MODE_INVALID, + PHY_MODE_USB_HOST, + PHY_MODE_USB_DEVICE, + PHY_MODE_USB_OTG, +}; + /** * struct phy_ops - set of function pointers for performing phy operations * @init: operation to be performed for initializing phy * @exit: operation to be performed while exiting * @power_on: powering on the phy * @power_off: powering off the phy + * @set_mode: set the mode of the phy * @owner: the module owner containing the ops */ struct phy_ops { @@ -35,6 +43,7 @@ struct phy_ops { int (*exit)(struct phy *phy); int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); + int (*set_mode)(struct phy *phy, enum phy_mode mode); struct module *owner; }; @@ -126,6 +135,7 @@ int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); +int phy_set_mode(struct phy *phy, enum phy_mode mode); static inline int phy_get_bus_width(struct phy *phy) { return phy->attrs.bus_width; @@ -233,6 +243,13 @@ static inline int phy_power_off(struct phy *phy) return -ENOSYS; } +static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) +{ + if (!phy) + return 0; + return -ENOSYS; +} + static inline int phy_get_bus_width(struct phy *phy) { return -ENOSYS; diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index d921afd5f109..12343caa114e 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -175,6 +175,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps, enum pinctrl_map_type type); +void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps); static inline int pinconf_generic_dt_node_to_map_group( struct pinctrl_dev *pctldev, struct device_node *np_config, diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index be830b141d83..271a4e25af67 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h @@ -10,6 +10,7 @@ #include <linux/types.h> #include <linux/nvmem-consumer.h> +#include <linux/bitops.h> /** * struct at24_platform_data - data to set up at24 (generic eeprom) driver @@ -43,10 +44,12 @@ struct at24_platform_data { u32 byte_len; /* size (sum of all addr) */ u16 page_size; /* for writes */ u8 flags; -#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */ -#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ -#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ -#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ +#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ +#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ +#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ void (*setup)(struct nvmem_device *nvmem, void *context); void *context; diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h new file mode 100644 index 
000000000000..69d279c0da96 --- /dev/null +++ b/include/linux/platform_data/b53.h @@ -0,0 +1,33 @@ +/* + * B53 platform data + * + * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __B53_H +#define __B53_H + +#include <linux/kernel.h> + +struct b53_platform_data { + u32 chip_id; + u16 enabled_ports; + + /* only used by MMAP'd driver */ + unsigned big_endian:1; + void __iomem *regs; +}; + +#endif diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h new file mode 100644 index 000000000000..2e5eea358194 --- /dev/null +++ b/include/linux/platform_data/sht3x.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Sensirion AG, Switzerland + * Author: David Frey <david.frey@sensirion.com> + * Author: Pascal Sachs <pascal.sachs@sensirion.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
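
A board file would hand this structure to the switch driver through an ordinary platform device, registered with platform_device_register() during board setup. A sketch under stated assumptions — the "b53-switch" device name, the chip id value, and the port mask are all illustrative, not taken from this patch:

#include <linux/platform_data/b53.h>
#include <linux/platform_device.h>

static struct b53_platform_data myboard_b53_pdata = {
        .chip_id        = 0x6328,       /* hypothetical chip id */
        .enabled_ports  = 0x1f,         /* ports 0-4 */
        .big_endian     = 0,
        /* .regs gets filled in once the register window is mapped */
};

static struct platform_device myboard_b53_dev = {
        .name   = "b53-switch",         /* assumed driver name */
        .id     = -1,
        .dev    = {
                .platform_data = &myboard_b53_pdata,
        },
};
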
+ * + */ + +#ifndef __SHT3X_H_ +#define __SHT3X_H_ + +struct sht3x_platform_data { + bool blocking_io; + bool high_precision; +}; +#endif /* __SHT3X_H_ */ diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index fb5625bcca9a..5c1e21c87270 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -38,6 +38,7 @@ struct s3c64xx_spi_csinfo { struct s3c64xx_spi_info { int src_clk_nr; int num_cs; + bool no_cs; int (*cfg_gpio)(void); dma_filter_fn filter; void *dma_tx; diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 308d6044f153..09779b0ae720 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -42,6 +42,7 @@ extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); extern int pm_clk_add_clk(struct device *dev, struct clk *clk); +extern int of_pm_clk_add_clk(struct device *dev, const char *name); extern int of_pm_clk_add_clks(struct device *dev); extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 39285c7bd3f5..31fec858088c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -57,7 +57,6 @@ struct generic_pm_domain { unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ - bool suspend_power_off; /* Power status before system suspend */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); struct gpd_dev_ops dev_ops; @@ -128,8 +127,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -164,9 +163,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off) +static inline int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) { + return -ENOSYS; } #endif diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 57d146fe44dd..e856c2cb0fe8 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h @@ -26,47 +26,35 @@ * calling these symbols with arch_has_pmem_api() and redirect to the * implementation in asm/pmem.h. 
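
Since pm_genpd_init() can now fail (see the pm_domain.h hunk above), providers should check its result instead of assuming success. A hypothetical genpd provider's setup:

static int mydomain_setup(struct generic_pm_domain *genpd)
{
        int ret;

        genpd->power_on = mydomain_power_on;    /* hypothetical callbacks */
        genpd->power_off = mydomain_power_off;

        /* Third argument true: the domain starts in the powered-off state. */
        ret = pm_genpd_init(genpd, &simple_qos_governor, true);
        if (ret)
                pr_err("failed to init PM domain: %d\n", ret);
        return ret;
}

The stub for kernels without generic PM domains mirrors the change by returning -ENOSYS rather than silently doing nothing.
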
*/ -static inline bool __arch_has_wmb_pmem(void) -{ - return false; -} - -static inline void arch_wmb_pmem(void) -{ - BUG(); -} - -static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, - size_t n) +static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n) { BUG(); } -static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src, - size_t n) +static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n) { BUG(); return -EFAULT; } -static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, +static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes, struct iov_iter *i) { BUG(); return 0; } -static inline void arch_clear_pmem(void __pmem *addr, size_t size) +static inline void arch_clear_pmem(void *addr, size_t size) { BUG(); } -static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size) +static inline void arch_wb_cache_pmem(void *addr, size_t size) { BUG(); } -static inline void arch_invalidate_pmem(void __pmem *addr, size_t size) +static inline void arch_invalidate_pmem(void *addr, size_t size) { BUG(); } @@ -77,13 +65,6 @@ static inline bool arch_has_pmem_api(void) return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); } -static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src, - size_t size) -{ - memcpy(dst, (void __force *) src, size); - return 0; -} - /* * memcpy_from_pmem - read from persistent memory with error handling * @dst: destination buffer @@ -92,54 +73,13 @@ static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src, * * Returns 0 on success negative error code on failure. */ -static inline int memcpy_from_pmem(void *dst, void __pmem const *src, - size_t size) +static inline int memcpy_from_pmem(void *dst, void const *src, size_t size) { if (arch_has_pmem_api()) return arch_memcpy_from_pmem(dst, src, size); else - return default_memcpy_from_pmem(dst, src, size); -} - -/** - * arch_has_wmb_pmem - true if wmb_pmem() ensures durability - * - * For a given cpu implementation within an architecture it is possible - * that wmb_pmem() resolves to a nop. In the case this returns - * false, pmem api users are unable to ensure durability and may want to - * fall back to a different data consistency model, or otherwise notify - * the user. - */ -static inline bool arch_has_wmb_pmem(void) -{ - return arch_has_pmem_api() && __arch_has_wmb_pmem(); -} - -/* - * These defaults seek to offer decent performance and minimize the - * window between i/o completion and writes being durable on media. - * However, it is undefined / architecture specific whether - * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for - * making data durable relative to i/o completion. - */ -static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, - size_t size) -{ - memcpy((void __force *) dst, src, size); -} - -static inline size_t default_copy_from_iter_pmem(void __pmem *addr, - size_t bytes, struct iov_iter *i) -{ - return copy_from_iter_nocache((void __force *)addr, bytes, i); -} - -static inline void default_clear_pmem(void __pmem *addr, size_t size) -{ - if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0) - clear_page((void __force *)addr); - else - memset((void __force *)addr, 0, size); + memcpy(dst, src, size); + return 0; } /** @@ -152,29 +92,14 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size) * being effectively evicted from, or never written to, the processor * cache hierarchy after the copy completes. 
After memcpy_to_pmem() * data may still reside in cpu or platform buffers, so this operation - * must be followed by a wmb_pmem(). + * must be followed by a blkdev_issue_flush() on the pmem block device. */ -static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) +static inline void memcpy_to_pmem(void *dst, const void *src, size_t n) { if (arch_has_pmem_api()) arch_memcpy_to_pmem(dst, src, n); else - default_memcpy_to_pmem(dst, src, n); -} - -/** - * wmb_pmem - synchronize writes to persistent memory - * - * After a series of memcpy_to_pmem() operations this drains data from - * cpu write buffers and any platform (memory controller) buffers to - * ensure that written data is durable on persistent memory media. - */ -static inline void wmb_pmem(void) -{ - if (arch_has_wmb_pmem()) - arch_wmb_pmem(); - else - wmb(); + memcpy(dst, src, n); } /** @@ -184,14 +109,14 @@ static inline void wmb_pmem(void) * @i: iterator with source data * * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). */ -static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, +static inline size_t copy_from_iter_pmem(void *addr, size_t bytes, struct iov_iter *i) { if (arch_has_pmem_api()) return arch_copy_from_iter_pmem(addr, bytes, i); - return default_copy_from_iter_pmem(addr, bytes, i); + return copy_from_iter_nocache(addr, bytes, i); } /** @@ -200,14 +125,14 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, * @size: number of bytes to zero * * Write zeros into the memory range starting at 'addr' for 'size' bytes. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). */ -static inline void clear_pmem(void __pmem *addr, size_t size) +static inline void clear_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_clear_pmem(addr, size); else - default_clear_pmem(addr, size); + memset(addr, 0, size); } /** @@ -218,7 +143,7 @@ static inline void clear_pmem(void __pmem *addr, size_t size) * For platforms that support clearing poison this flushes any poisoned * ranges out of the cache */ -static inline void invalidate_pmem(void __pmem *addr, size_t size) +static inline void invalidate_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_invalidate_pmem(addr, size); @@ -230,9 +155,9 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size) * @size: number of bytes to write back * * Write back the processor cache range starting at 'addr' for 'size' bytes. - * This function requires explicit ordering with a wmb_pmem() call. + * See blkdev_issue_flush() note for memcpy_to_pmem(). */ -static inline void wb_cache_pmem(void __pmem *addr, size_t size) +static inline void wb_cache_pmem(void *addr, size_t size) { if (arch_has_pmem_api()) arch_wb_cache_pmem(addr, size); diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h index 24f51db8a83f..89d3f1cb3433 100644 --- a/include/linux/power/max8903_charger.h +++ b/include/linux/power/max8903_charger.h @@ -26,8 +26,8 @@ struct max8903_pdata { /* * GPIOs - * cen, chg, flt, and usus are optional. - * dok, dcm, and uok are not optional depending on the status of + * cen, chg, flt, dcm and usus are optional. + * dok and uok are not optional depending on the status of * dc_valid and usb_valid. 
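The pmem.h rework above drops the __pmem annotation and removes wmb_pmem(); per the updated comments, durability now comes from flushing the pmem block device after the copy. A minimal write-path sketch under those assumptions (the function, buffer, and bdev names are hypothetical):

#include <linux/pmem.h>
#include <linux/blkdev.h>

static int write_record(struct block_device *bdev, void *pmem_addr,
			const void *buf, size_t len)
{
	/* data may still sit in CPU or platform buffers after this */
	memcpy_to_pmem(pmem_addr, buf, len);

	/* per the comment above: make the write durable on media */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}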
*/ int cen; /* Charger Enable input */ @@ -41,7 +41,7 @@ struct max8903_pdata { /* * DC(Adapter/TA) is wired * When dc_valid is true, - * dok and dcm should be valid. + * dok should be valid. * * At least one of dc_valid or usb_valid should be true. */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 751061790626..3965503315ef 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -248,6 +248,7 @@ struct power_supply { struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; + bool initialized; atomic_t use_cnt; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; diff --git a/include/linux/printk.h b/include/linux/printk.h index f4da695fd615..f136b22c7772 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -108,11 +108,14 @@ struct va_format { * Dummy printk for disabled debugging statements to use whilst maintaining * gcc's format checking. */ -#define no_printk(fmt, ...) \ -do { \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ -} while (0) +#define no_printk(fmt, ...) \ +({ \ + do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + } while (0); \ + 0; \ +}) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) @@ -309,20 +312,24 @@ extern asmlinkage void dump_stack(void) __cold; #define printk_once(fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #define printk_deferred_once(fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk_deferred(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #else #define printk_once(fmt, ...) \ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 831479f8df8f..899e95e84400 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -58,7 +58,8 @@ struct pstore_info { int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, int *count, struct timespec *time, char **buf, - bool *compressed, struct pstore_info *psi); + bool *compressed, ssize_t *ecc_notice_size, + struct pstore_info *psi); int (*write)(enum pstore_type_id type, enum kmsg_dump_reason reason, u64 *id, unsigned int part, int count, bool compressed, diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h new file mode 100644 index 000000000000..2052011bf9fb --- /dev/null +++ b/include/linux/ptr_ring.h @@ -0,0 +1,448 @@ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. 
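The printk.h hunk above turns printk_once() and printk_deferred_once() into expressions that evaluate to true only on the call that actually printed (and makes no_printk() evaluate to 0), so one-time logging can gate a one-time action. A short sketch, where the helper is hypothetical:

	if (printk_once(KERN_WARNING "myfeature: falling back to slow path\n"))
		myfeature_disable_fast_path();	/* runs exactly once */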
+ */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/cache.h> +#include <linux/slab.h> +#include <asm/errno.h> +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer ____cacheline_aligned_in_smp; + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). If ring is ever resized, callers must hold + * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold + * producer_lock, the next call to __ptr_ring_produce may fail. + */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must hold producer_lock. + */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + r->queue[r->producer++] = ptr; + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. + * If ring is never resized, and if the pointer is merely + * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. 
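The produce-side note above asks loop callers for a compiler barrier such as cpu_relax(). A minimal spin-until-space sketch, assuming the caller already holds producer_lock and a consumer is making progress (otherwise this loops forever):

static void produce_spin(struct ptr_ring *r, void *ptr)
{
	/* __ptr_ring_produce() returns -ENOSPC while the slot is taken */
	while (__ptr_ring_produce(r, ptr))
		cpu_relax();	/* compiler barrier, as the comment requires */
}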
+ */ +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return r->queue[r->consumer]; + return NULL; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must take consumer_lock + * if the ring is ever resized - see e.g. ptr_ring_empty. + */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + return !__ptr_ring_peek(r); +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + r->queue[r->consumer++] = NULL; + if (unlikely(r->consumer >= r->size)) + r->consumer = 0; +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); + + return ptr; +} + +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. 
+ */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) +{ + return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + r->size = size; + r->producer = r->consumer = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + r->size = size; + r->producer = producer; + r->consumer = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->producer_lock, flags); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock_irqrestore(&(r)->producer_lock, flags); + + kfree(old); + + return 0; +} + +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc(nrings * sizeof *queues, gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->producer_lock, flags); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kfree(r->queue); +} 
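A minimal end-to-end usage sketch for the ring defined above (an illustration, not part of this file). Entries must be non-NULL, since a NULL slot is how the ring encodes "empty"; the _any variants take the lock irqsave and so are usable from any context:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static struct ptr_ring ring;

static int example_init(void)
{
	return ptr_ring_init(&ring, 64, GFP_KERNEL);	/* 64 zeroed slots */
}

/* producer side: returns -ENOSPC when the next slot is still occupied */
static int example_send(void *item)
{
	return ptr_ring_produce_any(&ring, item);
}

/* consumer side: returns NULL when the ring is empty */
static void *example_recv(void)
{
	return ptr_ring_consume_any(&ring);
}

static void example_destroy(void *ptr)
{
	kfree(ptr);	/* entries are assumed to be kmalloc'ed here */
}

static void example_exit(void)
{
	ptr_ring_cleanup(&ring, example_destroy);	/* frees leftovers */
}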
+ +#endif /* _LINUX_PTR_RING_H */ diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 3f14c7efe68f..40c0ada01806 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -12,10 +12,21 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 #define X_FINAL_CLEANUP_AGG_INT 1 +#define NUM_OF_GLOBAL_QUEUES 128 + +/* Queue Zone sizes in bytes */ +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 0 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 7 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 10 +#define FW_REVISION_VERSION 5 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -97,45 +108,86 @@ #define DQ_XCM_AGG_VAL_SEL_REG6 7 /* XCM agg val selection */ -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_CONS_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_TX_BD_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD \ - DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* UCM agg val selection (HW) */ +#define DQ_UCM_AGG_VAL_SEL_WORD0 0 +#define DQ_UCM_AGG_VAL_SEL_WORD1 1 +#define DQ_UCM_AGG_VAL_SEL_WORD2 2 +#define DQ_UCM_AGG_VAL_SEL_WORD3 3 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 + +/* UCM agg val selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 +#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3 +#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0 +#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2 + +/* TCM agg val selection (HW) */ +#define DQ_TCM_AGG_VAL_SEL_WORD0 0 +#define DQ_TCM_AGG_VAL_SEL_WORD1 1 +#define DQ_TCM_AGG_VAL_SEL_WORD2 2 +#define DQ_TCM_AGG_VAL_SEL_WORD3 3 +#define DQ_TCM_AGG_VAL_SEL_REG1 4 +#define DQ_TCM_AGG_VAL_SEL_REG2 5 +#define DQ_TCM_AGG_VAL_SEL_REG6 6 +#define DQ_TCM_AGG_VAL_SEL_REG9 7 + +/* TCM agg val selection (FW) */ +#define DQ_TCM_L2B_BD_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD1 +#define DQ_TCM_ROCE_RQ_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD0 /* XCM agg counter flag selection */ -#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 -#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 -#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 -#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 -#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 -#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 -#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 -#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 /* XCM agg counter flag selection */ -#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ - 
DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ - DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) + +/* UCM agg counter flag selection (HW) */ +#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_UCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_UCM_AGG_FLG_SHIFT_CF3 2 +#define DQ_UCM_AGG_FLG_SHIFT_CF4 3 +#define DQ_UCM_AGG_FLG_SHIFT_CF5 4 +#define DQ_UCM_AGG_FLG_SHIFT_CF6 5 +#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6 +#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 + +/* UCM agg counter flag selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) + +#define DQ_REGION_SHIFT (12) + +/* DPM */ +#define DQ_DPM_WQE_BUFF_SIZE (320) + +/* Conn type ranges */ +#define DQ_CONN_TYPE_RANGE_SHIFT (4) /*****************/ /* QM CONSTANTS */ @@ -282,8 +334,6 @@ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 #define PXP_VF_BAR0_START_IGU 0 #define PXP_VF_BAR0_IGU_LENGTH 0x3000 @@ -342,6 +392,9 @@ #define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 @@ -379,6 +432,38 @@ struct async_data { u8 fw_debug_param; }; +struct coalescing_timeset { + u8 value; +#define COALESCING_TIMESET_TIMESET_MASK 0x7F +#define COALESCING_TIMESET_TIMESET_SHIFT 0 +#define COALESCING_TIMESET_VALID_MASK 0x1 +#define COALESCING_TIMESET_VALID_SHIFT 7 +}; + +struct common_prs_pf_msg_info { + __le32 value; +#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1 +#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3 +#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF +#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4 +}; + +struct common_queue_zone { + __le16 ring_drv_data_consumer; + __le16 reserved; +}; + +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 cqe_prod; +}; + struct regpair { __le32 lo; __le32 hi; @@ -388,11 +473,23 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; +struct malicious_vf_eqe_data { + u8 vf_id; + u8 err_id; + __le16 reserved[3]; +}; + +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + /* Event Data Union */ union event_ring_data { - u8 bytes[8]; - struct vf_pf_channel_eqe_data vf_pf_channel; - struct async_data 
async_info; + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct malicious_vf_eqe_data malicious_vf; + struct initial_cleanup_eqe_data vf_init_cleanup; }; /* Event Ring Entry */ @@ -420,9 +517,9 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { - PROTOCOLID_RESERVED1, + PROTOCOLID_ISCSI, PROTOCOLID_RESERVED2, - PROTOCOLID_RESERVED3, + PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_RESERVED4, @@ -433,6 +530,16 @@ enum protocol_type { MAX_PROTOCOL_TYPE }; +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[3]; +}; + +struct ustorm_queue_zone { + struct ustorm_eth_queue_zone eth; + struct common_queue_zone common; +}; + /* status block structure */ struct cau_pi_entry { u32 prod; @@ -588,7 +695,10 @@ struct parsing_and_err_flags { #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; -/* Concrete Function ID. */ +struct pb_context { + __le32 crc[4]; +}; + struct pxp_concrete_fid { __le16 fid; #define PXP_CONCRETE_FID_PFID_MASK 0xF @@ -655,6 +765,72 @@ struct pxp_ptt_entry { }; /* RSS hash type */ +struct rdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + u8 flags0; +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 + u8 partial_dif_data[7]; + __le16 partial_crc_value; + __le16 partial_checksum_value; + __le32 offset_in_io; + __le16 flags1; +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 + __le16 state; +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 +#define 
RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 + __le32 reserved2; +}; + enum rss_hash_type { RSS_HASH_TYPE_DEFAULT = 0, RSS_HASH_TYPE_IPV4 = 1, @@ -683,19 +859,122 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; -struct tunnel_parsing_flags { - u8 flags; -#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 -#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 -#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 -#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 -#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 -#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 -#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 -#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 -#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +struct tdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + __le16 partial_crc_valueB; + __le16 partial_checksum_valueB; + __le16 stateB; +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 + u8 reserved1; + u8 flags0; +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 + __le32 flags1; +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 +#define 
TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 +#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_iob; + __le16 partial_crc_value_a; + __le16 partial_checksum_valuea_; + __le32 offset_in_ioa; + u8 partial_dif_data_a[8]; + u8 partial_dif_data_b[8]; +}; + +struct timers_context { + __le32 logical_client0; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 + __le32 logical_client1; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 + __le32 logical_client2; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 + __le32 host_expiration_fields; +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 }; #endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 092cb0c1afcb..b5ebc697d05f 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -12,6 +12,8 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 0 #define ETH_CACHE_LINE_SIZE 64 #define ETH_MAX_RAMROD_PER_CON 8 @@ -57,19 +59,6 @@ #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 -/* Queue Zone 
sizes */ -#define TSTORM_QZONE_SIZE 0 -#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone) -#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone) -#define XSTORM_QZONE_SIZE 0 -#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone) -#define PSTORM_QZONE_SIZE 0 - -/* Interrupt coalescing TimeSet */ -struct coalescing_timeset { - u8 timeset; - u8 valid; -}; struct eth_tx_1st_bd_flags { u8 bitfields; @@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd { u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; -#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 -#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF -#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet. */ @@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd { #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; +struct eth_fast_path_cqe_fw_debug { + u8 reserved0; + u8 reserved1; + __le16 reserved2; +}; + +/* tunneling parsing flags */ +struct eth_tunnel_parsing_flags { + u8 flags; +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; + /* Regular ETH Rx FP CQE. 
*/ struct eth_fast_path_rx_reg_cqe { - u8 type; - u8 bitfields; + u8 type; + u8 bitfields; #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 - __le16 pkt_len; - struct parsing_and_err_flags pars_flags; - __le16 vlan_tag; - __le32 rss_hash; - __le16 len_on_first_bd; - u8 placement_offset; - struct tunnel_parsing_flags tunnel_pars_flags; - u8 bd_num; - u8 reserved[7]; - u32 fw_debug; - u8 reserved1[3]; - u8 flags; + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved[7]; + struct eth_fast_path_cqe_fw_debug fw_debug; + u8 reserved1[3]; + u8 flags; #define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 #define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 #define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 @@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe { __le32 rss_hash; __le16 len_on_first_bd; u8 placement_offset; - struct tunnel_parsing_flags tunnel_pars_flags; + struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; - u32 fw_debug; + struct eth_fast_path_cqe_fw_debug fw_debug; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -264,12 +276,25 @@ enum eth_rx_cqe_type { MAX_ETH_RX_CQE_TYPE }; -/* ETH Rx producers data */ -struct eth_rx_prod_data { - __le16 bd_prod; - __le16 cqe_prod; - __le16 reserved; - __le16 reserved1; +enum eth_rx_tunn_type { + ETH_RX_NO_TUNN, + ETH_RX_TUNN_GENEVE, + ETH_RX_TUNN_GRE, + ETH_RX_TUNN_VXLAN, + MAX_ETH_RX_TUNN_TYPE +}; + +/* Aggregation end reason. */ +enum eth_tpa_end_reason { + ETH_AGG_END_UNUSED, + ETH_AGG_END_SP_UPDATE, + ETH_AGG_END_MAX_LEN, + ETH_AGG_END_LAST_SEG, + ETH_AGG_END_TIMEOUT, + ETH_AGG_END_NOT_CONSISTENT, + ETH_AGG_END_OUT_OF_ORDER, + ETH_AGG_END_NON_TPA_SEG, + MAX_ETH_TPA_END_REASON }; /* The first tx bd of a given packet */ @@ -337,21 +362,18 @@ union eth_tx_bd_types { }; /* Mstorm Queue Zone */ -struct mstorm_eth_queue_zone { - struct eth_rx_prod_data rx_producers; - __le32 reserved[2]; -}; - -/* Ustorm Queue Zone */ -struct ustorm_eth_queue_zone { - struct coalescing_timeset int_coalescing_timeset; - __le16 reserved[3]; +enum eth_tx_tunn_type { + ETH_TX_TUNN_GENEVE, + ETH_TX_TUNN_TTAG, + ETH_TX_TUNN_GRE, + ETH_TX_TUNN_VXLAN, + MAX_ETH_TX_TUNN_TYPE }; /* Ystorm Queue Zone */ -struct ystorm_eth_queue_zone { - struct coalescing_timeset int_coalescing_timeset; - __le16 reserved[3]; +struct xstorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[7]; }; /* ETH doorbell data */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h new file mode 100644 index 000000000000..b3c0feb15ae9 --- /dev/null +++ b/include/linux/qed/iscsi_common.h @@ -0,0 +1,1439 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
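The qed HSI headers above and the iSCSI definitions that follow pack hardware fields with paired _MASK/_SHIFT defines. A sketch of the access idiom those pairs imply (the GET/SET helpers here are illustrative only; the qed driver carries its own equivalents):

#define HSI_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define HSI_SET_FIELD(value, name, field)				\
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) |	\
		   (((field) & name##_MASK) << name##_SHIFT))

/* e.g. build a valid coalescing_timeset (struct defined earlier) */
static void example_timeset(struct coalescing_timeset *ts)
{
	u8 v = 0;

	HSI_SET_FIELD(v, COALESCING_TIMESET_TIMESET, 10);
	HSI_SET_FIELD(v, COALESCING_TIMESET_VALID, 1);
	ts->value = v;		/* 0x8A: timeset 10, valid bit set */
}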
+ */ + +#ifndef __ISCSI_COMMON__ +#define __ISCSI_COMMON__ +/**********************/ +/* ISCSI FW CONSTANTS */ +/**********************/ + +/* iSCSI HSI constants */ +#define ISCSI_DEFAULT_MTU (1500) + +/* Current iSCSI HSI version number composed of two fields (16 bit) */ +#define ISCSI_HSI_MAJOR_VERSION (0) +#define ISCSI_HSI_MINOR_VERSION (0) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_SLOW_PATH_LAYER_CODE (6) + +/* CQE completion status */ +#define ISCSI_EQE_COMPLETION_SUCCESS (0x0) +#define ISCSI_EQE_RST_CONN_RCVD (0x1) + +/* iSCSI parameter defaults */ +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) + +/* iSCSI parameter limits */ +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) + +/* iSCSI reserved params */ +#define ISCSI_ITT_ALL_ONES (0xffffffff) +#define ISCSI_TTT_ALL_ONES (0xffffffff) + +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 + +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) +#define ISCSI_OPCODE_NOP_OUT ( \ + ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) +#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) +#define ISCSI_OPCODE_SCSI_CMD ( \ + ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) +#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) +#define ISCSI_OPCODE_TMF_REQUEST ( \ + ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) +#define ISCSI_OPCODE_LOGIN_REQUEST ( \ + ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) +#define ISCSI_OPCODE_TEXT_REQUEST ( \ + ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) +#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ + ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) + +/* iSCSI stages */ +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) + +/* iSCSI CQE errors */ +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) + +struct cqe_error_bitmap { + u8 cqe_error_status_bits; +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define 
CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +}; + +union cqe_error_status { + u8 error_status; + struct cqe_error_bitmap error_bits; +}; + +struct data_hdr { + __le32 data[12]; +}; + +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +struct iscsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; +}; + +struct iscsi_cached_sge_ctx { + struct iscsi_sge sge; + struct regpair reserved; + __le32 dsgl_curr_offset[2]; +}; + +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 cdb[4]; +}; + +struct iscsi_common_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 hdr_first_byte; +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 lun_reserved[4]; + __le32 data[6]; +}; + +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define 
ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; +}; + +struct iscsi_ext_cdb_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + struct iscsi_sge cdb_sge; +}; + +struct iscsi_login_req_hdr { + u8 version_min; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_TABC; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le16 reserved1; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved2[4]; +}; + +struct iscsi_logout_req_hdr { + __le16 reserved0; + u8 reason_code; + u8 opcode; + __le32 reserved1; + __le32 reserved2[2]; + __le32 itt; + __le16 reserved3; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +struct iscsi_data_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 reserved3; + __le32 exp_stat_sn; + __le32 reserved4; + 
__le32 data_sn; + __le32 buffer_offset; + __le32 reserved5; +}; + +struct iscsi_data_in_hdr { + u8 status_rsvd; + u8 reserved1; + u8 flags; +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 buffer_offset; + __le32 residual_count; +}; + +struct iscsi_r2t_hdr { + u8 reserved0[3]; + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 r2t_sn; + __le32 buffer_offset; + __le32 desired_data_trns_len; +}; + +struct iscsi_nop_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; +}; + +struct iscsi_nop_in_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; +}; + +struct iscsi_login_response_hdr { + u8 version_active; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_TABC; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 reserved2; + u8 status_detail; + u8 status_class; + __le32 reserved4[2]; +}; + +struct iscsi_logout_response_hdr { + u8 reserved1; + u8 response; + u8 flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define 
ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 reserved2[2]; + __le32 itt; + __le32 reserved3; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4; + __le16 time2retain; + __le16 time2wait; + __le32 reserved5[1]; +}; + +struct iscsi_text_request_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +struct iscsi_text_response_hdr { + __le16 reserved1; + u8 flags; +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +struct iscsi_tmf_request_hdr { + __le16 reserved0; + u8 function; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 rtt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 ref_cmd_sn; + __le32 exp_data_sn; + __le32 reserved4[2]; +}; + +struct iscsi_tmf_response_hdr { + u8 reserved2; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 itt; + __le32 rtt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +struct iscsi_response_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 snack_tag; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 exp_data_sn; + __le32 bi_residual_count; + __le32 residual_count; +}; + +struct iscsi_reject_hdr { + u8 reserved4; + u8 hdr_reason; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define 
ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 reserved1; + __le32 reserved2; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 reserved3[2]; +}; + +union iscsi_task_hdr { + struct iscsi_common_hdr common; + struct data_hdr data; + struct iscsi_cmd_hdr cmd; + struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd; + struct iscsi_login_req_hdr login_req; + struct iscsi_logout_req_hdr logout_req; + struct iscsi_data_out_hdr data_out; + struct iscsi_data_in_hdr data_in; + struct iscsi_r2t_hdr r2t; + struct iscsi_nop_out_hdr nop_out; + struct iscsi_nop_in_hdr nop_in; + struct iscsi_login_response_hdr login_response; + struct iscsi_logout_response_hdr logout_response; + struct iscsi_text_request_hdr text_request; + struct iscsi_text_response_hdr text_response; + struct iscsi_tmf_request_hdr tmf_request; + struct iscsi_tmf_response_hdr tmf_response; + struct iscsi_response_hdr response; + struct iscsi_reject_hdr reject; + struct iscsi_async_msg_hdr async_msg; +}; + +struct iscsi_cqe_common { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le32 reserved[3]; + union iscsi_task_hdr iscsi_hdr; +}; + +struct iscsi_cqe_solicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le16 itid; + u8 task_type; + u8 fw_dbg_field; + __le32 reserved1[2]; + union iscsi_task_hdr iscsi_hdr; +}; + +struct iscsi_cqe_unsolicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le16 reserved0; + u8 reserved1; + u8 unsol_cqe_type; + struct regpair rqe_opaque; + union iscsi_task_hdr iscsi_hdr; +}; + +union iscsi_cqe { + struct iscsi_cqe_common cqe_common; + struct iscsi_cqe_solicited cqe_solicited; + struct iscsi_cqe_unsolicited cqe_unsolicited; +}; + +enum iscsi_cqes_type { + ISCSI_CQE_TYPE_SOLICITED = 1, + ISCSI_CQE_TYPE_UNSOLICITED, + ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE + , + ISCSI_CQE_TYPE_TASK_CLEANUP, + ISCSI_CQE_TYPE_DUMMY, + MAX_ISCSI_CQES_TYPE +}; + +enum iscsi_cqe_unsolicited_type { + ISCSI_CQE_UNSOLICITED_NONE, + ISCSI_CQE_UNSOLICITED_SINGLE, + ISCSI_CQE_UNSOLICITED_FIRST, + ISCSI_CQE_UNSOLICITED_MIDDLE, + ISCSI_CQE_UNSOLICITED_LAST, + MAX_ISCSI_CQE_UNSOLICITED_TYPE +}; + +struct iscsi_virt_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + __le32 sgl_initial_offset; + __le32 dsgl_initial_offset; + __le32 dsgl_curr_offset[2]; +}; + +struct iscsi_sgl_var_params { + u8 sgl_ptr; + u8 dsgl_ptr; + __le16 sge_offset; + __le16 dsge_offset; +}; + +struct iscsi_phys_sgl_ctx { + struct regpair sgl_base; + struct regpair dsgl_base; + u8 sgl_size; + u8 dsgl_size; + __le16 reserved; + struct iscsi_sgl_var_params var_params[2]; +}; + +union iscsi_data_desc_ctx { + struct iscsi_virt_sgl_ctx virt_sgl; + struct iscsi_phys_sgl_ctx phys_sgl; + struct iscsi_cached_sge_ctx cached_sge; +}; + +struct iscsi_debug_modes { + u8 flags; +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define 
ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3 +#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6 +}; + +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 +}; + +enum iscsi_eqe_opcode { + ISCSI_EVENT_TYPE_INIT_FUNC = 0, + ISCSI_EVENT_TYPE_DESTROY_FUNC, + ISCSI_EVENT_TYPE_OFFLOAD_CONN, + ISCSI_EVENT_TYPE_UPDATE_CONN, + ISCSI_EVENT_TYPE_CLEAR_SQ, + ISCSI_EVENT_TYPE_TERMINATE_CONN, + ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, + ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, + RESERVED8, + RESERVED9, + ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, + ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, + ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, + ISCSI_EVENT_TYPE_ASYN_SYN_RCVD, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT, + ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, + ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, + ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, + ISCSI_EVENT_TYPE_TCP_CONN_ERROR, + ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES, + MAX_ISCSI_EQE_OPCODE +}; + +enum iscsi_error_types { + ISCSI_STATUS_NONE = 0, + ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, + ISCSI_CONN_ERROR_TASK_CID_MISMATCH, + ISCSI_CONN_ERROR_TASK_NOT_VALID, + ISCSI_CONN_ERROR_RQ_RING_IS_FULL, + ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, + ISCSI_CONN_ERROR_HQE_CACHING_FAILED, + ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, + ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, + ISCSI_CONN_ERROR_DATA_OVERRUN, + ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, + ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, + ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, + ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, + ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, + ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, + ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, + ISCSI_ERROR_UNKNOWN, + MAX_ISCSI_ERROR_TYPES +}; + +struct iscsi_mflags { + u8 mflags; +#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1 +#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0 +#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1 +#define 
ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1 +#define ISCSI_MFLAGS_RESERVED_MASK 0x3F +#define ISCSI_MFLAGS_RESERVED_SHIFT 2 +}; + +struct iscsi_sgl { + struct regpair sgl_addr; + __le16 updated_sge_size; + __le16 updated_sge_offset; + __le32 byte_offset; +}; + +union iscsi_mstorm_sgl { + struct iscsi_sgl sgl_struct; + struct iscsi_sge single_sge; +}; + +enum iscsi_ramrod_cmd_id { + ISCSI_RAMROD_CMD_ID_UNUSED = 0, + ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, + MAX_ISCSI_RAMROD_CMD_ID +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 3 +}; + +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; +}; + +struct iscsi_spe_conn_offload { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params tcp; +}; + +struct iscsi_spe_conn_offload_option2 { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params_opt2 tcp; +}; + +struct iscsi_spe_conn_termination { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 abortive; + u8 reserved0[7]; + struct regpair queue_cnts_addr; + struct regpair query_params_addr; +}; + +struct iscsi_spe_func_dstry { + struct iscsi_slow_path_hdr hdr; + __le16 reserved0; + __le32 reserved1; +}; + +struct iscsi_spe_func_init { + struct iscsi_slow_path_hdr hdr; + __le16 half_way_close_timeout; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 ll2_rx_queue_id; + u8 ooo_enable; + struct iscsi_debug_modes debug_mode; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; + __le32 reserved4; + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; +}; + +struct ystorm_iscsi_task_state { + union iscsi_data_desc_ctx sgl_ctx_union; + __le32 buffer_offset[2]; + __le16 bytes_nxt_dif; + __le16 rxmit_bytes_nxt_dif; + union iscsi_seq_num seq_num_union; + u8 dif_bytes_leftover; + u8 rxmit_dif_bytes_leftover; + __le16 reuse_count; + struct iscsi_dif_flags dif_flags; + u8 local_comp; + __le32 exp_r2t_sn; + __le32 sgl_offset[2]; +}; + +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + union iscsi_task_hdr pdu_hdr; +}; + +struct ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define 
USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +struct mstorm_iscsi_task_st_ctx { + union iscsi_mstorm_sgl sgl_union; + struct iscsi_dif_flags dif_flags; + struct iscsi_mflags flags; + u8 sgl_size; + u8 host_sge_index; + __le16 dix_cur_sge_offset; + __le16 dix_cur_sge_size; + __le32 data_offset_rtid; + u8 dif_offset; + u8 dix_sgl_size; + u8 dix_sge_index; + u8 task_type; + struct regpair sense_db; + struct regpair dix_sgl_cur_sge; + __le32 rem_task_size; + __le16 reuse_count; + __le16 dif_data_residue; + u8 reserved0[4]; + __le32 reserved1[1]; +}; + +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + u8 reserved2; + __le16 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define 
USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +struct iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +enum iscsi_task_type { + ISCSI_TASK_TYPE_INITIATOR_WRITE, + ISCSI_TASK_TYPE_INITIATOR_READ, + ISCSI_TASK_TYPE_MIDPATH, + ISCSI_TASK_TYPE_UNSOLIC, + ISCSI_TASK_TYPE_EXCHCLEANUP, + ISCSI_TASK_TYPE_IRRELEVANT, + ISCSI_TASK_TYPE_TARGET_WRITE, + ISCSI_TASK_TYPE_TARGET_READ, + ISCSI_TASK_TYPE_TARGET_RESPONSE, + ISCSI_TASK_TYPE_LOGIN_RESPONSE, + MAX_ISCSI_TASK_TYPE +}; + +union iscsi_ttt_txlen_union { + __le32 desired_tx_len; + __le32 ttt; +}; + +struct iscsi_uhqe { + __le32 reg1; +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 + __le32 reg2; +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +}; + +struct iscsi_wqe_field { + __le32 contlen_cdbsize_field; +#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24 +}; + +union iscsi_wqe_field_union { + struct iscsi_wqe_field cont_field; + __le32 prev_tid; +}; + +struct iscsi_wqe { + __le16 task_id; + u8 flags; +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3 +#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1 +#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 + struct iscsi_dif_flags prot_flags; + union iscsi_wqe_field_union 
cont_prevtid_union; +}; + +enum iscsi_wqe_type { + ISCSI_WQE_TYPE_NORMAL, + ISCSI_WQE_TYPE_TASK_CLEANUP, + ISCSI_WQE_TYPE_MIDDLE_PATH, + ISCSI_WQE_TYPE_LOGIN, + ISCSI_WQE_TYPE_FIRST_R2T_CONT, + ISCSI_WQE_TYPE_NONFIRST_R2T_CONT, + ISCSI_WQE_TYPE_RESPONSE, + MAX_ISCSI_WQE_TYPE +}; + +struct iscsi_xhqe { + union iscsi_ttt_txlen_union ttt_or_txlen; + __le32 exp_stat_sn; + struct iscsi_dif_flags prot_flags; + u8 total_ahs_length; + u8 opcode; + u8 flags; +#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7 +#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 3 +#define ISCSI_XHQE_SUPER_IO_MASK 0x1 +#define ISCSI_XHQE_SUPER_IO_SHIFT 4 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 5 +#define ISCSI_XHQE_RESERVED_MASK 0x3 +#define ISCSI_XHQE_RESERVED_SHIFT 6 + union iscsi_seq_num seq_num_union; + __le16 reserved1; +}; + +struct mstorm_iscsi_stats_drv { + struct regpair iscsi_rx_dropped_pdus_task_not_valid; +}; + +struct ooo_opaque { + __le32 cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; +}; + +struct pstorm_iscsi_stats_drv { + struct regpair iscsi_tx_bytes_cnt; + struct regpair iscsi_tx_packet_cnt; +}; + +struct tstorm_iscsi_stats_drv { + struct regpair iscsi_rx_bytes_cnt; + struct regpair iscsi_rx_packet_cnt; + struct regpair iscsi_rx_new_ooo_isle_events_cnt; + __le32 iscsi_cmdq_threshold_cnt; + __le32 iscsi_rq_threshold_cnt; + __le32 iscsi_immq_threshold_cnt; +}; + +struct ustorm_iscsi_stats_drv { + struct regpair iscsi_rx_data_pdu_cnt; + struct regpair iscsi_rx_r2t_pdu_cnt; + struct regpair iscsi_rx_total_pdu_cnt; +}; + +struct xstorm_iscsi_stats_drv { + struct regpair iscsi_tx_go_to_slow_start_event_cnt; + struct regpair iscsi_tx_fast_retransmit_event_cnt; +}; + +struct ystorm_iscsi_stats_drv { + struct regpair iscsi_tx_data_pdu_cnt; + struct regpair iscsi_tx_r2t_pdu_cnt; + struct regpair iscsi_tx_total_pdu_cnt; +}; + +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; + +struct tstorm_iscsi_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define 
TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +#endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 5f8fcaaa6504..7e441bdeabdc 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -25,10 +25,9 @@ } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) #define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) #define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) enum qed_chain_mode { /* Each Page contains a next pointer at its end */ @@ -47,16 +46,56 @@ enum qed_chain_use_mode { QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; +enum qed_chain_cnt_type { + /* The chain's size/prod/cons are kept in 16-bit variables */ + QED_CHAIN_CNT_TYPE_U16, + + /* The chain's size/prod/cons are kept in 32-bit variables */ + QED_CHAIN_CNT_TYPE_U32, +}; + struct qed_chain_next { struct regpair next_phys; void *next_virt; }; +struct qed_chain_pbl_u16 { + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct qed_chain_pbl_u32 { + u32 prod_page_idx; + u32 cons_page_idx; +}; + struct qed_chain_pbl { + /* Base address of a pre-allocated buffer for pbl */ dma_addr_t p_phys_table; void *p_virt_table; - u16 prod_page_idx; - u16 cons_page_idx; + + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. 
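The comment above describes the new side table in struct qed_chain_pbl: the PBL itself holds only the DMA addresses of the chain pages, while pp_virt_addr_tbl mirrors it entry-for-entry with their kernel virtual addresses, so advancing to the next PBL page never needs an address translation at runtime. A minimal sketch of the lookup this enables (the helper name is invented for illustration):

static inline void *example_pbl_page_virt(struct qed_chain *p_chain, u32 page_idx)
{
	/* Entry i holds the virtual address of the page whose DMA address
	 * sits in entry i of the pbl table.
	 */
	return p_chain->pbl.pp_virt_addr_tbl[page_idx];
}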
+ */ + void **pp_virt_addr_tbl; + + /* Index to current used page by producer/consumer */ + union { + struct qed_chain_pbl_u16 pbl16; + struct qed_chain_pbl_u32 pbl32; + } u; +}; + +struct qed_chain_u16 { + /* Cyclic index of next element to produce/consume */ + u16 prod_idx; + u16 cons_idx; +}; + +struct qed_chain_u32 { + /* Cyclic index of next element to produce/consume */ + u32 prod_idx; + u32 cons_idx; }; struct qed_chain { @@ -64,13 +103,25 @@ struct qed_chain { dma_addr_t p_phys_addr; void *p_prod_elem; void *p_cons_elem; - u16 page_cnt; + enum qed_chain_mode mode; enum qed_chain_use_mode intended_use; /* used to produce/consume */ - u16 capacity; /*< number of _usable_ elements */ - u16 size; /* number of elements */ - u16 prod_idx; - u16 cons_idx; + enum qed_chain_cnt_type cnt_type; + + union { + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; + + u32 page_cnt; + + /* Number of elements - capacity is for usable elements only, + * while size will contain total number of elements [for entire chain]. + */ + u32 capacity; + u32 size; + + /* Elements information for fast calculations */ u16 elem_per_page; u16 elem_per_page_mask; u16 elem_unusable; @@ -96,66 +147,69 @@ struct qed_chain { #define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) +#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) + /* Accessors */ static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) { - return p_chain->prod_idx; + return p_chain->u.chain16.prod_idx; } static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) { - return p_chain->cons_idx; + return p_chain->u.chain16.cons_idx; +} + +static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +{ + return p_chain->u.chain32.cons_idx; } static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { u16 used; - /* we don't need to trancate upon assignmet, as we assign u32->u16 */ - used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - - (u32)p_chain->cons_idx; + used = (u16) (((u32)0x10000 + + (u32)p_chain->u.chain16.prod_idx) - + (u32)p_chain->u.chain16.cons_idx); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; - return p_chain->capacity - used; + return (u16)(p_chain->capacity - used); } -static inline u8 qed_chain_is_full(struct qed_chain *p_chain) +static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { - return qed_chain_get_elem_left(p_chain) == p_chain->capacity; -} + u32 used; -static inline u8 qed_chain_is_empty(struct qed_chain *p_chain) -{ - return qed_chain_get_elem_left(p_chain) == 0; -} + used = (u32) (((u64)0x100000000ULL + + (u64)p_chain->u.chain32.prod_idx) - + (u64)p_chain->u.chain32.cons_idx); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - + p_chain->u.chain32.cons_idx / p_chain->elem_per_page; -static inline u16 qed_chain_get_elem_per_page( - struct qed_chain *p_chain) -{ - return p_chain->elem_per_page; + return p_chain->capacity - used; } -static inline u16 qed_chain_get_usable_per_page( - struct qed_chain *p_chain) +static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) { return p_chain->usable_per_page; } -static
inline u16 qed_chain_get_unusable_per_page( - struct qed_chain *p_chain) +static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { return p_chain->elem_unusable; } -static inline u16 qed_chain_get_size(struct qed_chain *p_chain) +static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) { - return p_chain->size; + return p_chain->page_cnt; } -static inline dma_addr_t -qed_chain_get_pbl_phys(struct qed_chain *p_chain) +static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { return p_chain->pbl.p_phys_table; } @@ -172,65 +226,63 @@ qed_chain_get_pbl_phys(struct qed_chain *p_chain) */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, - void **p_next_elem, - u16 *idx_to_inc, - u16 *page_to_inc) + void **p_next_elem, void *idx_to_inc, void *page_to_inc) { + struct qed_chain_next *p_next = NULL; + u32 page_index = 0; switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: - { - struct qed_chain_next *p_next = *p_next_elem; + p_next = *p_next_elem; *p_next_elem = p_next->next_virt; - *idx_to_inc += p_chain->elem_unusable; + if (is_chain_u16(p_chain)) + *(u16 *)idx_to_inc += p_chain->elem_unusable; + else + *(u32 *)idx_to_inc += p_chain->elem_unusable; break; - } case QED_CHAIN_MODE_SINGLE: *p_next_elem = p_chain->p_virt_addr; break; case QED_CHAIN_MODE_PBL: - /* It is assumed pages are sequential, next element needs - * to change only when passing going back to first from last. - */ - if (++(*page_to_inc) == p_chain->page_cnt) { - *page_to_inc = 0; - *p_next_elem = p_chain->p_virt_addr; + if (is_chain_u16(p_chain)) { + if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) + *(u16 *)page_to_inc = 0; + page_index = *(u16 *)page_to_inc; + } else { + if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) + *(u32 *)page_to_inc = 0; + page_index = *(u32 *)page_to_inc; } + *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; } } #define is_unusable_idx(p, idx) \ - (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_idx_u32(p, idx) \ + (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) +#define is_unusable_next_idx(p, idx) \ + ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \ + (p)->usable_per_page) -#define is_unusable_next_idx(p, idx) \ - ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page) +#define is_unusable_next_idx_u32(p, idx) \ + ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \ + (p)->usable_per_page) -#define test_ans_skip(p, idx) \ +#define test_and_skip(p, idx) \ do { \ - if (is_unusable_idx(p, idx)) { \ - (p)->idx += (p)->elem_unusable; \ + if (is_chain_u16(p)) { \ + if (is_unusable_idx(p, idx)) \ + (p)->u.chain16.idx += (p)->elem_unusable; \ + } else { \ + if (is_unusable_idx_u32(p, idx)) \ + (p)->u.chain32.idx += (p)->elem_unusable; \ } \ } while (0) /** - * @brief qed_chain_return_multi_produced - - * - * A chain in which the driver "Produces" elements should use this API - * to indicate previous produced elements are now consumed. 
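The produce/return contract described in the comment above works in pairs: qed_chain_produce() hands out the next element and advances the cyclic producer index, while qed_chain_return_produced() later advances the consumer index once the element has been processed. A hypothetical sketch of that cycle (function names invented; a u16-counter chain and the usual kernel headers assumed):

/* Fill one element if the ring has room. */
static void example_produce_one(struct qed_chain *p_chain,
				const void *src, size_t len)
{
	void *p_elem;

	if (!qed_chain_get_elem_left(p_chain))
		return;				/* ring is full */

	p_elem = qed_chain_produce(p_chain);	/* advances prod_idx */
	memcpy(p_elem, src, len);
}

/* Later, once that element has been consumed, release its slot. */
static void example_ack_one(struct qed_chain *p_chain)
{
	qed_chain_return_produced(p_chain);	/* advances cons_idx */
}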
- * - * @param p_chain - * @param num - */ -static inline void -qed_chain_return_multi_produced(struct qed_chain *p_chain, - u16 num) -{ - p_chain->cons_idx += num; - test_ans_skip(p_chain, cons_idx); -} - -/** * @brief qed_chain_return_produced - * * A chain in which the driver "Produces" elements should use this API @@ -240,8 +292,11 @@ qed_chain_return_multi_produced(struct qed_chain *p_chain, */ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { - p_chain->cons_idx++; - test_ans_skip(p_chain, cons_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx++; + else + p_chain->u.chain32.cons_idx++; + test_and_skip(p_chain, cons_idx); } /** @@ -257,21 +312,33 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain) */ static inline void *qed_chain_produce(struct qed_chain *p_chain) { - void *ret = NULL; - - if ((p_chain->prod_idx & p_chain->elem_per_page_mask) == - p_chain->next_page_mask) { - qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, - &p_chain->prod_idx, - &p_chain->pbl.prod_page_idx); + void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain16.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain16.prod_idx++; + } else { + if ((p_chain->u.chain32.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain32.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain32.prod_idx++; } - ret = p_chain->p_prod_elem; - p_chain->prod_idx++; + p_ret = p_chain->p_prod_elem; p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + p_chain->elem_size); - return ret; + return p_ret; } /** @@ -282,9 +349,9 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) * @param p_chain * @param num * - * @return u16, number of unusable BDs + * @return number of unusable BDs */ -static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) +static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { return p_chain->capacity; } @@ -297,11 +364,13 @@ static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain) * * @param p_chain */ -static inline void -qed_chain_recycle_consumed(struct qed_chain *p_chain) +static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { - test_ans_skip(p_chain, prod_idx); - p_chain->prod_idx++; + test_and_skip(p_chain, prod_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx++; + else + p_chain->u.chain32.prod_idx++; } /** @@ -316,21 +385,33 @@ qed_chain_recycle_consumed(struct qed_chain *p_chain) */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { - void *ret = NULL; - - if ((p_chain->cons_idx & p_chain->elem_per_page_mask) == - p_chain->next_page_mask) { + void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain16.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; + qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain16.cons_idx++; + } else { + if 
((p_chain->u.chain32.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain32.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, - &p_chain->cons_idx, - &p_chain->pbl.cons_page_idx); + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain32.cons_idx++; } - ret = p_chain->p_cons_elem; - p_chain->cons_idx++; + p_ret = p_chain->p_cons_elem; p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + p_chain->elem_size); - return ret; + return p_ret; } /** @@ -340,16 +421,33 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) */ static inline void qed_chain_reset(struct qed_chain *p_chain) { - int i; - - p_chain->prod_idx = 0; - p_chain->cons_idx = 0; - p_chain->p_cons_elem = p_chain->p_virt_addr; - p_chain->p_prod_elem = p_chain->p_virt_addr; + u32 i; + + if (is_chain_u16(p_chain)) { + p_chain->u.chain16.prod_idx = 0; + p_chain->u.chain16.cons_idx = 0; + } else { + p_chain->u.chain32.prod_idx = 0; + p_chain->u.chain32.cons_idx = 0; + } + p_chain->p_cons_elem = p_chain->p_virt_addr; + p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == QED_CHAIN_MODE_PBL) { - p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1; - p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1; + /* Use (page_cnt - 1) as a reset value for the prod/cons page's + * indices, to avoid unnecessary page advancing on the first + * call to qed_chain_produce/consume. Instead, the indices + * will be advanced to page_cnt and then will be wrapped to 0. + */ + u32 reset_val = p_chain->page_cnt - 1; + + if (is_chain_u16(p_chain)) { + p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; + p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; + } else { + p_chain->pbl.u.pbl32.prod_page_idx = reset_val; + p_chain->pbl.u.pbl32.cons_page_idx = reset_val; + } } switch (p_chain->intended_use) { @@ -377,168 +475,184 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) * @param intended_use * @param mode */ -static inline void qed_chain_init(struct qed_chain *p_chain, - void *p_virt_addr, - dma_addr_t p_phys_addr, - u16 page_cnt, - u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode) +static inline void qed_chain_init_params(struct qed_chain *p_chain, + u32 page_cnt, + u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type) { /* chain fixed parameters */ - p_chain->p_virt_addr = p_virt_addr; - p_chain->p_phys_addr = p_phys_addr; + p_chain->p_virt_addr = NULL; + p_chain->p_phys_addr = 0; p_chain->elem_size = elem_size; - p_chain->page_cnt = page_cnt; + p_chain->intended_use = intended_use; p_chain->mode = mode; + p_chain->cnt_type = cnt_type; - p_chain->intended_use = intended_use; p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - p_chain->usable_per_page = - USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->capacity = p_chain->usable_per_page * page_cnt; - p_chain->size = p_chain->elem_per_page * page_cnt; + p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; - p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->elem_per_page_mask); - if (mode == QED_CHAIN_MODE_NEXT_PTR) { - struct qed_chain_next *p_next; - u16 i; - - for (i = 0; i < page_cnt - 1; i++) { - /* Increment mem_phy to the next page. 
*/ - p_phys_addr += QED_CHAIN_PAGE_SIZE; - - /* Initialize the physical address of the next page. */ - p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + - elem_size * - p_chain-> - usable_per_page); - - p_next->next_phys.lo = DMA_LO_LE(p_phys_addr); - p_next->next_phys.hi = DMA_HI_LE(p_phys_addr); - - /* Initialize the virtual address of the next page. */ - p_next->next_virt = (void *)((u8 *)p_virt_addr + - QED_CHAIN_PAGE_SIZE); - - /* Move to the next page. */ - p_virt_addr = p_next->next_virt; - } - - /* Last page's next should point to beginning of the chain */ - p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + - elem_size * - p_chain->usable_per_page); + p_chain->page_cnt = page_cnt; + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; - p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr); - p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr); - p_next->next_virt = p_chain->p_virt_addr; - } - qed_chain_reset(p_chain); + p_chain->pbl.p_phys_table = 0; + p_chain->pbl.p_virt_table = NULL; + p_chain->pbl.pp_virt_addr_tbl = NULL; } /** - * @brief qed_chain_pbl_init - Initalizes a basic pbl chain - * struct + * @brief qed_chain_init_mem - + * + * Initializes a basic chain struct with its chain buffers + * * @param p_chain * @param p_virt_addr virtual address of allocated buffer's beginning * @param p_phys_addr physical address of allocated buffer's beginning - * @param page_cnt number of pages in the allocated buffer - * @param elem_size size of each element in the chain - * @param use_mode - * @param p_phys_pbl pointer to a pre-allocated side table - * which will hold physical page addresses. - * @param p_virt_pbl pointer to a pre allocated side table - * which will hold virtual page addresses. + * */ -static inline void -qed_chain_pbl_init(struct qed_chain *p_chain, - void *p_virt_addr, - dma_addr_t p_phys_addr, - u16 page_cnt, - u8 elem_size, - enum qed_chain_use_mode use_mode, - dma_addr_t p_phys_pbl, - dma_addr_t *p_virt_pbl) +static inline void qed_chain_init_mem(struct qed_chain *p_chain, + void *p_virt_addr, dma_addr_t p_phys_addr) { - dma_addr_t *p_pbl_dma = p_virt_pbl; - int i; - - qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt, - elem_size, use_mode, QED_CHAIN_MODE_PBL); + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; +} +/** + * @brief qed_chain_init_pbl_mem - + * + * Initializes a basic chain struct with its pbl buffers + * + * @param p_chain + * @param p_virt_pbl pointer to a pre-allocated side table which will hold + * virtual page addresses. + * @param p_phys_pbl pointer to a pre-allocated side table which will hold + * physical page addresses. + * @param pp_virt_addr_tbl + * pointer to a pre-allocated side table which will hold + * the virtual addresses of the chain pages.
+ * + */ +static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + void **pp_virt_addr_tbl) +{ p_chain->pbl.p_phys_table = p_phys_pbl; p_chain->pbl.p_virt_table = p_virt_pbl; - - /* Fill the PBL with physical addresses*/ - for (i = 0; i < page_cnt; i++) { - *p_pbl_dma = p_phys_addr; - p_phys_addr += QED_CHAIN_PAGE_SIZE; - p_pbl_dma++; - } + p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; } /** - * @brief qed_chain_set_prod - sets the prod to the given - * value + * @brief qed_chain_init_next_ptr_elem - + * + * Initializes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page * - * @param prod_idx - * @param p_prod_elem */ -static inline void qed_chain_set_prod(struct qed_chain *p_chain, - u16 prod_idx, - void *p_prod_elem) +static inline void +qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, + void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) { - p_chain->prod_idx = prod_idx; - p_chain->p_prod_elem = p_prod_elem; + struct qed_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; } /** - * @brief qed_chain_get_elem - + * @brief qed_chain_get_last_elem - * - * get a pointer to an element represented by absolute idx + * Returns a pointer to the last element of the chain * * @param p_chain - * @assumption p_chain->size is a power of 2 * - * @return void*, a pointer to next element + * @return void* */ -static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain, - u16 idx) +static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { - void *ret = NULL; - - if (idx >= p_chain->size) - return NULL; + struct qed_chain_next *p_next = NULL; + void *p_virt_addr = NULL; + u32 size, last_page_idx; - ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx; + if (!p_chain->p_virt_addr) + goto out; - return ret; + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + size = p_chain->elem_size * p_chain->usable_per_page; + p_virt_addr = p_chain->p_virt_addr; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size); + while (p_next->next_virt != p_chain->p_virt_addr) { + p_virt_addr = p_next->next_virt; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + + size); + } + break; + case QED_CHAIN_MODE_SINGLE: + p_virt_addr = p_chain->p_virt_addr; + break; + case QED_CHAIN_MODE_PBL: + last_page_idx = p_chain->page_cnt - 1; + p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + break; + } + /* p_virt_addr points at this stage to the last page of the chain */ + size = p_chain->elem_size * (p_chain->usable_per_page - 1); + p_virt_addr = (u8 *)p_virt_addr + size; +out: + return p_virt_addr; } /** - * @brief qed_chain_sge_inc_cons_prod + * @brief qed_chain_set_prod - sets the prod to the given value * - * for sge chains, producer isn't increased serially, the ring - * is expected to be full at all times. Once elements are - * consumed, they are immediately produced.
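With the one-call qed_chain_pbl_init() removed above, initialization is now staged: fixed parameters first, then the chain memory, then the PBL side tables, then a reset. A hedged sketch of how a caller (in practice the qed core's chain-allocation code) might wire up a single-page PBL chain; the function name, element size and page count are illustrative only:

static void example_init_one_page_pbl_chain(struct qed_chain *p_chain,
					    void *p_virt, dma_addr_t p_phys,
					    void *p_pbl_virt,
					    dma_addr_t p_pbl_phys,
					    void **pp_virt_addr_tbl)
{
	/* 1) Fixed parameters: one page, 8-byte elements, 16-bit counters. */
	qed_chain_init_params(p_chain, 1, 8, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			      QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);

	/* 2) The chain pages themselves. */
	qed_chain_init_mem(p_chain, p_virt, p_phys);

	/* 3) PBL side tables; the caller pre-filled the physical table and
	 * records the matching virtual address per page here.
	 */
	pp_virt_addr_tbl[0] = p_virt;
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);

	/* 4) Reset indices; PBL page indices start at page_cnt - 1. */
	qed_chain_reset(p_chain);
}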
+ * @param prod_idx + * @param p_prod_elem + */ +static inline void qed_chain_set_prod(struct qed_chain *p_chain, + u32 prod_idx, void *p_prod_elem) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx = (u16) prod_idx; + else + p_chain->u.chain32.prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief qed_chain_pbl_zero_mem - set chain memory to 0 * * @param p_chain - * @param cnt - * - * @return inline void */ -static inline void -qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain, - u16 cnt) +static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { - p_chain->prod_idx += cnt; - p_chain->cons_idx += cnt; + u32 i, page_cnt; + + if (p_chain->mode != QED_CHAIN_MODE_PBL) + return; + + page_cnt = qed_chain_get_page_cnt(p_chain); + + for (i = 0; i < page_cnt; i++) + memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + QED_CHAIN_PAGE_SIZE); } #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 6c876a63558d..4475a9d8ae15 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -114,6 +114,7 @@ struct qed_queue_start_common_params { u8 vport_id; u16 sb; u16 sb_idx; + u16 vf_qid; }; struct qed_tunn_params { @@ -128,11 +129,73 @@ struct qed_eth_cb_ops { void (*force_mac) (void *dev, u8 *mac); }; +#ifdef CONFIG_DCB +/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration + * of dcbnl_rtnl_ops structure. + */ +struct qed_eth_dcbnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); + int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); + + /* CEE std */ + u8 (*getstate)(struct qed_dev *cdev); + u8 (*setstate)(struct qed_dev *cdev, u8 state); + void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); + void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); + u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); + int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); + u8 (*getpfcstate)(struct qed_dev *cdev); + int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); + u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); + + /* DCBX configuration */ + u8 (*getdcbx)(struct qed_dev *cdev); + void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + u8 (*setall)(struct qed_dev *cdev); + int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); + void (*setpfcstate)(struct qed_dev *cdev, u8 state); + int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); + u8 
(*setdcbx)(struct qed_dev *cdev, u8 state); + u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); + + /* Peer apps */ + int (*peer_getappinfo)(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count); + int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); + + /* CEE peer */ + int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); + int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); +}; +#endif + struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV const struct qed_iov_hv_ops *iov; #endif +#ifdef CONFIG_DCB + const struct qed_eth_dcbnl_ops *dcb; +#endif int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4c29439f54bf..b1e3c57c7117 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -34,6 +34,96 @@ enum dcbx_protocol_type { DCBX_MAX_PROTOCOL_TYPE }; +#ifdef CONFIG_DCB +#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 +#define QED_LLDP_PORT_ID_STAT_LEN 4 +#define QED_DCBX_MAX_APP_PROTOCOL 32 +#define QED_MAX_PFC_PRIORITIES 8 +#define QED_DCBX_DSCP_SIZE 64 + +struct qed_dcbx_lldp_remote { + u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN]; + bool enable_rx; + bool enable_tx; + u32 tx_interval; + u32 max_credit; +}; + +struct qed_dcbx_lldp_local { + u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN]; +}; + +struct qed_dcbx_app_prio { + u8 roce; + u8 roce_v2; + u8 fcoe; + u8 iscsi; + u8 eth; +}; + +struct qed_dbcx_pfc_params { + bool willing; + bool enabled; + u8 prio[QED_MAX_PFC_PRIORITIES]; + u8 max_tc; +}; + +struct qed_app_entry { + bool ethtype; + bool enabled; + u8 prio; + u16 proto_id; + enum dcbx_protocol_type proto_type; +}; + +struct qed_dcbx_params { + struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL]; + u16 num_app_entries; + bool app_willing; + bool app_valid; + bool app_error; + bool ets_willing; + bool ets_enabled; + bool ets_cbs; + bool valid; + u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES]; + struct qed_dbcx_pfc_params pfc; + u8 max_ets_tc; +}; + +struct qed_dcbx_admin_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_remote_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_operational_params { + struct qed_dcbx_app_prio app_prio; + struct qed_dcbx_params params; + bool valid; + bool enabled; + bool ieee; + bool cee; + u32 err; +}; + +struct qed_dcbx_get { + struct qed_dcbx_operational_params operational; + struct qed_dcbx_lldp_remote lldp_remote; + struct qed_dcbx_lldp_local lldp_local; + struct qed_dcbx_remote_params remote; + struct qed_dcbx_admin_params local; +}; +#endif + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -58,8 +148,70 @@ struct qed_eth_pf_params { u16 num_cons; }; +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ +struct qed_iscsi_pf_params { + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + u32 max_cwnd; + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 dup_ack_threshold; + u16 tx_sws_timer; + u16 min_rto; + u16 min_rto_rt; + u16 max_rto; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The
following parameters are used during protocol-init */ + u16 half_way_close_timeout; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 cmdq_xoff_threshold; + u16 cmdq_xon_threshold; + u16 rq_buffer_size; + + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 log_page_size; + u8 rqe_log_size; + u8 max_fin_rt; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + u8 ooo_enable; + + u8 is_target; + u8 bdq_pbl_num_entries[2]; +}; + +struct qed_rdma_pf_params { + /* Supplied to QED during resource allocation (may affect the ILT and + * the doorbell BAR). + */ + u32 min_dpis; /* number of requested DPIs */ + u32 num_mrs; /* number of requested memory regions */ + u32 num_qps; /* number of requested Queue Pairs */ + u32 num_srqs; /* number of requested SRQ */ + u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ + u8 gl_pi; /* protocol index */ + + /* Will allocate rate limiters to be used with QPs */ + u8 enable_dcqcn; +}; + struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; + struct qed_iscsi_pf_params iscsi_pf_params; + struct qed_rdma_pf_params rdma_pf_params; }; enum qed_int_mode { @@ -100,6 +252,8 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; + bool rdma_supported; + u32 flash_size; u8 mf_mode; bool tx_switching; @@ -111,6 +265,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, + QED_PROTOCOL_ISCSI, }; struct qed_link_params { @@ -325,7 +480,8 @@ struct qed_common_ops { int (*chain_alloc)(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, - u16 num_elems, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, size_t elem_size, struct qed_chain *p_chain); @@ -333,6 +489,30 @@ struct qed_common_ops { struct qed_chain *p_chain); /** + * @brief get_coalesce - Get coalesce parameters in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * + */ + void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal); + +/** + * @brief set_coalesce - Configure Rx coalesce value in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * @param qid - Queue index + * @param sb_id - Status Block Id + * + * @return 0 on success, error otherwise. + */ + int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id); + +/** * @brief set_led - Configure LED mode * * @param cdev diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h new file mode 100644 index 000000000000..187991c1f439 --- /dev/null +++ b/include/linux/qed/rdma_common.h @@ -0,0 +1,44 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
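As the comments in qed_iscsi_pf_params and qed_rdma_pf_params note, these structures are filled by the protocol driver and handed to qed before the slowpath starts, so the core can size resources (e.g. the ILT and doorbell BAR) accordingly. A hypothetical sketch of such a caller (function name and all resource counts invented):

static void example_request_rdma_resources(struct qed_pf_params *pf_params)
{
	struct qed_rdma_pf_params *p = &pf_params->rdma_pf_params;

	memset(pf_params, 0, sizeof(*pf_params));
	p->min_dpis = 8;	/* doorbell pages to reserve */
	p->num_qps = 1024;	/* requested Queue Pairs */
	p->num_mrs = 1024;	/* requested memory regions */
	p->num_srqs = 64;
	p->enable_dcqcn = 0;	/* no per-QP rate limiters */
}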
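The get_coalesce/set_coalesce callbacks documented above give protocol drivers ethtool-style interrupt-coalescing control per queue. A hypothetical caller (name invented) that only touches the hardware when a value actually changes:

static int example_update_coalesce(struct qed_dev *cdev,
				   const struct qed_common_ops *ops,
				   u16 rx_usecs, u16 tx_usecs,
				   u8 qid, u16 sb_id)
{
	u16 cur_rx, cur_tx;

	ops->get_coalesce(cdev, &cur_rx, &cur_tx);
	if (cur_rx == rx_usecs && cur_tx == tx_usecs)
		return 0;	/* nothing to reconfigure */

	return ops->set_coalesce(cdev, rx_usecs, tx_usecs, qid, sb_id);
}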
+ */ + +#ifndef __RDMA_COMMON__ +#define __RDMA_COMMON__ +/************************/ +/* RDMA FW CONSTANTS */ +/************************/ + +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) + +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) + +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) + +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) + +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) + +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS + +#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) + +struct rdma_srq_id { + __le16 srq_idx; + __le16 opaque_fid; +}; + +struct rdma_srq_producers { + __le32 sge_prod; + __le32 wqe_prod; +}; + +#endif /* __RDMA_COMMON__ */ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h new file mode 100644 index 000000000000..2eeaf3dc6646 --- /dev/null +++ b/include/linux/qed/roce_common.h @@ -0,0 +1,17 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __ROCE_COMMON__ +#define __ROCE_COMMON__ + +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) + +#define ROCE_MAX_QPS (32 * 1024) + +#endif /* __ROCE_COMMON__ */ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h new file mode 100644 index 000000000000..3b8e1efd9bc2 --- /dev/null +++ b/include/linux/qed/storage_common.h @@ -0,0 +1,91 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef __STORAGE_COMMON__ +#define __STORAGE_COMMON__ + +#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) + +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_NUM_IDS (2) + +#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) + +struct scsi_bd { + struct regpair address; + struct regpair opaque; +}; + +struct scsi_bdq_ram_drv_data { + __le16 external_producer; + __le16 reserved0[3]; +}; + +struct scsi_drv_cmdq { + __le16 cmdq_cons; + __le16 reserved0; + __le32 reserved1; +}; + +struct scsi_init_func_params { + __le16 num_tasks; + u8 log_page_size; + u8 debug_mode; + u8 reserved2[12]; +}; + +struct scsi_init_func_queues { + struct regpair glbl_q_params_addr; + __le16 rq_buffer_size; + __le16 cq_num_entries; + __le16 cmdq_num_entries; + u8 bdq_resource_id; + u8 q_validity; +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 + u8 num_queues; + u8 queue_relative_offset; + u8 cq_sb_pi; + u8 cmdq_sb_pi; + __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; + __le16 reserved0; + u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; + __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; + __le16 cmdq_xoff_threshold; + __le16 cmdq_xon_threshold; + __le32 reserved1; +}; + +struct scsi_ram_per_bdq_resource_drv_data { + struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; +}; + +struct scsi_sge { + struct regpair sge_addr; + __le16 sge_len; + __le16 reserved0; + __le32 reserved1; +}; + +struct scsi_terminate_extra_params { + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 reserved[4]; +}; + +#endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h new file mode 100644 index 000000000000..accba0e6b704 --- /dev/null +++ b/include/linux/qed/tcp_common.h @@ -0,0 +1,226 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
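
The SCSI_INIT_FUNC_QUEUES_*_MASK/_SHIFT pairs above follow qed's usual open-coded bitfield convention. Marking the RQ and command queue valid might look like the following sketch (open-coded here for clarity; the qed driver normally wraps this pattern in its own SET_FIELD() helper):

static void scsi_queues_mark_valid(struct scsi_init_func_queues *q)
{
	u8 validity = 0;

	/* RQ and CMDQ valid; immediate-data BDQ left invalid */
	validity |= (1 & SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK)
			<< SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT;
	validity |= (1 & SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK)
			<< SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT;
	q->q_validity = validity;
}
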
+ */ + +#ifndef __TCP_COMMON__ +#define __TCP_COMMON__ + +#define TCP_INVALID_TIMEOUT_VAL -1 + +enum tcp_connect_mode { + TCP_CONNECT_ACTIVE, + TCP_CONNECT_PASSIVE, + MAX_TCP_CONNECT_MODE +}; + +struct tcp_init_params { + __le32 max_cwnd; + __le16 dup_ack_threshold; + __le16 tx_sws_timer; + __le16 min_rto; + __le16 min_rto_rt; + __le16 max_rto; + u8 maxfinrt; + u8 reserved[1]; +}; + +enum tcp_ip_version { + TCP_IPV4, + TCP_IPV6, + MAX_TCP_IP_VERSION +}; + +struct tcp_offload_params { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + u8 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 + u8 ip_version; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 srtt; + __le32 cwnd; + __le32 ss_thresh; + __le16 reserved1; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 rcv_wnd; + __le32 snd_wl1; + __le32 ts_time; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le16 rtt_var; + __le16 reserved2; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; + u8 snd_wnd_scale; + u8 ack_frequency; + __le16 da_timeout_value; + __le32 ts_ticks_per_second; +}; + +struct tcp_offload_params_opt2 { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + u8 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 + u8 ip_version; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 syn_ip_payload_length; + __le32 syn_phy_addr_lo; + __le32 syn_phy_addr_hi; + __le32 reserved1[22]; +}; + +enum tcp_seg_placement_event { + TCP_EVENT_ADD_PEN, + TCP_EVENT_ADD_NEW_ISLE, + TCP_EVENT_ADD_ISLE_RIGHT, + TCP_EVENT_ADD_ISLE_LEFT, + TCP_EVENT_JOIN, + TCP_EVENT_NOP, + MAX_TCP_SEG_PLACEMENT_EVENT +}; + +struct tcp_update_params { + __le16 flags; +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 
0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 mss; + u8 ttl; + u8 tos_or_tc; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 flow_label; + __le32 initial_rcv_wnd; + u8 ka_max_probe_cnt; + u8 reserved1[7]; +}; + +struct tcp_upload_params { + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 rcv_wnd; + __le32 snd_wl1; + __le32 cwnd; + __le32 ss_thresh; + __le16 srtt; + __le16 rtt_var; + __le32 ts_time; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le32 reserved; +}; + +#endif /* __TCP_COMMON__ */ diff --git a/include/linux/quota.h b/include/linux/quota.h index 9dfb6bce8c9e..8486d27cf360 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -200,8 +200,8 @@ struct mem_dqblk { qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ qsize_t dqb_isoftlimit; /* preferred inode limit */ qsize_t dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ + time64_t dqb_btime; /* time limit for excessive disk use */ + time64_t dqb_itime; /* time limit for excessive inode use */ }; /* diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index eca6f626c16e..cbfee507c839 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -291,6 +291,7 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); void *radix_tree_tag_set(struct 
radix_tree_root *root, unsigned long index, unsigned int tag); diff --git a/include/linux/random.h b/include/linux/random.h index e47e533742b5..3d6e9815cd85 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) #ifdef CONFIG_ARCH_RANDOM # include <asm/archrandom.h> #else -static inline int arch_get_random_long(unsigned long *v) +static inline bool arch_get_random_long(unsigned long *v) { return 0; } -static inline int arch_get_random_int(unsigned int *v) +static inline bool arch_get_random_int(unsigned int *v) { return 0; } -static inline int arch_has_random(void) +static inline bool arch_has_random(void) { return 0; } -static inline int arch_get_random_seed_long(unsigned long *v) +static inline bool arch_get_random_seed_long(unsigned long *v) { return 0; } -static inline int arch_get_random_seed_int(unsigned int *v) +static inline bool arch_get_random_seed_int(unsigned int *v) { return 0; } -static inline int arch_has_random_seed(void) +static inline bool arch_has_random_seed(void) { return 0; } diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index b6900099ea81..e585018498d5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -76,6 +76,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *); /* Fast replacement of a single node without remove/rebalance/add/rebalance */ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); +extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 14d7b831b63a..d076183e49be 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -130,6 +130,19 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, WRITE_ONCE(root->rb_node, new); } +static inline void +__rb_change_child_rcu(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + rcu_assign_pointer(parent->rb_left, new); + else + rcu_assign_pointer(parent->rb_right, new); + } else + rcu_assign_pointer(root->rb_node, new); +} + extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5f1533e3d032..1aa62e1a761b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -45,6 +45,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/ktime.h> +#include <linux/irqflags.h> #include <asm/barrier.h> @@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void) * in the inner idle loop. * * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) - * will tell RCU that it needs to pay attending, invoke its argument - * (in this example, a call to the do_something_with_RCU() function), + * will tell RCU that it needs to pay attention, invoke its argument + * (in this example, calling the do_something_with_RCU() function), * and then tell RCU to go back to ignoring this CPU. It is permissible - * to nest RCU_NONIDLE() wrappers, but the nesting level is currently - * quite limited. If deeper nesting is required, it will be necessary - * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. 
+ * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is + * on the order of a million or so, even on 32-bit systems). It is + * not legal to block within RCU_NONIDLE(), nor is it permissible to + * transfer control either into or out of RCU_NONIDLE()'s statement. */ #define RCU_NONIDLE(a) \ do { \ @@ -611,6 +613,12 @@ static inline void rcu_preempt_sleep_check(void) rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) +#define rcu_dereference_raw(p) \ +({ \ + /* Dependency order vs. p above. */ \ + typeof(p) ________p1 = lockless_dereference(p); \ + ((typeof(*p) __force __kernel *)(________p1)); \ +}) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable @@ -649,7 +657,16 @@ static inline void rcu_preempt_sleep_check(void) * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ -#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing @@ -729,8 +746,6 @@ static inline void rcu_preempt_sleep_check(void) __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ __rcu) -#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ - /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 3dc08ce15426..2c12cc5af744 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -95,6 +95,45 @@ struct reg_sequence { #define regmap_fields_force_update_bits(field, id, mask, val) \ regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) +/** + * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs + * @map: Regmap to read from + * @addr: Address to poll + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. Must not be called + * from atomic context if sleep_us or timeout_us are used. + * + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + */ +#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ +({ \ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + int ret; \ + might_sleep_if(sleep_us); \ + for (;;) { \ + ret = regmap_read((map), (addr), &(val)); \ + if (ret) \ + break; \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + ret = regmap_read((map), (addr), &(val)); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + ret ?: ((cond) ? 
0 : -ETIMEDOUT); \ +}) + #ifdef CONFIG_REGMAP enum regmap_endian { @@ -851,6 +890,12 @@ struct regmap_irq { * @num_type_reg: Number of type registers. * @type_reg_stride: Stride to use for chips where type registers are not * contiguous. + * @handle_pre_irq: Driver specific callback to handle interrupt from device + * before regmap_irq_handler process the interrupts. + * @handle_post_irq: Driver specific callback to handle interrupt from device + * after handling the interrupts in regmap_irq_handler(). + * @irq_drv_data: Driver specific IRQ data which is passed as parameter when + * driver specific pre/post interrupt handler is called. */ struct regmap_irq_chip { const char *name; @@ -877,6 +922,10 @@ struct regmap_irq_chip { int num_type_reg; unsigned int type_reg_stride; + + int (*handle_pre_irq)(void *irq_drv_data); + int (*handle_post_irq)(void *irq_drv_data); + void *irq_drv_data; }; struct regmap_irq_chip_data; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 48603506f8de..cae500b2c1d7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -224,7 +224,6 @@ int regulator_bulk_force_disable(int num_consumers, void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers); -int regulator_can_change_voltage(struct regulator *regulator); int regulator_count_voltages(struct regulator *regulator); int regulator_list_voltage(struct regulator *regulator, unsigned selector); int regulator_is_supported_voltage(struct regulator *regulator, @@ -436,11 +435,6 @@ static inline void regulator_bulk_free(int num_consumers, { } -static inline int regulator_can_change_voltage(struct regulator *regulator) -{ - return 0; -} - static inline int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index a43a5ca1167b..80cb40b7c88d 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -1,5 +1,6 @@ /* - * da9211.h - Regulator device driver for DA9211/DA9213/DA9215 + * da9211.h - Regulator device driver for DA9211/DA9212 + * /DA9213/DA9214/DA9215 * Copyright (C) 2015 Dialog Semiconductor Ltd. * * This program is free software; you can redistribute it and/or @@ -22,7 +23,9 @@ enum da9211_chip_id { DA9211, + DA9212, DA9213, + DA9214, DA9215, }; diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h new file mode 100644 index 000000000000..67011cd1ce55 --- /dev/null +++ b/include/linux/regulator/mt6323-regulator.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 MediaTek Inc. + * Author: Chen Zhong <chen.zhong@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
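
The regmap_read_poll_timeout() helper documented above replaces hand-rolled register polling loops. A minimal usage sketch; MYDEV_STATUS and MYDEV_READY stand in for a real device's status register and ready bit:

unsigned int status;
int ret;

/* sleep ~1 ms between reads, give up after 100 ms */
ret = regmap_read_poll_timeout(map, MYDEV_STATUS, status,
			       status & MYDEV_READY,
			       1000, 100000);
if (ret == -ETIMEDOUT)
	dev_err(dev, "device did not become ready\n");
else if (ret)
	dev_err(dev, "status read failed: %d\n", ret);

Note that the sleeping variant must not be called from atomic context, as the kernel-doc above warns.
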
+ */ + +#ifndef __LINUX_REGULATOR_MT6323_H +#define __LINUX_REGULATOR_MT6323_H + +enum { + MT6323_ID_VPROC = 0, + MT6323_ID_VSYS, + MT6323_ID_VPA, + MT6323_ID_VTCXO, + MT6323_ID_VCN28, + MT6323_ID_VCN33_BT, + MT6323_ID_VCN33_WIFI, + MT6323_ID_VA, + MT6323_ID_VCAMA, + MT6323_ID_VIO28 = 9, + MT6323_ID_VUSB, + MT6323_ID_VMC, + MT6323_ID_VMCH, + MT6323_ID_VEMC3V3, + MT6323_ID_VGP1, + MT6323_ID_VGP2, + MT6323_ID_VGP3, + MT6323_ID_VCN18, + MT6323_ID_VSIM1, + MT6323_ID_VSIM2, + MT6323_ID_VRTC, + MT6323_ID_VCAMAF, + MT6323_ID_VIBR, + MT6323_ID_VRF18, + MT6323_ID_VM, + MT6323_ID_VIO18, + MT6323_ID_VCAMD, + MT6323_ID_VCAMIO, + MT6323_ID_RG_MAX, +}; + +#define MT6323_MAX_REGULATOR MT6323_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6323_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2b0fad83683f..b46bb5620a76 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -165,7 +165,7 @@ void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); -void page_add_file_rmap(struct page *); +void page_add_file_rmap(struct page *, bool); void page_remove_rmap(struct page *, bool); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index c006cc900c44..2daece8979f7 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -89,8 +89,9 @@ void net_inc_egress_queue(void); void net_dec_egress_queue(void); #endif -extern void rtnetlink_init(void); -extern void __rtnl_unlock(void); +void rtnetlink_init(void); +void __rtnl_unlock(void); +void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); #define ASSERT_RTNL() do { \ if (unlikely(!rtnl_is_locked())) { \ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index d37fbb34d06f..dd1d14250340 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -23,10 +23,11 @@ struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK #include <linux/rwsem-spinlock.h> /* use a generic implementation */ +#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE #else /* All arch specific implementations share the same struct */ struct rw_semaphore { - long count; + atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER @@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { - return sem->count != 0; + return atomic_long_read(&sem->count) != 0; } +#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) #endif /* Common initializer macros and functions */ @@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) #endif #define __RWSEM_INITIALIZER(name) \ - { .count = RWSEM_UNLOCKED_VALUE, \ + { __RWSEM_INIT_COUNT(name), \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index a53915cd5581..c68307bc306f 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -35,21 +35,24 @@ struct sockaddr_rxrpc { */ #define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ -#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ 
+#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ /* * RxRPC control messages + * - If neither abort or accept are specified, the message is a data message. * - terminal messages mean that a user call ID tag can be recycled + * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() */ -#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */ -#define RXRPC_ABORT 2 /* abort request / notification [terminal] */ -#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */ -#define RXRPC_NET_ERROR 5 /* network error received [terminal] */ -#define RXRPC_BUSY 6 /* server busy received [terminal] */ -#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */ -#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */ -#define RXRPC_ACCEPT 9 /* [Server] accept request */ +#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */ +#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */ +#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */ +#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */ +#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */ +#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */ +#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ +#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ +#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ /* * RxRPC security levels diff --git a/include/linux/sched.h b/include/linux/sched.h index 253538f29ade..553af2923824 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -219,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p); #define TASK_WAKING 256 #define TASK_PARKED 512 #define TASK_NOLOAD 1024 -#define TASK_STATE_MAX 2048 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN" +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -522,6 +523,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ #define MMF_OOM_REAPED 21 /* mm has been already reaped */ +#define MMF_OOM_NOT_REAPABLE 22 /* mm couldn't be reaped */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -1948,6 +1950,32 @@ static inline int tsk_nr_cpus_allowed(struct task_struct *p) #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10 +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. 
+ */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); @@ -2139,6 +2167,9 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 48ec7651989b..923266cd294a 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -111,6 +111,7 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; + struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index a3d7c0d4a03e..2f44e2013654 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -352,9 +352,15 @@ struct earlycon_id { extern const struct earlycon_id __earlycon_table[]; extern const struct earlycon_id __earlycon_table_end[]; +#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) +#define EARLYCON_USED_OR_UNUSED __used +#else +#define EARLYCON_USED_OR_UNUSED __maybe_unused +#endif + #define OF_EARLYCON_DECLARE(_name, compat, fn) \ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ - __used __section(__earlycon_table) \ + EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ = { .name = __stringify(_name), \ .compatible = compat, \ .setup = fn } diff --git a/include/linux/sfi.h b/include/linux/sfi.h index d9b436f09925..e0e1597ef9e6 100644 --- a/include/linux/sfi.h +++ b/include/linux/sfi.h @@ -156,6 +156,7 @@ struct sfi_device_table_entry { #define SFI_DEV_TYPE_UART 2 #define SFI_DEV_TYPE_HSI 3 #define SFI_DEV_TYPE_IPC 4 +#define SFI_DEV_TYPE_SD 5 u8 host_num; /* attached to host 0, 1...*/ u16 addr; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 4d4780c00d34..ff078e7043b6 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -16,8 +16,9 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - struct shared_policy policy; /* NUMA memory alloc policy */ + struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ + struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ struct inode vfs_inode; }; @@ -28,10 +29,14 @@ struct shmem_sb_info { unsigned long max_inodes; /* How many inodes are allowed */ unsigned long free_inodes; /* How many are left for allocation */ spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + umode_t mode; /* Mount mode for root directory */ + unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ - umode_t mode; /* Mount mode for root directory */ struct mempolicy *mpol; /* default memory policy for mappings */ + spinlock_t shrinklist_lock; /* Protects shrinklist */ + struct list_head shrinklist; /* List of shinkable inodes */ + unsigned long shrinklist_len; /* Length of shrinklist */ }; static inline struct shmem_inode_info 
*SHMEM_I(struct inode *inode) @@ -49,6 +54,8 @@ extern struct file *shmem_file_setup(const char *name, extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); +extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); extern void shmem_unlock_mapping(struct address_space *mapping); @@ -61,6 +68,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); +/* Flag allocation requirements to shmem_getpage */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ + SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ + SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ +}; + +extern int shmem_getpage(struct inode *inode, pgoff_t index, + struct page **pagep, enum sgp_type sgp); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { @@ -68,6 +88,18 @@ static inline struct page *shmem_read_mapping_page( mapping_gfp_mask(mapping)); } +static inline bool shmem_file(struct file *file) +{ + if (!IS_ENABLED(CONFIG_SHMEM)) + return false; + if (!file || !file->f_mapping) + return false; + return shmem_mapping(file->f_mapping); +} + +extern bool shmem_charge(struct inode *inode, long pages); +extern void shmem_uncharge(struct inode *inode, long pages); + #ifdef CONFIG_TMPFS extern int shmem_add_seals(struct file *file, unsigned int seals); @@ -83,4 +115,13 @@ static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) #endif +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +extern bool shmem_huge_enabled(struct vm_area_struct *vma); +#else +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return false; +} +#endif + #endif diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h new file mode 100644 index 000000000000..f4dfade428f0 --- /dev/null +++ b/include/linux/skb_array.h @@ -0,0 +1,178 @@ +/* + * Definitions for the 'struct skb_array' datastructure. + * + * Author: + * Michael S. Tsirkin <mst@redhat.com> + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Limited-size FIFO of skbs. Can be used more or less whenever + * sk_buff_head can be used, except you need to know the queue size in + * advance. + * Implemented as a type-safe wrapper around ptr_ring. + */ + +#ifndef _LINUX_SKB_ARRAY_H +#define _LINUX_SKB_ARRAY_H 1 + +#ifdef __KERNEL__ +#include <linux/ptr_ring.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#endif + +struct skb_array { + struct ptr_ring ring; +}; + +/* Might be slightly faster than skb_array_full below, but callers invoking + * this in a loop must use a compiler barrier, for example cpu_relax(). 
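
Taken together, the skb_array accessors declared below wrap ptr_ring into a bounded, type-safe skb FIFO. A single-producer/single-consumer sketch (illustrative only; netif_receive_skb() as the consumer is just an example):

struct skb_array a;

if (skb_array_init(&a, 64, GFP_KERNEL))	/* room for 64 skbs */
	return -ENOMEM;

if (skb_array_produce(&a, skb))		/* non-zero means ring full */
	kfree_skb(skb);

skb = skb_array_consume(&a);		/* NULL when the ring is empty */
if (skb)
	netif_receive_skb(skb);

skb_array_cleanup(&a);			/* frees any skbs still queued */
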
+ */ +static inline bool __skb_array_full(struct skb_array *a) +{ + return __ptr_ring_full(&a->ring); +} + +static inline bool skb_array_full(struct skb_array *a) +{ + return ptr_ring_full(&a->ring); +} + +static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce(&a->ring, skb); +} + +static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_irq(&a->ring, skb); +} + +static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_bh(&a->ring, skb); +} + +static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_any(&a->ring, skb); +} + +/* Might be slightly faster than skb_array_empty below, but only safe if the + * array is never resized. Also, callers invoking this in a loop must take care + * to use a compiler barrier, for example cpu_relax(). + */ +static inline bool __skb_array_empty(struct skb_array *a) +{ + return !__ptr_ring_peek(&a->ring); +} + +static inline bool skb_array_empty(struct skb_array *a) +{ + return ptr_ring_empty(&a->ring); +} + +static inline bool skb_array_empty_bh(struct skb_array *a) +{ + return ptr_ring_empty_bh(&a->ring); +} + +static inline bool skb_array_empty_irq(struct skb_array *a) +{ + return ptr_ring_empty_irq(&a->ring); +} + +static inline bool skb_array_empty_any(struct skb_array *a) +{ + return ptr_ring_empty_any(&a->ring); +} + +static inline struct sk_buff *skb_array_consume(struct skb_array *a) +{ + return ptr_ring_consume(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) +{ + return ptr_ring_consume_irq(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) +{ + return ptr_ring_consume_any(&a->ring); +} + +static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a) +{ + return ptr_ring_consume_bh(&a->ring); +} + +static inline int __skb_array_len_with_tag(struct sk_buff *skb) +{ + if (likely(skb)) { + int len = skb->len; + + if (skb_vlan_tag_present(skb)) + len += VLAN_HLEN; + + return len; + } else { + return 0; + } +} + +static inline int skb_array_peek_len(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_irq(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_bh(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_any(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_init(&a->ring, size, gfp); +} + +static void __skb_array_destroy_skb(void *ptr) +{ + kfree_skb(ptr); +} + +static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); +} + +static inline int skb_array_resize_multiple(struct skb_array **rings, + int nrings, int size, gfp_t gfp) +{ + BUILD_BUG_ON(offsetof(struct skb_array, ring)); + return ptr_ring_resize_multiple((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); +} + +static inline void skb_array_cleanup(struct skb_array *a) +{ + ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb); +} + +#endif /* _LINUX_SKB_ARRAY_H */ diff --git 
a/include/linux/skbuff.h b/include/linux/skbuff.h index f39b37180c41..6f0b3e0adc73 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -37,6 +37,7 @@ #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> +#include <linux/if_packet.h> #include <net/flow.h> /* The interface for checksum offload between the stack and networking drivers @@ -301,6 +302,11 @@ struct sk_buff; #endif extern int sysctl_max_skb_frags; +/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to + * segment using its current segmentation instead. + */ +#define GSO_BY_FRAGS 0xFFFF + typedef struct skb_frag_struct skb_frag_t; struct skb_frag_struct { @@ -482,6 +488,8 @@ enum { SKB_GSO_PARTIAL = 1 << 13, SKB_GSO_TUNNEL_REMCSUM = 1 << 14, + + SKB_GSO_SCTP = 1 << 15, }; #if BITS_PER_LONG > 32 @@ -874,6 +882,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb) return (struct rtable *)skb_dst(skb); } +/* For mangling skb->pkt_type from user space side from applications + * such as nft, tc, etc, we only allow a conservative subset of + * possible pkt_types to be set. +*/ +static inline bool skb_pkt_type_ok(u32 ptype) +{ + return ptype <= PACKET_OTHERHOST; +} + void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_tx_error(struct sk_buff *skb); @@ -3007,6 +3024,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); +bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); diff --git a/include/linux/slab.h b/include/linux/slab.h index aeb3e6d00a66..1a4ea551aae5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -565,6 +565,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { if (size != 0 && n > SIZE_MAX / size) return NULL; + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc(n * size, flags); return __kmalloc(n * size, flags); } diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8694f7a5d92b..4ad2c5a26399 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -81,14 +81,15 @@ struct kmem_cache { #endif #ifdef CONFIG_SLAB_FREELIST_RANDOM - void *random_seq; + unsigned int *random_seq; #endif struct kmem_cache_node *node[MAX_NUMNODES]; }; static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, - void *x) { + void *x) +{ void *object = x - (x - page->s_mem) % cache->size; void *last_object = page->s_mem + (cache->num - 1) * cache->size; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d1faa019c02a..75f56c2ef2d4 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -99,6 +99,15 @@ struct kmem_cache { */ int remote_node_defrag_ratio; #endif + +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + +#ifdef CONFIG_KASAN + struct kasan_cache kasan_info; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; @@ -114,15 +123,17 @@ static inline void sysfs_slab_remove(struct kmem_cache *s) void object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason); +void *fixup_red_left(struct kmem_cache *s, void *p); + static 
inline void *nearest_obj(struct kmem_cache *cache, struct page *page, void *x) { void *object = x - (x - page_address(page)) % cache->size; void *last_object = page_address(page) + (page->objects - 1) * cache->size; - if (unlikely(object > last_object)) - return last_object; - else - return object; + void *result = (unlikely(object > last_object)) ? last_object : object; + + result = fixup_red_left(cache, result); + return result; } #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 1f03483f61e5..072cb2aa2413 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -312,8 +312,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @flags: other constraints relevant to this driver * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. + * @io_mutex: mutex for physical bus access * @bus_lock_spinlock: spinlock for SPI bus locking - * @bus_lock_mutex: mutex for SPI bus locking + * @bus_lock_mutex: mutex for exclusion of multiple callers * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This @@ -446,6 +447,9 @@ struct spi_master { */ size_t (*max_transfer_size)(struct spi_device *spi); + /* I/O mutex */ + struct mutex io_mutex; + /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; @@ -1143,6 +1147,8 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * @opcode_nbits: number of lines to send opcode * @addr_nbits: number of lines to send address * @data_nbits: number of lines for data + * @rx_sg: Scatterlist for receive data read from flash + * @cur_msg_mapped: message has been mapped for DMA */ struct spi_flash_read_message { void *buf; @@ -1155,6 +1161,8 @@ struct spi_flash_read_message { u8 opcode_nbits; u8 addr_nbits; u8 data_nbits; + struct sg_table rx_sg; + bool cur_msg_mapped; }; /* SPI core interface for flash read support */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 8b3ac0d718eb..0d9848de677d 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -6,6 +6,7 @@ #endif #include <asm/processor.h> /* for cpu_relax() */ +#include <asm/barrier.h> /* * include/linux/spinlock_up.h - UP-debug version of spinlocks. 
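
The spinlock_up.h hunk that follows rewrites the UP-debug arch_spin_unlock_wait() on top of smp_cond_load_acquire(). For readers unfamiliar with that primitive, its generic fallback behaves roughly like this open-coded loop (a sketch; architectures may provide optimized versions):

unsigned int val;

for (;;) {
	val = READ_ONCE(lock->slock);	/* VAL in the macro */
	if (val)			/* non-zero: lock released */
		break;
	cpu_relax();
}
smp_acquire__after_ctrl_dep();	/* acquire ordering after the final read */
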
@@ -25,6 +26,11 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define arch_spin_is_locked(x) ((x)->slock == 0) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + smp_cond_load_acquire(&lock->slock, VAL); +} + static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; @@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0) /* for sched/core.c and kernel_lock.c: */ # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) @@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_read_can_lock(lock) (((void)(lock), 1)) #define arch_write_can_lock(lock) (((void)(lock), 1)) -#define arch_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (arch_spin_is_locked(lock)) - #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index ffdaca9c01af..705840e0438f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -135,9 +135,12 @@ struct plat_stmmacenet_data { void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); + void (*suspend)(struct platform_device *pdev, void *priv); + void (*resume)(struct platform_device *pdev, void *priv); void *bsp_priv; struct stmmac_axi *axi; int has_gmac4; bool tso_en; + int mac_port_sel_speed; }; #endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h index 451771d9b9c0..7c2d95170d01 100644 --- a/include/linux/stringhash.h +++ b/include/linux/stringhash.h @@ -3,6 +3,7 @@ #include <linux/compiler.h> /* For __pure */ #include <linux/types.h> /* For u32, u64 */ +#include <linux/hash.h> /* * Routines for hashing strings of bytes to a 32-bit hash value. @@ -34,7 +35,7 @@ */ /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ -#define init_name_hash() 0 +#define init_name_hash(salt) (unsigned long)(salt) /* partial hash update function. Assume roughly 4 bits per character */ static inline unsigned long @@ -45,11 +46,12 @@ partial_name_hash(unsigned long c, unsigned long prevhash) /* * Finally: cut down the number of bits to a int value (and try to avoid - * losing bits) + * losing bits). This also has the property (wanted by the dcache) + * that the msbits make a good hash table index. */ static inline unsigned long end_name_hash(unsigned long hash) { - return (unsigned int)hash; + return __hash_32((unsigned int)hash); } /* @@ -60,7 +62,7 @@ static inline unsigned long end_name_hash(unsigned long hash) * * If not set, this falls back to a wrapper around the preceding. 
*/ -extern unsigned int __pure full_name_hash(const char *, unsigned int); +extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int); /* * A hash_len is a u64 with the hash of a string in the low @@ -71,6 +73,6 @@ extern unsigned int __pure full_name_hash(const char *, unsigned int); #define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) /* Return the "hash_len" (hash and length) of a null-terminated string */ -extern u64 __pure hashlen_string(const char *name); +extern u64 __pure hashlen_string(const void *salt, const char *name); #endif /* __LINUX_STRINGHASH_H */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 91d5a5d6f52b..d03932055328 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -172,12 +172,12 @@ extern void unix_gid_cache_destroy(struct net *net); */ static inline unsigned long hash_str(char const *name, int bits) { - return hashlen_hash(hashlen_string(name)) >> (32 - bits); + return hashlen_hash(hashlen_string(NULL, name)) >> (32 - bits); } static inline unsigned long hash_mem(char const *buf, int length, int bits) { - return full_name_hash(buf, length) >> (32 - bits); + return full_name_hash(NULL, buf, length) >> (32 - bits); } #endif /* __KERNEL__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8b6ec7ef0854..7693e39b14fe 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch) #endif #ifdef CONFIG_VT_CONSOLE_SLEEP -extern int pm_prepare_console(void); +extern void pm_prepare_console(void); extern void pm_restore_console(void); #else -static inline int pm_prepare_console(void) +static inline void pm_prepare_console(void) { - return 0; } static inline void pm_restore_console(void) diff --git a/include/linux/swap.h b/include/linux/swap.h index 0af2bb2028fd..b17cc4830fa6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -157,15 +157,6 @@ enum { #define SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX -/* - * Ratio between zone->managed_pages and the "gap" that above the per-zone - * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that - * do not meet the (high_wmark + gap) watermark, even which already met the - * high_wmark, in order to provide better per-zone lru behavior. We are ok to - * spend not more than 1% of the memory for this zone balancing "gap". 
- */ -#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100 - #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ @@ -317,6 +308,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); +extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); @@ -324,9 +316,9 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, bool may_swap); -extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, +extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, - struct zone *zone, + pg_data_t *pgdat, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; @@ -334,13 +326,14 @@ extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; #ifdef CONFIG_NUMA -extern int zone_reclaim_mode; +extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; -extern int zone_reclaim(struct zone *, gfp_t, unsigned int); +extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); #else -#define zone_reclaim_mode 0 -static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) +#define node_reclaim_mode 0 +static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, + unsigned int order) { return 0; } diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 0a0d56834c8e..f2293028ab9d 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -71,7 +71,7 @@ struct st_proto_s { enum proto_type type; long (*recv) (void *, struct sk_buff *); unsigned char (*match_packet) (const unsigned char *data); - void (*reg_complete_cb) (void *, char data); + void (*reg_complete_cb) (void *, int data); long (*write) (struct sk_buff *skb); void *priv_data; diff --git a/include/linux/time.h b/include/linux/time.h index 297f09f23896..4cea09d94208 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -205,7 +205,20 @@ struct tm { int tm_yday; }; -void time_to_tm(time_t totalsecs, int offset, struct tm *result); +void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); + +/** + * time_to_tm - converts the calendar time to local broken-down time + * + * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, + * Coordinated Universal Time (UTC). + * @offset offset seconds adding to totalsecs. 
+ * @result pointer to struct tm variable to receive broken-down time + */ +static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) +{ + time64_to_tm(totalsecs, offset, result); +} /** * timespec_to_ns - Convert timespec to nanoseconds diff --git a/include/linux/timer.h b/include/linux/timer.h index 20ac746f3eb3..4419506b564e 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -19,7 +19,6 @@ struct timer_list { void (*function)(unsigned long); unsigned long data; u32 flags; - int slack; #ifdef CONFIG_TIMER_STATS int start_pid; @@ -58,11 +57,14 @@ struct timer_list { * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! */ -#define TIMER_CPUMASK 0x0007FFFF -#define TIMER_MIGRATING 0x00080000 +#define TIMER_CPUMASK 0x0003FFFF +#define TIMER_MIGRATING 0x00040000 #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) -#define TIMER_DEFERRABLE 0x00100000 +#define TIMER_DEFERRABLE 0x00080000 +#define TIMER_PINNED 0x00100000 #define TIMER_IRQSAFE 0x00200000 +#define TIMER_ARRAYSHIFT 22 +#define TIMER_ARRAYMASK 0xFFC00000 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ @@ -70,7 +72,6 @@ struct timer_list { .expires = (_expires), \ .data = (_data), \ .flags = (_flags), \ - .slack = -1, \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } @@ -78,9 +79,15 @@ struct timer_list { #define TIMER_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), 0) +#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) + #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) +#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) + #define DEFINE_TIMER(_name, _function, _expires, _data) \ struct timer_list _name = \ TIMER_INITIALIZER(_function, _expires, _data) @@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define init_timer(timer) \ __init_timer((timer), 0) +#define init_timer_pinned(timer) \ + __init_timer((timer), TIMER_PINNED) #define init_timer_deferrable(timer) \ __init_timer((timer), TIMER_DEFERRABLE) +#define init_timer_pinned_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) #define init_timer_on_stack(timer) \ __init_timer_on_stack((timer), 0) @@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), 0) +#define setup_pinned_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_PINNED) #define setup_deferrable_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) #define setup_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), 0) +#define setup_pinned_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED) #define setup_deferrable_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ + 
__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) /** * timer_pending - is a timer pending? @@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); -extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); - -extern void set_timer_slack(struct timer_list *time, int slack_hz); -#define TIMER_NOT_PINNED 0 -#define TIMER_PINNED 1 /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: diff --git a/include/linux/topology.h b/include/linux/topology.h index afce69296ac0..cb0775e1ee4b 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -54,7 +54,7 @@ int arch_update_cpu_topology(void); /* * If the distance between nodes in a system is larger than RECLAIM_DISTANCE * (in whatever arch specific measurement units returned by node_distance()) - * and zone_reclaim_mode is enabled then the VM will only call zone_reclaim() + * and node_reclaim_mode is enabled then the VM will only call node_reclaim() * on nodes within this distance. */ #define RECLAIM_DISTANCE 30 diff --git a/include/linux/torture.h b/include/linux/torture.h index 7759fc3c622d..6685a73736a2 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -50,6 +50,10 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. */ +bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes, + unsigned long *sum_offl, int *min_offl, int *max_offl); +bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, + unsigned long *sum_onl, int *min_onl, int *max_onl); int torture_onoff_init(long ooholdoff, long oointerval); void torture_onoff_stats(void); bool torture_onoff_failures(void); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index fefe8b06a63d..612dbdfa388e 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -25,6 +25,8 @@ #include <linux/workqueue.h> #include <linux/usb/ch9.h> +#define UDC_TRACE_STR_MAX 512 + struct usb_ep; /** @@ -228,307 +230,49 @@ struct usb_ep { /*-------------------------------------------------------------------------*/ -/** - * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint - * @ep:the endpoint being configured - * @maxpacket_limit:value of maximum packet size limit - * - * This function should be used only in UDC drivers to initialize endpoint - * (usually in probe function).
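The timer.h hunks above fold CPU pinning into the timer itself: the per-timer slack field and mod_timer_pinned() are removed, and TIMER_PINNED becomes a flag recorded once at initialization, with pinned variants of every init/setup helper. A minimal sketch of arming a pinned timer under the new API (the callback name, data cookie, and one-second period are illustrative, not part of the patch):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list my_timer;

    static void my_timer_fn(unsigned long data)
    {
            /* Runs on the CPU that armed the timer: TIMER_PINNED was
             * recorded in timer->flags by setup_pinned_timer(). */
    }

    static void my_timer_arm(void)
    {
            setup_pinned_timer(&my_timer, my_timer_fn, 0);
            /* A plain mod_timer() now preserves pinning, which is what
             * the removed mod_timer_pinned() used to provide. */
            mod_timer(&my_timer, jiffies + HZ);
    }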
- */ +#if IS_ENABLED(CONFIG_USB_GADGET) +void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit); +int usb_ep_enable(struct usb_ep *ep); +int usb_ep_disable(struct usb_ep *ep); +struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req); +int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); +int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req); +int usb_ep_set_halt(struct usb_ep *ep); +int usb_ep_clear_halt(struct usb_ep *ep); +int usb_ep_set_wedge(struct usb_ep *ep); +int usb_ep_fifo_status(struct usb_ep *ep); +void usb_ep_fifo_flush(struct usb_ep *ep); +#else static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, - unsigned maxpacket_limit) -{ - ep->maxpacket_limit = maxpacket_limit; - ep->maxpacket = maxpacket_limit; -} - -/** - * usb_ep_enable - configure endpoint, making it usable - * @ep:the endpoint being configured. may not be the endpoint named "ep0". - * drivers discover endpoints through the ep_list of a usb_gadget. - * - * When configurations are set, or when interface settings change, the driver - * will enable or disable the relevant endpoints. while it is enabled, an - * endpoint may be used for i/o until the driver receives a disconnect() from - * the host or until the endpoint is disabled. - * - * the ep0 implementation (which calls this routine) must ensure that the - * hardware capabilities of each endpoint match the descriptor provided - * for it. for example, an endpoint named "ep2in-bulk" would be usable - * for interrupt transfers as well as bulk, but it likely couldn't be used - * for iso transfers or for endpoint 14. some endpoints are fully - * configurable, with more generic names like "ep-a". (remember that for - * USB, "in" means "towards the USB master".) - * - * returns zero, or a negative error code. - */ + unsigned maxpacket_limit) +{ } static inline int usb_ep_enable(struct usb_ep *ep) -{ - int ret; - - if (ep->enabled) - return 0; - - ret = ep->ops->enable(ep, ep->desc); - if (ret) - return ret; - - ep->enabled = true; - - return 0; -} - -/** - * usb_ep_disable - endpoint is no longer usable - * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". - * - * no other task may be using this endpoint when this is called. - * any pending and uncompleted requests will complete with status - * indicating disconnect (-ESHUTDOWN) before this call returns. - * gadget drivers must call usb_ep_enable() again before queueing - * requests to the endpoint. - * - * returns zero, or a negative error code. - */ +{ return 0; } static inline int usb_ep_disable(struct usb_ep *ep) -{ - int ret; - - if (!ep->enabled) - return 0; - - ret = ep->ops->disable(ep); - if (ret) - return ret; - - ep->enabled = false; - - return 0; -} - -/** - * usb_ep_alloc_request - allocate a request object to use with this endpoint - * @ep:the endpoint to be used with with the request - * @gfp_flags:GFP_* flags to use - * - * Request objects must be allocated with this call, since they normally - * need controller-specific setup and may even need endpoint-specific - * resources such as allocation of DMA descriptors. - * Requests may be submitted with usb_ep_queue(), and receive a single - * completion callback. Free requests with usb_ep_free_request(), when - * they are no longer needed. - * - * Returns the request, or null if one could not be allocated. 
- */ +{ return 0; } static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, - gfp_t gfp_flags) -{ - return ep->ops->alloc_request(ep, gfp_flags); -} - -/** - * usb_ep_free_request - frees a request object - * @ep:the endpoint associated with the request - * @req:the request being freed - * - * Reverses the effect of usb_ep_alloc_request(). - * Caller guarantees the request is not queued, and that it will - * no longer be requeued (or otherwise used). - */ + gfp_t gfp_flags) +{ return NULL; } static inline void usb_ep_free_request(struct usb_ep *ep, - struct usb_request *req) -{ - ep->ops->free_request(ep, req); -} - -/** - * usb_ep_queue - queues (submits) an I/O request to an endpoint. - * @ep:the endpoint associated with the request - * @req:the request being submitted - * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't - * pre-allocate all necessary memory with the request. - * - * This tells the device controller to perform the specified request through - * that endpoint (reading or writing a buffer). When the request completes, - * including being canceled by usb_ep_dequeue(), the request's completion - * routine is called to return the request to the driver. Any endpoint - * (except control endpoints like ep0) may have more than one transfer - * request queued; they complete in FIFO order. Once a gadget driver - * submits a request, that request may not be examined or modified until it - * is given back to that driver through the completion callback. - * - * Each request is turned into one or more packets. The controller driver - * never merges adjacent requests into the same packet. OUT transfers - * will sometimes use data that's already buffered in the hardware. - * Drivers can rely on the fact that the first byte of the request's buffer - * always corresponds to the first byte of some USB packet, for both - * IN and OUT transfers. - * - * Bulk endpoints can queue any amount of data; the transfer is packetized - * automatically. The last packet will be short if the request doesn't fill it - * out completely. Zero length packets (ZLPs) should be avoided in portable - * protocols since not all usb hardware can successfully handle zero length - * packets. (ZLPs may be explicitly written, and may be implicitly written if - * the request 'zero' flag is set.) Bulk endpoints may also be used - * for interrupt transfers; but the reverse is not true, and some endpoints - * won't support every interrupt transfer. (Such as 768 byte packets.) - * - * Interrupt-only endpoints are less functional than bulk endpoints, for - * example by not supporting queueing or not handling buffers that are - * larger than the endpoint's maxpacket size. They may also treat data - * toggle differently. - * - * Control endpoints ... after getting a setup() callback, the driver queues - * one response (even if it would be zero length). That enables the - * status ack, after transferring data as specified in the response. Setup - * functions may return negative error codes to generate protocol stalls. - * (Note that some USB device controllers disallow protocol stall responses - * in some cases.) When control responses are deferred (the response is - * written after the setup callback returns), then usb_ep_set_halt() may be - * used on ep0 to trigger protocol stalls. Depending on the controller, - * it may not be possible to trigger a status-stage protocol stall when the - * data stage is over, that is, from within the response's completion - * routine. 
- * - * For periodic endpoints, like interrupt or isochronous ones, the usb host - * arranges to poll once per interval, and the gadget driver usually will - * have queued some data to transfer at that time. - * - * Returns zero, or a negative error code. Endpoints that are not enabled - * report errors; errors will also be - * reported when the usb peripheral is disconnected. - */ -static inline int usb_ep_queue(struct usb_ep *ep, - struct usb_request *req, gfp_t gfp_flags) -{ - if (WARN_ON_ONCE(!ep->enabled && ep->address)) - return -ESHUTDOWN; - - return ep->ops->queue(ep, req, gfp_flags); -} - -/** - * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint - * @ep:the endpoint associated with the request - * @req:the request being canceled - * - * If the request is still active on the endpoint, it is dequeued and its - * completion routine is called (with status -ECONNRESET); else a negative - * error code is returned. This is guaranteed to happen before the call to - * usb_ep_dequeue() returns. - * - * Note that some hardware can't clear out write fifos (to unlink the request - * at the head of the queue) except as part of disconnecting from usb. Such - * restrictions prevent drivers from supporting configuration changes, - * even to configuration zero (a "chapter 9" requirement). - */ + struct usb_request *req) +{ } +static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags) +{ return 0; } static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) -{ - return ep->ops->dequeue(ep, req); -} - -/** - * usb_ep_set_halt - sets the endpoint halt feature. - * @ep: the non-isochronous endpoint being stalled - * - * Use this to stall an endpoint, perhaps as an error report. - * Except for control endpoints, - * the endpoint stays halted (will not stream any data) until the host - * clears this feature; drivers may need to empty the endpoint's request - * queue first, to make sure no inappropriate transfers happen. - * - * Note that while an endpoint CLEAR_FEATURE will be invisible to the - * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the - * current altsetting, see usb_ep_clear_halt(). When switching altsettings, - * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. - * - * Returns zero, or a negative error code. On success, this call sets - * underlying hardware state that blocks data transfers. - * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any - * transfer requests are still queued, or if the controller hardware - * (usually a FIFO) still holds bytes that the host hasn't collected. - */ +{ return 0; } static inline int usb_ep_set_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_clear_halt - clears endpoint halt, and resets toggle - * @ep:the bulk or interrupt endpoint being reset - * - * Use this when responding to the standard usb "set interface" request, - * for endpoints that aren't reconfigured, after clearing any other state - * in the endpoint's i/o queue. - * - * Returns zero, or a negative error code. On success, this call clears - * the underlying hardware state reflecting endpoint halt and data toggle. - * Note that some hardware can't support this request (like pxa2xx_udc), - * and accordingly can't correctly implement interface altsettings. 
- */ +{ return 0; } static inline int usb_ep_clear_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 0); -} - -/** - * usb_ep_set_wedge - sets the halt feature and ignores clear requests - * @ep: the endpoint being wedged - * - * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT) - * requests. If the gadget driver clears the halt status, it will - * automatically unwedge the endpoint. - * - * Returns zero on success, else negative errno. - */ -static inline int -usb_ep_set_wedge(struct usb_ep *ep) -{ - if (ep->ops->set_wedge) - return ep->ops->set_wedge(ep); - else - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_fifo_status - returns number of bytes in fifo, or error - * @ep: the endpoint whose fifo status is being checked. - * - * FIFO endpoints may have "unclaimed data" in them in certain cases, - * such as after aborted transfers. Hosts may not have collected all - * the IN data written by the gadget driver (and reported by a request - * completion). The gadget driver may not have collected all the data - * written OUT to it by the host. Drivers that need precise handling for - * fault reporting or recovery may need to use this call. - * - * This returns the number of such bytes in the fifo, or a negative - * errno if the endpoint doesn't use a FIFO or doesn't support such - * precise handling. - */ +{ return 0; } +static inline int usb_ep_set_wedge(struct usb_ep *ep) +{ return 0; } static inline int usb_ep_fifo_status(struct usb_ep *ep) -{ - if (ep->ops->fifo_status) - return ep->ops->fifo_status(ep); - else - return -EOPNOTSUPP; -} - -/** - * usb_ep_fifo_flush - flushes contents of a fifo - * @ep: the endpoint whose fifo is being flushed. - * - * This call may be used to flush the "unclaimed data" that may exist in - * an endpoint fifo after abnormal transaction terminations. The call - * must never be used except when endpoint is not being used for any - * protocol translation. - */ +{ return 0; } static inline void usb_ep_fifo_flush(struct usb_ep *ep) -{ - if (ep->ops->fifo_flush) - ep->ops->fifo_flush(ep); -} - +{ } +#endif /* USB_GADGET */ /*-------------------------------------------------------------------------*/ @@ -582,6 +326,7 @@ struct usb_gadget_ops { * @dev: Driver model state for this abstract device. * @out_epnum: last used out ep number * @in_epnum: last used in ep number + * @mA: last set mA value * @otg_caps: OTG capabilities of this gadget. * @sg_supported: true if we can handle scatter-gather * @is_otg: True if the USB device port uses a Mini-AB jack, so that the @@ -638,6 +383,7 @@ struct usb_gadget { struct device dev; unsigned out_epnum; unsigned in_epnum; + unsigned mA; struct usb_otg_caps *otg_caps; unsigned sg_supported:1; @@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g) #endif } -/** - * usb_gadget_frame_number - returns the current frame number - * @gadget: controller that reports the frame number - * - * Returns the usb frame number, normally eleven bits from a SOF packet, - * or negative errno if this device doesn't support this capability. 
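With CONFIG_USB_GADGET enabled, the usb_ep_* helpers above move out of line (apparently in support of the new UDC tracing, note UDC_TRACE_STR_MAX earlier); otherwise they become no-op stubs. Their calling sequence is unchanged from what the deleted inline kernel-doc described. A hedged sketch of the usual endpoint I/O lifecycle in a gadget function driver (the endpoint, buffer, and completion handler are illustrative):

    #include <linux/usb/gadget.h>

    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
            /* Called exactly once per queued request, including on
             * usb_ep_dequeue() cancellation (status -ECONNRESET). */
    }

    static int my_start_io(struct usb_ep *ep, void *buf, unsigned len)
    {
            struct usb_request *req;
            int ret;

            ret = usb_ep_enable(ep);        /* ep->desc must be set first */
            if (ret)
                    return ret;

            req = usb_ep_alloc_request(ep, GFP_KERNEL);
            if (!req) {
                    usb_ep_disable(ep);
                    return -ENOMEM;
            }
            req->buf = buf;
            req->length = len;
            req->complete = my_complete;

            ret = usb_ep_queue(ep, req, GFP_KERNEL); /* FIFO completion order */
            if (ret) {
                    usb_ep_free_request(ep, req);
                    usb_ep_disable(ep);
            }
            return ret;
    }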
- */ -static inline int usb_gadget_frame_number(struct usb_gadget *gadget) -{ - return gadget->ops->get_frame(gadget); -} +/*-------------------------------------------------------------------------*/ -/** - * usb_gadget_wakeup - tries to wake up the host connected to this gadget - * @gadget: controller used to wake up the host - * - * Returns zero on success, else negative error code if the hardware - * doesn't support such attempts, or its support has not been enabled - * by the usb host. Drivers must return device descriptors that report - * their ability to support this, or hosts won't enable it. - * - * This may also try to use SRP to wake the host and start enumeration, - * even if OTG isn't otherwise in use. OTG devices may also start - * remote wakeup even when hosts don't explicitly enable it. - */ +#if IS_ENABLED(CONFIG_USB_GADGET) +int usb_gadget_frame_number(struct usb_gadget *gadget); +int usb_gadget_wakeup(struct usb_gadget *gadget); +int usb_gadget_set_selfpowered(struct usb_gadget *gadget); +int usb_gadget_clear_selfpowered(struct usb_gadget *gadget); +int usb_gadget_vbus_connect(struct usb_gadget *gadget); +int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA); +int usb_gadget_vbus_disconnect(struct usb_gadget *gadget); +int usb_gadget_connect(struct usb_gadget *gadget); +int usb_gadget_disconnect(struct usb_gadget *gadget); +int usb_gadget_deactivate(struct usb_gadget *gadget); +int usb_gadget_activate(struct usb_gadget *gadget); +#else +static inline int usb_gadget_frame_number(struct usb_gadget *gadget) +{ return 0; } static inline int usb_gadget_wakeup(struct usb_gadget *gadget) -{ - if (!gadget->ops->wakeup) - return -EOPNOTSUPP; - return gadget->ops->wakeup(gadget); -} - -/** - * usb_gadget_set_selfpowered - sets the device selfpowered feature. - * @gadget:the device being declared as self-powered - * - * this affects the device status reported by the hardware driver - * to reflect that it now has a local power supply. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 1); -} - -/** - * usb_gadget_clear_selfpowered - clear the device selfpowered feature. - * @gadget:the device being declared as bus-powered - * - * this affects the device status reported by the hardware driver. - * some hardware may not support bus-powered operation, in which - * case this feature's value can never change. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 0); -} - -/** - * usb_gadget_vbus_connect - Notify controller that VBUS is powered - * @gadget:The device which now has VBUS power. - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session starting. Common responses include - * resuming the controller, activating the D+ (or D-) pullup to let the - * host detect that a USB device is attached, and starting to draw power - * (8mA or possibly more, especially after SET_CONFIGURATION). - * - * Returns zero on success, else negative errno. 
- */ +{ return 0; } static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 1); -} - -/** - * usb_gadget_vbus_draw - constrain controller's VBUS power usage - * @gadget:The device whose VBUS usage is being described - * @mA:How much current to draw, in milliAmperes. This should be twice - * the value listed in the configuration descriptor bMaxPower field. - * - * This call is used by gadget drivers during SET_CONFIGURATION calls, - * reporting how much power the device may consume. For example, this - * could affect how quickly batteries are recharged. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) -{ - if (!gadget->ops->vbus_draw) - return -EOPNOTSUPP; - return gadget->ops->vbus_draw(gadget, mA); -} - -/** - * usb_gadget_vbus_disconnect - notify controller about VBUS session end - * @gadget:the device whose VBUS supply is being described - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session ending. Common responses include - * reversing everything done in usb_gadget_vbus_connect(). - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 0); -} - -/** - * usb_gadget_connect - software-controlled connect to USB host - * @gadget:the peripheral being connected - * - * Enables the D+ (or potentially D-) pullup. The host will start - * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). This pullup is always enabled unless - * usb_gadget_disconnect() has been used to disable it. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_connect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will be connected automatically after activation. - */ - gadget->connected = true; - return 0; - } - - ret = gadget->ops->pullup(gadget, 1); - if (!ret) - gadget->connected = 1; - return ret; -} - -/** - * usb_gadget_disconnect - software-controlled disconnect from USB host - * @gadget:the peripheral being disconnected - * - * Disables the D+ (or potentially D-) pullup, which the host may see - * as a disconnect (when a VBUS session is active). Not all systems - * support software pullup controls. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_disconnect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will stay disconnected after activation. 
- */ - gadget->connected = false; - return 0; - } - - ret = gadget->ops->pullup(gadget, 0); - if (!ret) - gadget->connected = 0; - return ret; -} - -/** - * usb_gadget_deactivate - deactivate function which is not ready to work - * @gadget: the peripheral being deactivated - * - * This routine may be used during the gadget driver bind() call to prevent - * the peripheral from ever being visible to the USB host, unless later - * usb_gadget_activate() is called. For example, user mode components may - * need to be activated before the system can talk to hosts. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_deactivate(struct usb_gadget *gadget) -{ - int ret; - - if (gadget->deactivated) - return 0; - - if (gadget->connected) { - ret = usb_gadget_disconnect(gadget); - if (ret) - return ret; - /* - * If gadget was being connected before deactivation, we want - * to reconnect it in usb_gadget_activate(). - */ - gadget->connected = true; - } - gadget->deactivated = true; - - return 0; -} - -/** - * usb_gadget_activate - activate function which is not ready to work - * @gadget: the peripheral being activated - * - * This routine activates gadget which was previously deactivated with - * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) -{ - if (!gadget->deactivated) - return 0; - - gadget->deactivated = false; - - /* - * If gadget has been connected before deactivation, or became connected - * while it was being deactivated, we call usb_gadget_connect(). - */ - if (gadget->connected) - return usb_gadget_connect(gadget); - - return 0; -} +{ return 0; } +#endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h deleted file mode 100644 index 8c8f6854c993..000000000000 --- a/include/linux/usb/msm_hsusb.h +++ /dev/null @@ -1,200 +0,0 @@ -/* linux/include/asm-arm/arch-msm/hsusb.h - * - * Copyright (C) 2008 Google, Inc. - * Author: Brian Swetland <swetland@google.com> - * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __ASM_ARCH_MSM_HSUSB_H -#define __ASM_ARCH_MSM_HSUSB_H - -#include <linux/extcon.h> -#include <linux/types.h> -#include <linux/usb/otg.h> -#include <linux/clk.h> - -/** - * OTG control - * - * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host - * only configuration. - * OTG_PHY_CONTROL Id/VBUS notifications comes form USB PHY. - * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware. - * OTG_USER_CONTROL Id/VBUS notifcations comes from User via sysfs. 
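The usb_gadget_* wrappers likewise become out-of-line functions with stubbed fallbacks, keeping the semantics their deleted inline bodies documented: connect/disconnect drive the D+ pullup, while deactivate/activate park the pullup and remember the intended connection state. An illustrative sketch of the bind-time use case the original kernel-doc suggested (the function name is hypothetical):

    #include <linux/usb/gadget.h>

    static int my_bind_prepare(struct usb_gadget *gadget)
    {
            /* Keep the peripheral invisible to the host until, e.g.,
             * user-space components of the function are ready. */
            int ret = usb_gadget_deactivate(gadget);

            if (ret)
                    return ret;

            /* ... bring up function-side resources ... */

            /* Re-enables the pullup if the gadget was (or became)
             * logically connected while deactivated. */
            return usb_gadget_activate(gadget);
    }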
- * - */ -enum otg_control_type { - OTG_NO_CONTROL = 0, - OTG_PHY_CONTROL, - OTG_PMIC_CONTROL, - OTG_USER_CONTROL, -}; - -/** - * PHY used in - * - * INVALID_PHY Unsupported PHY - * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY - * SNPS_28NM_INTEGRATED_PHY Synopsis 28nm integrated PHY - * - */ -enum msm_usb_phy_type { - INVALID_PHY = 0, - CI_45NM_INTEGRATED_PHY, - SNPS_28NM_INTEGRATED_PHY, -}; - -#define IDEV_CHG_MAX 1500 -#define IUNIT 100 - -/** - * Different states involved in USB charger detection. - * - * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection - * process is not yet started. - * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. - * USB_CHG_STATE_DCD_DONE Data pin contact is detected. - * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects - * between SDP and DCP/CDP). - * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects - * between DCP and CDP). - * USB_CHG_STATE_DETECTED USB charger type is determined. - * - */ -enum usb_chg_state { - USB_CHG_STATE_UNDEFINED = 0, - USB_CHG_STATE_WAIT_FOR_DCD, - USB_CHG_STATE_DCD_DONE, - USB_CHG_STATE_PRIMARY_DONE, - USB_CHG_STATE_SECONDARY_DONE, - USB_CHG_STATE_DETECTED, -}; - -/** - * USB charger types - * - * USB_INVALID_CHARGER Invalid USB charger. - * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port - * on USB2.0 compliant host/hub. - * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger). - * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and - * IDEV_CHG_MAX can be drawn irrespective of USB state. - * - */ -enum usb_chg_type { - USB_INVALID_CHARGER = 0, - USB_SDP_CHARGER, - USB_DCP_CHARGER, - USB_CDP_CHARGER, -}; - -/** - * struct msm_otg_platform_data - platform device data - * for msm_otg driver. - * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as - * "do not overwrite default vaule at this address". - * @phy_init_sz: PHY configuration sequence size. - * @vbus_power: VBUS power on/off routine. - * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). - * @mode: Supported mode (OTG/peripheral/host). - * @otg_control: OTG switch controlled by user/Id pin - */ -struct msm_otg_platform_data { - int *phy_init_seq; - int phy_init_sz; - void (*vbus_power)(bool on); - unsigned power_budget; - enum usb_dr_mode mode; - enum otg_control_type otg_control; - enum msm_usb_phy_type phy_type; - void (*setup_gpio)(enum usb_otg_state state); -}; - -/** - * struct msm_usb_cable - structure for exteternal connector cable - * state tracking - * @nb: hold event notification callback - * @conn: used for notification registration - */ -struct msm_usb_cable { - struct notifier_block nb; - struct extcon_dev *extcon; -}; - -/** - * struct msm_otg: OTG driver data. Shared by HCD and DCD. - * @otg: USB OTG Transceiver structure. - * @pdata: otg device platform data. - * @irq: IRQ number assigned for HSUSB controller. - * @clk: clock struct of usb_hs_clk. - * @pclk: clock struct of usb_hs_pclk. - * @core_clk: clock struct of usb_hs_core_clk. - * @regs: ioremapped register base address. - * @inputs: OTG state machine inputs(Id, SessValid etc). - * @sm_work: OTG state machine work. - * @in_lpm: indicates low power mode (LPM) state. - * @async_int: Async interrupt arrived. - * @cur_power: The amount of mA available from downstream port. - * @chg_work: Charger detection work. - * @chg_state: The state of charger detection process. - * @chg_type: The type of charger attached. 
- * @dcd_retires: The retry count used to track Data contact - * detection process. - * @manual_pullup: true if VBUS is not routed to USB controller/phy - * and controller driver therefore enables pull-up explicitly before - * starting controller using usbcmd run/stop bit. - * @vbus: VBUS signal state trakining, using extcon framework - * @id: ID signal state trakining, using extcon framework - * @switch_gpio: Descriptor for GPIO used to control external Dual - * SPDT USB Switch. - * @reboot: Used to inform the driver to route USB D+/D- line to Device - * connector - */ -struct msm_otg { - struct usb_phy phy; - struct msm_otg_platform_data *pdata; - int irq; - struct clk *clk; - struct clk *pclk; - struct clk *core_clk; - void __iomem *regs; -#define ID 0 -#define B_SESS_VLD 1 - unsigned long inputs; - struct work_struct sm_work; - atomic_t in_lpm; - int async_int; - unsigned cur_power; - int phy_number; - struct delayed_work chg_work; - enum usb_chg_state chg_state; - enum usb_chg_type chg_type; - u8 dcd_retries; - struct regulator *v3p3; - struct regulator *v1p8; - struct regulator *vddcx; - - struct reset_control *phy_rst; - struct reset_control *link_rst; - int vdd_levels[3]; - - bool manual_pullup; - - struct msm_usb_cable vbus; - struct msm_usb_cable id; - - struct gpio_desc *switch_gpio; - struct notifier_block reboot; -}; - -#endif diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index de3237fce6b2..5ff9032ee1b4 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,7 +12,7 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) -enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); @@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent, int portnum); #else static inline enum usb_dr_mode -of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) { return USB_DR_MODE_UNKNOWN; } diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h deleted file mode 100644 index 376654b5b0f7..000000000000 --- a/include/linux/usb/xhci_pdriver.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef __USB_CORE_XHCI_PDRIVER_H -#define __USB_CORE_XHCI_PDRIVER_H - -/** - * struct usb_xhci_pdata - platform_data for generic xhci platform driver - * - * @usb3_lpm_capable: determines if this xhci platform supports USB3 - * LPM capability - * - */ -struct usb_xhci_pdata { - unsigned usb3_lpm_capable:1; -}; - -#endif /* __USB_CORE_XHCI_PDRIVER_H */ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 587480ad41b7..dd66a952e8cd 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -27,8 +27,7 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int handle_userfault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long reason); +extern int handle_userfault(struct fault_env *fe, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len); @@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline int handle_userfault(struct vm_area_struct *vma, - unsigned long address, - unsigned int flags, - unsigned long reason) +static inline int handle_userfault(struct fault_env *fe, unsigned long reason) { return VM_FAULT_SIGBUS; } diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h new file mode 100644 index 000000000000..1c912f85e041 --- /dev/null +++ b/include/linux/virtio_net.h @@ -0,0 +1,101 @@ +#ifndef _LINUX_VIRTIO_NET_H +#define _LINUX_VIRTIO_NET_H + +#include <linux/if_vlan.h> +#include <uapi/linux/virtio_net.h> + +static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + const struct virtio_net_hdr *hdr, + bool little_endian) +{ + unsigned short gso_type = 0; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + gso_type = SKB_GSO_TCPV4; + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + gso_type = SKB_GSO_TCPV6; + break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; + break; + default: + return -EINVAL; + } + + if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) + gso_type |= SKB_GSO_TCP_ECN; + + if (hdr->gso_size == 0) + return -EINVAL; + } + + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); + u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); + + if (!skb_partial_csum_set(skb, start, off)) + return -EINVAL; + } + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + + skb_shinfo(skb)->gso_size = gso_size; + skb_shinfo(skb)->gso_type = gso_type; + + /* Header must be checked, and gso_segs computed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + return 0; +} + +static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, + struct virtio_net_hdr *hdr, + bool little_endian) +{ + memset(hdr, 0, sizeof(*hdr)); + + if (skb_is_gso(skb)) { + struct skb_shared_info *sinfo = skb_shinfo(skb); + + /* This is a hint as to how much should be linear. 
*/ + hdr->hdr_len = __cpu_to_virtio16(little_endian, + skb_headlen(skb)); + hdr->gso_size = __cpu_to_virtio16(little_endian, + sinfo->gso_size); + if (sinfo->gso_type & SKB_GSO_TCPV4) + hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + else if (sinfo->gso_type & SKB_GSO_TCPV6) + hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; + else + return -EINVAL; + if (sinfo->gso_type & SKB_GSO_TCP_ECN) + hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; + } else + hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + if (skb_vlan_tag_present(skb)) + hdr->csum_start = __cpu_to_virtio16(little_endian, + skb_checksum_start_offset(skb) + VLAN_HLEN); + else + hdr->csum_start = __cpu_to_virtio16(little_endian, + skb_checksum_start_offset(skb)); + hdr->csum_offset = __cpu_to_virtio16(little_endian, + skb->csum_offset); + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; + } /* else everything is zero */ + + return 0; +} + +#endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ec084321fe09..4d6ec58a8d45 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -23,21 +23,23 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), + FOR_ALL_ZONES(ALLOCSTALL), + FOR_ALL_ZONES(PGSCAN_SKIP), PGFREE, PGACTIVATE, PGDEACTIVATE, PGFAULT, PGMAJFAULT, PGLAZYFREED, - FOR_ALL_ZONES(PGREFILL), - FOR_ALL_ZONES(PGSTEAL_KSWAPD), - FOR_ALL_ZONES(PGSTEAL_DIRECT), - FOR_ALL_ZONES(PGSCAN_KSWAPD), - FOR_ALL_ZONES(PGSCAN_DIRECT), + PGREFILL, + PGSTEAL_KSWAPD, + PGSTEAL_DIRECT, + PGSCAN_KSWAPD, + PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, #ifdef CONFIG_NUMA PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, - PAGEOUTRUN, ALLOCSTALL, PGROTATED, + PAGEOUTRUN, PGROTATED, DROP_PAGECACHE, DROP_SLAB, #ifdef CONFIG_NUMA_BALANCING NUMA_PTE_UPDATES, @@ -70,6 +72,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_FAULT_FALLBACK, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, + THP_FILE_ALLOC, + THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, @@ -100,4 +104,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_VM_EVENT_ITEMS }; +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) +#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) +#endif + #endif /* VM_EVENT_ITEM_H_INCLUDED */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d2da8e053210..613771909b6e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -101,25 +101,42 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_vmacache_event(x) do {} while (0) #endif -#define __count_zone_vm_events(item, zone, delta) \ - __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ - zone_idx(zone), delta) +#define __count_zid_vm_events(item, zid, delta) \ + __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) /* - * Zone based page accounting with per cpu differentials. + * Zone and node-based page accounting with per cpu differentials.
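The new <linux/virtio_net.h> above gives tun-like drivers and packet sockets one shared implementation of the virtio_net header conversion they previously open-coded. A hedged round-trip sketch (the caller is assumed to supply the skb and the device's endianness):

    #include <linux/skbuff.h>
    #include <linux/virtio_net.h>

    static int my_forward(struct sk_buff *skb, bool little_endian)
    {
            struct virtio_net_hdr hdr;
            int ret;

            /* Describe the skb's GSO/checksum state as a virtio header. */
            ret = virtio_net_hdr_from_skb(skb, &hdr, little_endian);
            if (ret)
                    return ret;     /* e.g. unsupported gso_type */

            /* Receive side: apply such a header to an skb; returns -EINVAL
             * on a bad gso_type/gso_size or bad checksum offsets. */
            return virtio_net_hdr_to_skb(skb, &hdr, little_endian);
    }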
*/ -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; static inline void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { atomic_long_add(x, &zone->vm_stat[item]); - atomic_long_add(x, &vm_stat[item]); + atomic_long_add(x, &vm_zone_stat[item]); +} + +static inline void node_page_state_add(long x, struct pglist_data *pgdat, + enum node_stat_item item) +{ + atomic_long_add(x, &pgdat->vm_stat[item]); + atomic_long_add(x, &vm_node_stat[item]); } static inline unsigned long global_page_state(enum zone_stat_item item) { - long x = atomic_long_read(&vm_stat[item]); + long x = atomic_long_read(&vm_zone_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +static inline unsigned long global_node_page_state(enum node_stat_item item) +{ + long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -160,32 +177,61 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, return x; } -#ifdef CONFIG_NUMA +static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat, + enum node_stat_item item) +{ + long x = atomic_long_read(&pgdat->vm_stat[item]); -extern unsigned long node_page_state(int node, enum zone_stat_item item); +#ifdef CONFIG_SMP + int cpu; + for_each_online_cpu(cpu) + x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item]; -#else + if (x < 0) + x = 0; +#endif + return x; +} -#define node_page_state(node, item) global_page_state(item) +#ifdef CONFIG_NUMA +extern unsigned long sum_zone_node_page_state(int node, + enum zone_stat_item item); +extern unsigned long node_page_state(struct pglist_data *pgdat, + enum node_stat_item item); +#else +#define sum_zone_node_page_state(node, item) global_page_state(item) +#define node_page_state(node, item) global_node_page_state(item) #endif /* CONFIG_NUMA */ #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) +#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d) +#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d)) #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); +void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); +void __inc_node_page_state(struct page *, enum node_stat_item); +void __dec_node_page_state(struct page *, enum node_stat_item); + void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); -extern void inc_zone_state(struct zone *, enum zone_stat_item); +void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); +void inc_node_page_state(struct page *, enum node_stat_item); +void dec_node_page_state(struct page *, enum node_stat_item); + +extern void inc_node_state(struct pglist_data *, enum node_stat_item); extern void __inc_zone_state(struct zone *, enum zone_stat_item); +extern void __inc_node_state(struct pglist_data *, enum node_stat_item); extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); +extern void 
__dec_node_state(struct pglist_data *, enum node_stat_item); void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); @@ -213,16 +259,34 @@ static inline void __mod_zone_page_state(struct zone *zone, zone_page_state_add(delta, zone, item); } +static inline void __mod_node_page_state(struct pglist_data *pgdat, + enum node_stat_item item, int delta) +{ + node_page_state_add(delta, pgdat, item); +} + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_inc(&zone->vm_stat[item]); - atomic_long_inc(&vm_stat[item]); + atomic_long_inc(&vm_zone_stat[item]); +} + +static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) +{ + atomic_long_inc(&pgdat->vm_stat[item]); + atomic_long_inc(&vm_node_stat[item]); } static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_dec(&zone->vm_stat[item]); - atomic_long_dec(&vm_stat[item]); + atomic_long_dec(&vm_zone_stat[item]); +} + +static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) +{ + atomic_long_dec(&pgdat->vm_stat[item]); + atomic_long_dec(&vm_node_stat[item]); } static inline void __inc_zone_page_state(struct page *page, @@ -231,12 +295,26 @@ static inline void __inc_zone_page_state(struct page *page, __inc_zone_state(page_zone(page), item); } +static inline void __inc_node_page_state(struct page *page, + enum node_stat_item item) +{ + __inc_node_state(page_pgdat(page), item); +} + + static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item) { __dec_zone_state(page_zone(page), item); } +static inline void __dec_node_page_state(struct page *page, + enum node_stat_item item) +{ + __dec_node_state(page_pgdat(page), item); +} + + /* * We only use atomic operations to update counters. So there is no need to * disable interrupts. 
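The node counterparts above mirror the zone API exactly: each updater touches both pgdat->vm_stat[] and the global vm_node_stat[] array. A minimal sketch, assuming NR_FILE_PAGES is one of the enum node_stat_item entries this series migrates from the zone counters (that move is not shown in this hunk):

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    static void my_account_page(struct page *page)
    {
            /* Node-based counterpart of inc_zone_page_state(); treating
             * NR_FILE_PAGES as a node stat is an assumption here. */
            inc_node_page_state(page, NR_FILE_PAGES);
    }

    static unsigned long my_file_pages(struct pglist_data *pgdat)
    {
            /* Per-node read; global_node_page_state() sums all nodes. */
            return node_page_state(pgdat, NR_FILE_PAGES);
    }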
@@ -245,7 +323,12 @@ static inline void __dec_zone_page_state(struct page *page, #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state +#define inc_node_page_state __inc_node_page_state +#define dec_node_page_state __dec_node_page_state +#define mod_node_page_state __mod_node_page_state + #define inc_zone_state __inc_zone_state +#define inc_node_state __inc_node_state #define dec_zone_state __dec_zone_state #define set_pgdat_percpu_threshold(pgdat, callback) { } diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 8d7634247fb4..6abd24f258bc 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -45,7 +45,7 @@ void poke_blanked_console(void); int con_font_op(struct vc_data *vc, struct console_font_op *op); int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); -void scrollback(struct vc_data *vc, int lines); +void scrollback(struct vc_data *vc); void scrollfront(struct vc_data *vc, int lines); void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); @@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg); #ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ -struct unimapinit; struct unipair; int con_set_trans_old(unsigned char __user * table); int con_get_trans_old(unsigned char __user * table); int con_set_trans_new(unsigned short __user * table); int con_get_trans_new(unsigned short __user * table); -int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui); +int con_clear_unimap(struct vc_data *vc); int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); int con_set_default_unimap(struct vc_data *vc); @@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table) { return -EINVAL; } -static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) +static inline int con_clear_unimap(struct vc_data *vc) { return 0; } diff --git a/include/linux/vtime.h b/include/linux/vtime.h index fa2196990f84..aa9bfea8804a 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -12,11 +12,9 @@ struct task_struct; /* * vtime_accounting_cpu_enabled() definitions/declarations */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) static inline bool vtime_accounting_cpu_enabled(void) { return true; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* * Checks if vtime is enabled on some CPU. Cputime readers want to be careful * in that case and compute the tickless cputime. 
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void) return false; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ - -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline bool vtime_accounting_cpu_enabled(void) { return false; } -#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* @@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); extern void vtime_account_user(struct task_struct *tsk); -#ifdef __ARCH_HAS_VTIME_ACCOUNT -extern void vtime_account_irq_enter(struct task_struct *tsk); -#else -extern void vtime_common_account_irq_enter(struct task_struct *tsk); -static inline void vtime_account_irq_enter(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_common_account_irq_enter(tsk); -} -#endif /* __ARCH_HAS_VTIME_ACCOUNT */ - #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } static inline void vtime_account_user(struct task_struct *tsk) { } -static inline void vtime_account_irq_enter(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_gen_account_irq_exit(struct task_struct *tsk); - -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_gen_account_irq_exit(tsk); -} - extern void vtime_user_enter(struct task_struct *tsk); static inline void vtime_user_exit(struct task_struct *tsk) @@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - /* On hard|softirq exit we always account to hard|softirq cputime */ - vtime_account_system(tsk); -} static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } @@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { } static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_irq_enter(struct task_struct *tsk); +static inline void vtime_account_irq_exit(struct task_struct *tsk) +{ + /* On hard|softirq exit we always account to hard|softirq cputime */ + vtime_account_system(tsk); +} +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ +static inline void vtime_account_irq_enter(struct task_struct *tsk) { } +static inline void vtime_account_irq_exit(struct task_struct *tsk) { } +#endif + + #ifdef CONFIG_IRQ_TIME_ACCOUNTING extern void irqtime_account_irq(struct task_struct *tsk); #else diff --git a/include/linux/wait.h b/include/linux/wait.h index 27d7a0ab5da3..c3ff74d764fa 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -600,6 +600,19 @@ do { \ __ret; \ }) +#define __wait_event_killable_exclusive(wq, condition) \ + ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ + schedule()) + +#define wait_event_killable_exclusive(wq, condition) \ +({ \ + int __ret = 0; \ + might_sleep(); \ + if (!(condition)) \ + __ret = __wait_event_killable_exclusive(wq, condition); \ + __ret; \ +}) 
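wait_event_killable_exclusive() above pairs an exclusive (wake-one) wait with TASK_KILLABLE: ordinary signals are ignored, but a fatal signal aborts the wait with -ERESTARTSYS. Illustrative use (the wait queue head and condition flag are hypothetical):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static bool my_ready;

    static int my_wait_for_ready(void)
    {
            /* Returns 0 once my_ready is true, -ERESTARTSYS if killed. */
            return wait_event_killable_exclusive(my_wq, my_ready);
    }

    static void my_mark_ready(void)
    {
            my_ready = true;
            wake_up(&my_wq);        /* wakes one exclusive waiter */
    }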
+ #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d0b5ca5d4e08..fc1e16c25a29 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -320,7 +320,7 @@ void laptop_mode_timer_fn(unsigned long data); static inline void laptop_sync_completion(void) { } #endif void throttle_vm_writeout(gfp_t gfp_mask); -bool zone_dirty_ok(struct zone *zone); +bool node_dirty_ok(struct pglist_data *pgdat); int wb_domain_init(struct wb_domain *dom, gfp_t gfp); #ifdef CONFIG_CGROUP_WRITEBACK void wb_domain_exit(struct wb_domain *dom); @@ -384,4 +384,7 @@ void tag_pages_for_writeback(struct address_space *mapping, void account_page_redirty(struct page *page); +void sb_mark_inode_writeback(struct inode *inode); +void sb_clear_inode_writeback(struct inode *inode); + #endif /* WRITEBACK_H */