author | David S. Miller <davem@davemloft.net> | 2016-10-30 17:42:58 +0100
committer | David S. Miller <davem@davemloft.net> | 2016-10-30 17:42:58 +0100
commit | 27058af401e49d88a905df000dd26f443fcfa8ce (patch)
tree | 819f32113d3b8374b9fbf72e2202d4c4d4511a60 /include
parent | firewire: net: really fix maximum possible MTU (diff)
parent | Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
download | linux-27058af401e49d88a905df000dd26f443fcfa8ce.tar.xz | linux-27058af401e49d88a905df000dd26f443fcfa8ce.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes.
For example, David Ahern's adjacency list revamp in 'net-next'
conflicted with an adjacency list traversal bug fix in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
65 files changed, 2156 insertions, 372 deletions
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 17a940a14477..8caa79c61703 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan);
 static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
 		int subspace_id)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
 #endif
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000000..63554e9f6e0c
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+	.globl KSYM(__ksymtab_\name)
+	.section ___ksymtab\sec+\name,"a"
+	.balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+	__put \val, KSYM(__kstrtab_\name)
+	.previous
+	.section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+	.asciz "_\name"
+#else
+	.asciz "\name"
+#endif
+	.previous
+#ifdef CONFIG_MODVERSIONS
+	.section ___kcrctab\sec+\name,"a"
+	.balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+	__put KSYM(__crc_\name)
+	.weak KSYM(__crc_\name)
+	.previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec) \
+	__cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf) \
+	___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled) \
+	__cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name) \
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name) \
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name) \
+	__EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name) \
+	__EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h
deleted file mode 100644
index cf14f2ff40b6..000000000000
--- a/include/asm-generic/libata-portmap.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
-#define __ASM_GENERIC_LIBATA_PORTMAP_H
-
-#define ATA_PRIMARY_IRQ(dev)	14
-#define ATA_SECONDARY_IRQ(dev)	15
-
-#endif
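The pcc.h hunk above changes the stub to fail with ERR_PTR(-ENODEV) instead of NULL, so callers can treat the disabled case like any other lookup failure. The following is a minimal userspace re-implementation of the kernel's ERR_PTR/IS_ERR convention (the helpers and MAX_ERRNO are redefined here for the demo, not taken from a header), showing what a caller of the stub gains:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ENODEV_DEMO	19	/* stand-in for <errno.h> ENODEV */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers live in the top MAX_ERRNO bytes of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct mbox_chan;

/* mirrors the !CONFIG_PCC stub in the hunk above */
static struct mbox_chan *pcc_mbox_request_channel_stub(void)
{
	return ERR_PTR(-ENODEV_DEMO);
}

int main(void)
{
	struct mbox_chan *chan = pcc_mbox_request_channel_stub();

	if (IS_ERR(chan))
		printf("request failed: %ld\n", PTR_ERR(chan)); /* -19 */
	return 0;
}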
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 4d9f233c4ba8..40e887068da2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -65,6 +65,11 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_DEF_ATTRIBUTES
 #endif

+#define raw_cpu_generic_read(pcp) \
+({ \
+	*raw_cpu_ptr(&(pcp)); \
+})
+
 #define raw_cpu_generic_to_op(pcp, val, op) \
 do { \
	*raw_cpu_ptr(&(pcp)) op val; \
@@ -72,34 +77,39 @@ do { \

 #define raw_cpu_generic_add_return(pcp, val) \
 ({ \
-	raw_cpu_add(pcp, val); \
-	raw_cpu_read(pcp); \
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+	\
+	*__p += val; \
+	*__p; \
 })

 #define raw_cpu_generic_xchg(pcp, nval) \
 ({ \
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
	typeof(pcp) __ret; \
-	__ret = raw_cpu_read(pcp); \
-	raw_cpu_write(pcp, nval); \
+	__ret = *__p; \
+	*__p = nval; \
	__ret; \
 })

 #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
 ({ \
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
	typeof(pcp) __ret; \
-	__ret = raw_cpu_read(pcp); \
+	__ret = *__p; \
	if (__ret == (oval)) \
-		raw_cpu_write(pcp, nval); \
+		*__p = nval; \
	__ret; \
 })

 #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({ \
+	typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
+	typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
	int __ret = 0; \
-	if (raw_cpu_read(pcp1) == (oval1) && \
-	    raw_cpu_read(pcp2) == (oval2)) { \
-		raw_cpu_write(pcp1, nval1); \
-		raw_cpu_write(pcp2, nval2); \
+	if (*__p1 == (oval1) && *__p2 == (oval2)) { \
+		*__p1 = nval1; \
+		*__p2 = nval2; \
		__ret = 1; \
	} \
	(__ret); \
@@ -109,7 +119,7 @@ do { \
 ({ \
	typeof(pcp) __ret; \
	preempt_disable(); \
-	__ret = *this_cpu_ptr(&(pcp)); \
+	__ret = raw_cpu_generic_read(pcp); \
	preempt_enable(); \
	__ret; \
 })
@@ -118,17 +128,17 @@ do { \
 do { \
	unsigned long __flags; \
	raw_local_irq_save(__flags); \
-	*raw_cpu_ptr(&(pcp)) op val; \
+	raw_cpu_generic_to_op(pcp, val, op); \
	raw_local_irq_restore(__flags); \
 } while (0)
+
 #define this_cpu_generic_add_return(pcp, val) \
 ({ \
	typeof(pcp) __ret; \
	unsigned long __flags; \
	raw_local_irq_save(__flags); \
-	raw_cpu_add(pcp, val); \
-	__ret = raw_cpu_read(pcp); \
+	__ret = raw_cpu_generic_add_return(pcp, val); \
	raw_local_irq_restore(__flags); \
	__ret; \
 })
@@ -138,8 +148,7 @@ do { \
	typeof(pcp) __ret; \
	unsigned long __flags; \
	raw_local_irq_save(__flags); \
-	__ret = raw_cpu_read(pcp); \
-	raw_cpu_write(pcp, nval); \
+	__ret = raw_cpu_generic_xchg(pcp, nval); \
	raw_local_irq_restore(__flags); \
	__ret; \
 })
@@ -149,9 +158,7 @@ do { \
	typeof(pcp) __ret; \
	unsigned long __flags; \
	raw_local_irq_save(__flags); \
-	__ret = raw_cpu_read(pcp); \
-	if (__ret == (oval)) \
-		raw_cpu_write(pcp, nval); \
+	__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
	raw_local_irq_restore(__flags); \
	__ret; \
 })
@@ -168,16 +175,16 @@ do { \
 })

 #ifndef raw_cpu_read_1
-#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_2
-#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_4
-#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_8
-#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp)
 #endif

 #ifndef raw_cpu_write_1
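The percpu.h rewrite above makes the generic helpers take the per-cpu pointer once and reuse it, rather than performing two independent raw_cpu_ptr() lookups. A toy userspace analogue (slot_ptr() standing in for raw_cpu_ptr(); all names here are invented for the demo) makes the difference observable:

#include <stdio.h>

static int slot;
static int lookups;

static int *slot_ptr(void)	/* stands in for raw_cpu_ptr() */
{
	lookups++;
	return &slot;
}

/* two lookups, like the old raw_cpu_add() + raw_cpu_read() pair */
#define add_return_old(val) ({ *slot_ptr() += (val); *slot_ptr(); })

/* one lookup, like the rewritten raw_cpu_generic_add_return() */
#define add_return_new(val) \
({ \
	int *__p = slot_ptr(); \
	*__p += (val); \
	*__p; \
})

int main(void)
{
	add_return_old(1);
	printf("old: %d lookups\n", lookups);	/* 2 */
	lookups = 0;
	add_return_new(1);
	printf("new: %d lookups\n", lookups);	/* 1 */
	return 0;
}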
generates + * .data.identifier which needs to be pulled in with .data, but don't want to + * pull in .data..stuff which has its own requirements. Same for bss. + */ #define DATA_DATA \ - *(.data) \ + *(.data .data.[0-9a-zA-Z_]*) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data) \ @@ -320,76 +325,76 @@ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab) = .; \ - *(SORT(___ksymtab+*)) \ + KEEP(*(SORT(___ksymtab+*))) \ VMLINUX_SYMBOL(__stop___ksymtab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ - *(SORT(___ksymtab_gpl+*)) \ + KEEP(*(SORT(___ksymtab_gpl+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ - *(SORT(___ksymtab_unused+*)) \ + KEEP(*(SORT(___ksymtab_unused+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ - *(SORT(___ksymtab_unused_gpl+*)) \ + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ - *(SORT(___ksymtab_gpl_future+*)) \ + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab) = .; \ - *(SORT(___kcrctab+*)) \ + KEEP(*(SORT(___kcrctab+*))) \ VMLINUX_SYMBOL(__stop___kcrctab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ - *(SORT(___kcrctab_gpl+*)) \ + KEEP(*(SORT(___kcrctab_gpl+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ - *(SORT(___kcrctab_unused+*)) \ + KEEP(*(SORT(___kcrctab_unused+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ - *(SORT(___kcrctab_unused_gpl+*)) \ + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ - *(SORT(___kcrctab_gpl_future+*)) \ + KEEP(*(SORT(___kcrctab_gpl_future+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ } \ \ /* Kernel symbol table: strings */ \ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ - *(__ksymtab_strings) \ + KEEP(*(__ksymtab_strings)) \ } \ \ /* __*init sections */ \ @@ -424,12 +429,17 @@ #define SECURITY_INIT \ .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__security_initcall_start) = .; \ - 
-		*(.security_initcall.init) \
+		KEEP(*(.security_initcall.init)) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

 /* .text section. Map to function alignment to avoid address changes
- * during second ld run in second ld pass when generating System.map */
+ * during second ld run in second ld pass when generating System.map
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
+ * .text.identifier which needs to be pulled in with .text , but some
+ * architectures define .text.foo which is not intended to be pulled in here.
+ * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+ * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
 #define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot .text .text.fixup .text.unlikely) \
@@ -533,6 +543,7 @@
 /* init and exit section handling */
 #define INIT_DATA \
+	KEEP(*(SORT(___kentry+*))) \
	*(.init.data) \
	MEM_DISCARD(init.data) \
	KERNEL_CTORS() \
@@ -599,7 +610,7 @@
	BSS_FIRST_SECTIONS \
	*(.bss..page_aligned) \
	*(.dynbss) \
-	*(.bss) \
+	*(.bss .bss.[0-9a-zA-Z_]*) \
	*(COMMON) \
 }
@@ -682,12 +693,12 @@
 #define INIT_CALLS_LEVEL(level) \
	VMLINUX_SYMBOL(__initcall##level##_start) = .; \
-	*(.initcall##level##.init) \
-	*(.initcall##level##s.init) \
+	KEEP(*(.initcall##level##.init)) \
+	KEEP(*(.initcall##level##s.init)) \

 #define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
-	*(.initcallearly.init) \
+	KEEP(*(.initcallearly.init)) \
	INIT_CALLS_LEVEL(0) \
	INIT_CALLS_LEVEL(1) \
	INIT_CALLS_LEVEL(2) \
@@ -701,21 +712,21 @@
 #define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
-	*(.con_initcall.init) \
+	KEEP(*(.con_initcall.init)) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

 #define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
-	*(.security_initcall.init) \
+	KEEP(*(.security_initcall.init)) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
-	*(.init.ramfs) \
+	KEEP(*(.init.ramfs)) \
	. = ALIGN(8); \
-	*(.init.ramfs.info)
+	KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
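The KEEP() annotations above protect tables whose entries are never referenced by name: the kernel only reaches them through linker-provided start/stop symbols, so section garbage collection would otherwise discard them. A userspace sketch of the same pattern (the my_initcalls section name and macro are invented for the demo; on ELF targets the linker synthesizes __start_/__stop_ symbols for sections whose names are valid C identifiers):

#include <stdio.h>

typedef void (*initcall_t)(void);

/* drop a function pointer into a dedicated section, initcall-style */
#define my_initcall(fn) \
	static initcall_t __initcall_##fn \
	__attribute__((used, section("my_initcalls"))) = fn

extern initcall_t __start_my_initcalls[];
extern initcall_t __stop_my_initcalls[];

static void hello(void) { puts("hello from an initcall"); }
my_initcall(hello);

int main(void)
{
	/* walk the table via the section bounds, as INIT_CALLS does;
	 * under ld --gc-sections this section would need a KEEP() in
	 * the linker script, exactly as in the hunk above */
	for (initcall_t *fn = __start_my_initcalls; fn < __stop_my_initcalls; fn++)
		(*fn)();
	return 0;
}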
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 94afcb2c384c..689a8b9b9c8f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);

 extern int ec_read(u8 addr, u8 *val);
@@ -946,9 +947,17 @@ struct acpi_reference_args {
 #ifdef CONFIG_ACPI
 int acpi_dev_get_property(struct acpi_device *adev, const char *name,
			  acpi_object_type type, const union acpi_object **obj);
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-				     const char *name, size_t index,
-				     struct acpi_reference_args *args);
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index, size_t num_args,
+				struct acpi_reference_args *args);
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index,
+				struct acpi_reference_args *args)
+{
+	return __acpi_node_get_property_reference(fwnode, name, index,
+						  MAX_ACPI_REFERENCE_ARGS, args);
+}

 int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
		       void **valptr);
@@ -1024,6 +1033,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
	return -ENXIO;
 }

+static inline int
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index, size_t num_args,
+				struct acpi_reference_args *args)
+{
+	return -ENXIO;
+}
+
 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
				const char *name, size_t index,
				struct acpi_reference_args *args)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index adbc812c009b..fdb180367ba1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -105,6 +105,7 @@ enum {
	ATA_ID_CFA_KEY_MGMT	= 162,
	ATA_ID_CFA_MODES	= 163,
	ATA_ID_DATA_SET_MGMT	= 169,
+	ATA_ID_SCT_CMD_XPORT	= 206,
	ATA_ID_ROT_SPEED	= 217,
	ATA_ID_PIO4		= (1 << 1),
@@ -789,6 +790,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
 }

 /**
+ *
+ * Word: 206 - SCT Command Transport
+ *    15:12 - Vendor Specific
+ *     11:6 - Reserved
+ *        5 - SCT Command Transport Data Tables supported
+ *        4 - SCT Command Transport Features Control supported
+ *        3 - SCT Command Transport Error Recovery Control supported
+ *        2 - SCT Command Transport Write Same supported
+ *        1 - SCT Command Transport Long Sector Access supported
+ *        0 - SCT Command Transport supported
+ */
+static inline bool ata_id_sct_data_tables(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
+}
+
+static inline bool ata_id_sct_features_ctrl(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
+}
+
+static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
+}
+
+static inline bool ata_id_sct_write_same(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
+}
+
+static inline bool ata_id_sct_long_sector_access(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
+}
+
+static inline bool ata_id_sct_supported(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
+}
+
+/**
 * ata_id_major_version - get ATA level of drive
 * @id: Identify data
 *
@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
 #endif
 }

-/*
- * Write LBA Range Entries to the buffer that will cover the extent from
- * sector to sector + count. This is used for TRIM and for ADD LBA(S)
- * TO NV CACHE PINNED SET.
- */
-static inline unsigned ata_set_lba_range_entries(void *_buffer,
-		unsigned num, u64 sector, unsigned long count)
-{
-	__le64 *buffer = _buffer;
-	unsigned i = 0, used_bytes;
-
-	while (i < num) {
-		u64 entry = sector |
-			((u64)(count > 0xffff ? 0xffff : count) << 48);
-		buffer[i++] = __cpu_to_le64(entry);
-		if (count <= 0xffff)
-			break;
-		count -= 0xffff;
-		sector += 0xffff;
-	}
-
-	used_bytes = ALIGN(i * 8, 512);
-	memset(buffer + i, 0, used_bytes - i * 8);
-	return used_bytes;
-}
-
 static inline bool ata_ok(u8 status)
 {
	return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
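The new ata_id_sct_*() helpers above test individual capability bits of IDENTIFY DEVICE word 206, per the bit layout documented in the comment. A userspace sketch of the same decoding (the sample word value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define ATA_ID_SCT_CMD_XPORT 206

int main(void)
{
	uint16_t id[256] = {0};

	id[ATA_ID_SCT_CMD_XPORT] = 0x003d;	/* hypothetical drive */

	/* same tests as ata_id_sct_supported() and friends */
	printf("SCT supported:   %d\n", !!(id[ATA_ID_SCT_CMD_XPORT] & (1 << 0)));
	printf("SCT write same:  %d\n", !!(id[ATA_ID_SCT_CMD_XPORT] & (1 << 2)));
	printf("SCT data tables: %d\n", !!(id[ATA_ID_SCT_CMD_XPORT] & (1 << 5)));
	return 0;
}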
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index cbdbf34de5b6..3bf5d33800ab 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -343,16 +343,7 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
 */
 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
 {
-	char *p;
-
-	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-	if (!p) {
-		strncpy(buf, "<unavailable>", buflen);
-		return -ENAMETOOLONG;
-	}
-
-	memmove(buf, p, buf + buflen - p);
-	return 0;
+	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
 }

 /**
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 440a72164a11..c83c23f0577b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -97,7 +97,7 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 void cgroup_file_notify(struct cgroup_file *cfile);

-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);
@@ -555,8 +555,7 @@ static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
	return kernfs_name(cgrp->kn, buf, buflen);
 }

-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
-					      size_t buflen)
+static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
 {
	return kernfs_path(cgrp->kn, buf, buflen);
 }
@@ -658,8 +657,8 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-		     struct cgroup_namespace *ns);
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+		   struct cgroup_namespace *ns);

 #else /* !CONFIG_CGROUPS */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
 * routines, one at of_clk_init(), and one at platform device probe
 */
 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-	static void name##_of_clk_init_driver(struct device_node *np) \
+	static void __init name##_of_clk_init_driver(struct device_node *np) \
	{ \
		of_node_clear_flag(np, OF_POPULATED); \
		fn(np); \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 573c5a18908f..432f5c97e18f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
 #endif /* GCC_VERSION >= 40300 */

 #if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
 /*
 * Mark a position in code as unreachable. This can be used to
 * suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 668569844d37..cf0fa5d86059 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define unreachable() do { } while (1)
 #endif

+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
+ * linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym) \
+	extern typeof(sym) sym; \
+	static const unsigned long __kentry_##sym \
+	__used \
+	__attribute__((section("___kentry" "+" #sym ), used)) \
+	= (unsigned long)&sym;
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
@@ -406,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __attribute_const__	/* unimplemented */
 #endif

+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 631ba33bbe9f..32dc0cbd51ca 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq >= target_freq)
-			return i;
+			return pos - table;

-		best = i;
+		best = pos;
	}

-	return best;
+	return best - table;
 }

 /* Find lowest freq at or above target in a table in descending order */
@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq == target_freq)
-			return i;
+			return pos - table;

		if (freq > target_freq) {
-			best = i;
+			best = pos;
			continue;
		}

		/* No freq found above target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;

-		return best;
+		return best - table;
	}

-	return best;
+	return best - table;
 }

 /* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq == target_freq)
-			return i;
+			return pos - table;

		if (freq < target_freq) {
-			best = i;
+			best = pos;
			continue;
		}

		/* No freq found below target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;

-		return best;
+		return best - table;
	}

-	return best;
+	return best - table;
 }

 /* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq <= target_freq)
-			return i;
+			return pos - table;

-		best = i;
+		best = pos;
	}

-	return best;
+	return best - table;
 }

 /* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq == target_freq)
-			return i;
+			return pos - table;
		if (freq < target_freq) {
-			best = i;
+			best = pos;
			continue;
		}

		/* No freq found below target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;

		/* Choose the closest freq */
-		if (target_freq - table[best].frequency > freq - target_freq)
-			return i;
+		if (target_freq - best->frequency > freq - target_freq)
+			return pos - table;

-		return best;
+		return best - table;
	}

-	return best;
+	return best - table;
 }

 /* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq)
 {
	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
	unsigned int freq;
-	int i, best = -1;

-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;

		if (freq == target_freq)
-			return i;
+			return pos - table;

		if (freq > target_freq) {
-			best = i;
+			best = pos;
			continue;
		}

		/* No freq found above target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;

		/* Choose the closest freq */
-		if (table[best].frequency - target_freq > target_freq - freq)
-			return i;
+		if (best->frequency - target_freq > target_freq - freq)
+			return pos - table;

-		return best;
+		return best - table;
	}

-	return best;
+	return best - table;
 }

 /* Works only on sorted freq-tables */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9b207a8c5af3..afe641c02dca 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -81,6 +81,7 @@ enum cpuhp_state {
	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
	CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
	CPUHP_AP_DUMMY_TIMER_STARTING,
+	CPUHP_AP_JCORE_TIMER_STARTING,
	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
	CPUHP_AP_ARM_TWD_STARTING,
	CPUHP_AP_METAG_TIMER_STARTING,
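The cpufreq rewrite above walks the table with a pointer and keeps "best" as a pointer too, using table - 1 as the "nothing found yet" sentinel and converting back to an index by pointer subtraction. A simplified userspace sketch of one of the variants (cpufreq_for_each_valid_entry() also skips invalid entries, which this plain loop omits; the table contents are made up):

#include <stdio.h>

#define CPUFREQ_TABLE_END (~0u)

struct freq_entry { unsigned int frequency; };

/* ascending table: lowest frequency at or above target, like _al above */
static int find_index_at_or_above(struct freq_entry *table, unsigned int target)
{
	struct freq_entry *pos, *best = table - 1;

	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) {
		if (pos->frequency >= target)
			return pos - table;	/* index via pointer math */
		best = pos;
	}
	return best - table;	/* -1 if the table was empty */
}

int main(void)
{
	struct freq_entry table[] = {
		{ 500000 }, { 1000000 }, { 2000000 }, { CPUFREQ_TABLE_END },
	};

	printf("index: %d\n", find_index_at_or_above(table, 800000)); /* 1 */
	return 0;
}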
diff --git a/include/linux/export.h b/include/linux/export.h
index d7df4922da1d..2a0f61fbc731 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
+
 /*
 * Export symbols from the kernel to modules. Forked from module.h
 * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
 * generate a checksums for some symbols */
-#define __CRC_SYMBOL(sym, sec) \
-	extern __visible void *__crc_##sym __attribute__((weak)); \
-	static const unsigned long __kcrctab_##sym \
-	__used \
-	__attribute__((section("___kcrctab" sec "+" #sym), unused)) \
+#define __CRC_SYMBOL(sym, sec) \
+	extern __visible void *__crc_##sym __attribute__((weak)); \
+	static const unsigned long __kcrctab_##sym \
+	__used \
+	__attribute__((section("___kcrctab" sec "+" #sym), used)) \
	= (unsigned long) &__crc_##sym;
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif

 /* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
-	extern typeof(sym) sym; \
-	__CRC_SYMBOL(sym, sec) \
-	static const char __kstrtab_##sym[] \
-	__attribute__((section("__ksymtab_strings"), aligned(1))) \
-	= VMLINUX_SYMBOL_STR(sym); \
-	extern const struct kernel_symbol __ksymtab_##sym; \
-	__visible const struct kernel_symbol __ksymtab_##sym \
-	__used \
-	__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
+#define ___EXPORT_SYMBOL(sym, sec) \
+	extern typeof(sym) sym; \
+	__CRC_SYMBOL(sym, sec) \
+	static const char __kstrtab_##sym[] \
+	__attribute__((section("__ksymtab_strings"), aligned(1))) \
+	= VMLINUX_SYMBOL_STR(sym); \
+	static const struct kernel_symbol __ksymtab_##sym \
+	__used \
+	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
	= { (unsigned long)&sym, __kstrtab_##sym }

 #if defined(__KSYM_DEPS__)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index aca2a6a1d035..6e84b2cae6ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
		int (*)(const void *, struct file *, unsigned),
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bc65d5918140..16d2b6e874d6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2934,6 +2934,7 @@ extern int vfs_stat(const char __user *, struct kstat *);
 extern int vfs_lstat(const char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+extern const char *vfs_get_link(struct dentry *, struct delayed_call *);

 extern int __generic_block_fiemap(struct inode *inode,
				  struct fiemap_extent_info *fieinfo,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 1dbf52f9c24b..e0341af6950e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);

 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);

 static inline sector_t get_start_sect(struct block_device *bdev)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);

-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
-	const struct kobject *kobj = &device_obj->device.kobj;
-
-	return kobj->name;
-}
-
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/init.h b/include/linux/init.h
index 5a3321a7909b..e30104ceb86d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@

 /* These are for everybody (although not all archs will actually
   discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -75,7 +75,8 @@
 #define __exit		__section(.exit.text) __exitused __cold notrace

 /* Used for MEMORY_HOTPLUG */
-#define __meminit	__section(.meminit.text) __cold notrace
+#define __meminit	__section(.meminit.text) __cold notrace \
+						 __latent_entropy
 #define __meminitdata	__section(.meminit.data)
 #define __meminitconst	__section(.meminit.rodata)
 #define __memexit	__section(.memexit.text) __exitused __cold notrace
@@ -139,24 +140,8 @@ extern bool initcall_debug;

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
-	; /* yes this is needed */			\
-	static __used __exit void *reference_##x(void)	\
-	{						\
-		return &x;				\
-	}
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
 * subsections. Ordering inside the subsections is determined
 * by link order.
 * For backwards compatibility, initcall() puts the call in
@@ -164,12 +149,16 @@ extern bool initcall_debug;
 *
 * The `id' arg to __define_initcall() is needed so that multiple initcalls
 * can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
 */

 #define __define_initcall(fn, id) \
	static initcall_t __initcall_##fn##id __used \
-	__attribute__((__section__(".initcall" #id ".init"))) = fn; \
-	LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+	__attribute__((__section__(".initcall" #id ".init"))) = fn;

 /*
 * Early initcalls run before initializing SMP.
@@ -205,15 +194,15 @@ extern bool initcall_debug;

 #define __initcall(fn) device_initcall(fn)

-#define __exitcall(fn) \
+#define __exitcall(fn)						\
	static exitcall_t __exitcall_##fn __exit_call = fn

-#define console_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define console_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
	__used __section(.con_initcall.init) = fn

-#define security_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define security_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
	__used __section(.security_initcall.init) = fn

 struct obs_kernel_param {
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);

+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and wants to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+					     resource_size_t size)
+{
+	return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+					   resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */
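A hypothetical driver sketch (not part of the patch) showing the call pattern the new io.h hooks are meant for: reserve the aperture's memtype as write-combining before mapping it, and release the reservation on teardown. On architectures without PAT-style tracking the default stubs above make both calls no-ops.

#include <linux/errno.h>
#include <linux/io.h>

static resource_size_t aper_base, aper_size;	/* filled in at probe time */
static void __iomem *aper;

static int demo_map_aperture(void)
{
	int ret = arch_io_reserve_memtype_wc(aper_base, aper_size);

	if (ret)
		return ret;

	aper = ioremap_wc(aper_base, aper_size);
	if (!aper) {
		arch_io_free_memtype_wc(aper_base, aper_size);
		return -ENOMEM;
	}
	return 0;
}

static void demo_unmap_aperture(void)
{
	iounmap(aper);
	arch_io_free_memtype_wc(aper_base, aper_size);
}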
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */

 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
 */
-#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
-#define IOMAP_F_SHARED	0x02	/* block shared with another file */
-#define IOMAP_F_NEW	0x04	/* blocks have been newly allocated */
+#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
+#define IOMAP_F_SHARED	0x20	/* block shared with another file */

 /*
 * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
 * Flags for iomap_begin / iomap_end. No flag implies a read.
 */
-#define IOMAP_WRITE		(1 << 0)
-#define IOMAP_ZERO		(1 << 1)
+#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */

 struct iomap_ops {
	/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..ca1ad9ebbc92 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
 };

 #if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
	return flags & IP6SKB_L3SLAVE;
 }
 #else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
	return false;
 }
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)

 static inline int inet6_iif(const struct sk_buff *skb)
 {
-	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+	bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);

	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }

+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
+		return true;
+#endif
+	return false;
+}
+
 struct tcp6_request_sock {
	struct tcp_request_sock	  tcp6rsk_tcp;
 };
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8361c8d3edd1..b7e34313cdfe 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -290,7 +290,7 @@
 #define GITS_BASER_TYPE_SHIFT		(56)
 #define GITS_BASER_TYPE(r)		(((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_ENTRY_SIZE_SHIFT	(48)
-#define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_SHAREABILITY_SHIFT	(10)
 #define GITS_BASER_InnerShareable					\
	GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303306eb..820c0ad54a01 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);

 void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);

 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}

 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
 * the last step cherry picks the 2nd arg, we get a zero.
 */
-#define config_enabled(cfg)		___is_defined(cfg)
 #define __is_defined(x)			___is_defined(x)
 #define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
 * otherwise. For boolean options, this is equivalent to
 * IS_ENABLED(CONFIG_FOO).
 */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)

 /*
 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
 * otherwise.
 */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)

 /*
 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
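The kconfig.h change above drops config_enabled(), which was just an alias for __is_defined(). The underlying preprocessor trick is worth seeing in isolation; this userspace re-implementation (macros redefined locally for the demo) shows how a macro defined as 1 shifts an extra argument in front so the second-argument picker yields 1, while anything else falls through to 0:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_FOO 1
/* CONFIG_BAR deliberately not defined */

int main(void)
{
	/* CONFIG_FOO expands to 1, so the placeholder inserts "0," and
	 * the picker sees (0, 1, 0) -> 1; CONFIG_BAR stays a lone junk
	 * token, so the picker sees (junk 1, 0) -> 0 */
	printf("FOO: %d\n", __is_defined(CONFIG_FOO));	/* 1 */
	printf("BAR: %d\n", __is_defined(CONFIG_BAR));	/* 0 */
	return 0;
}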
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 96356ef012de..7056238fd9f5 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -269,10 +269,8 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
 }

 int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-size_t kernfs_path_len(struct kernfs_node *kn);
 int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
			  char *buf, size_t buflen);
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
 void pr_cont_kernfs_name(struct kernfs_node *kn);
 void pr_cont_kernfs_path(struct kernfs_node *kn);
 struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -341,12 +339,10 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
 static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
 { return -ENOSYS; }

-static inline size_t kernfs_path_len(struct kernfs_node *kn)
-{ return 0; }
-
-static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
-				size_t buflen)
-{ return NULL; }
+static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
+					struct kernfs_node *kn,
+					char *buf, size_t buflen)
+{ return -ENOSYS; }

 static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
 static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
@@ -436,6 +432,22 @@ static inline void kernfs_init(void) { }

 #endif	/* CONFIG_KERNFS */

+/**
+ * kernfs_path - build full path of a given node
+ * @kn: kernfs_node of interest
+ * @buf: buffer to copy @kn's name into
+ * @buflen: size of @buf
+ *
+ * Builds and returns the full path of @kn in @buf of @buflen bytes. The
+ * path is built from the end of @buf so the returned pointer usually
+ * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
+ * and %NULL is returned.
+ */
+static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
+{
+	return kernfs_path_from_node(kn, NULL, buf, buflen);
+}
+
 static inline struct kernfs_node *
 kernfs_find_and_get(struct kernfs_node *kn, const char *name)
 {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e37d4f99f510..616eef4d81ea 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,7 +46,8 @@
 #ifdef CONFIG_ATA_NONSTANDARD
 #include <asm/libata-portmap.h>
 #else
-#include <asm-generic/libata-portmap.h>
+#define ATA_PRIMARY_IRQ(dev)	14
+#define ATA_SECONDARY_IRQ(dev)	15
 #endif

 /*
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..3be7abd6e722 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
			     const u32 offset[], u32 value[],
			     size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
	u32				prev;
	int				miss_counter;
	bool				sick;
+	/* wq spinlock to synchronize draining */
+	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
+	unsigned long			flags;
	struct work_struct		work;
+	struct delayed_work		recover_work;
 };

 struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
 };

 enum {
-	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
	MLX5_COMP_EQ_SIZE = 1024,
 };

@@ -638,13 +638,6 @@ enum {
	MLX5_PTYS_EN = 1 << 2,
 };

-struct mlx5_db_pgdir {
-	struct list_head	list;
-	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-	__be32		       *db_page;
-	dma_addr_t		db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

 struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif

-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write);
+		void *buf, int len, unsigned int gup_flags);

-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		      unsigned long start, unsigned long nr_pages,
-		      unsigned int foll_flags, struct page **pages,
-		      struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages, int *locked);
+		    unsigned int gup_flags, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags);
+			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages);
+		    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
@@ -1306,7 +1302,7 @@ struct frame_vector {
 struct frame_vector *frame_vector_create(unsigned int nr_frames);
 void frame_vector_destroy(struct frame_vector *vec);
 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-		     bool write, bool force, struct frame_vector *vec);
+		     unsigned int gup_flags, struct frame_vector *vec);
 void put_vaddr_frames(struct frame_vector *vec);
 int frame_vector_to_pages(struct frame_vector *vec);
 void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
	!vma_growsup(vma->vm_next, addr);
 }

-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);

 extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
 #define FOLL_MLOCK	0x1000	/* lock present pages */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
+#define FOLL_COW	0x4000	/* internal GUP flag */

 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
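The mm.h hunks above fold the old (write, force) integer pair into a single gup_flags bitmask across the get_user_pages() family. A userspace sketch of the conversion callers had to perform (flag values as defined in mm.h of this era, where FOLL_WRITE is 0x01 and FOLL_FORCE is 0x10):

#include <stdio.h>

#define FOLL_WRITE 0x01
#define FOLL_FORCE 0x10

/* translate legacy (write, force) arguments into a gup_flags mask */
static unsigned int gup_flags_from_legacy(int write, int force)
{
	unsigned int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;
}

int main(void)
{
	printf("write+force -> 0x%x\n", gup_flags_from_legacy(1, 1)); /* 0x11 */
	return 0;
}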
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
	seqlock_t		span_seqlock;
 #endif

-	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_hash_nr_entries	-- the size of the hash table array
-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	*wait_table;
-	unsigned long		wait_table_hash_nr_entries;
-	unsigned long		wait_table_bits;
+	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)

 static inline bool zone_is_initialized(struct zone *zone)
 {
-	return !!zone->wait_table;
+	return zone->initialized;
 }

 static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 458c87631e7f..20ce8df115ac 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2167,7 +2167,10 @@ struct napi_gro_cb {
	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

-	/* 5 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 1 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
@@ -2178,6 +2181,40 @@ struct napi_gro_cb {

 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+					     struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+						   struct sock *sk,
+						   struct sk_buff **head,
+						   struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(sk, head, skb);
+}
+
 struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here */
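A hypothetical tunnel-driver sketch (not part of the patch) showing how an offload's .gro_receive callback is expected to recurse into the inner protocol through the guarded helper above, so a packet nested more than GRO_RECURSION_LIMIT encapsulation levels deep gets flushed instead of deepening the call stack further:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct sk_buff **demo_tunnel_gro_receive(struct sk_buff **head,
						struct sk_buff *skb)
{
	/* ... validate and strip this tunnel layer's header here ... */

	/* recurse into the inner Ethernet frame via the guarded helper;
	 * on recursion overflow it marks the skb for flush and bails out */
	return call_gro_receive(eth_gro_receive, head, skb);
}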
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7676557ce357..fc3c24206593 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,7 +16,6 @@
 #define _LINUX_NVME_H

 #include <linux/types.h>
-#include <linux/uuid.h>

 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN	256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
	char			fr[8];
	__u8			rab;
	__u8			ieee[3];
-	__u8			mic;
+	__u8			cmic;
	__u8			mdts;
	__le16			cntlid;
	__le32			ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
	__u8			apsta;
	__le16			wctemp;
	__le16			cctemp;
-	__u8			rsvd270[50];
+	__le16			mtfa;
+	__le32			hmpre;
+	__le32			hmmin;
+	__u8			tnvmcap[16];
+	__u8			unvmcap[16];
+	__le32			rpmbs;
+	__u8			rsvd316[4];
	__le16			kas;
	__u8			rsvd322[190];
	__u8			sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
	__le16			nabo;
	__le16			nabspf;
	__u16			rsvd46;
-	__le64			nvmcap[2];
+	__u8			nvmcap[16];
	__u8			rsvd64[40];
	__u8			nguid[16];
	__u8			eui64[8];
@@ -277,6 +282,16 @@ struct nvme_id_ns {
 };

 enum {
+	NVME_ID_CNS_NS			= 0x00,
+	NVME_ID_CNS_CTRL		= 0x01,
+	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
+	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
+	NVME_ID_CNS_NS_PRESENT		= 0x11,
+	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
+	NVME_ID_CNS_CTRL_LIST		= 0x13,
+};
+
+enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
+	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
+	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
@@ -583,6 +600,7 @@ enum {
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
+	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
 struct nvmf_disc_rsp_page_entry {
	__u8		trtype;
	__u8		adrfam;
-	__u8		nqntype;
+	__u8		subtype;
	__u8		treq;
	__le16		portid;
	__le16		cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
 };

 struct nvmf_connect_data {
-	uuid_be		hostid;
+	__u8		hostid[16];
	__le16		cntlid;
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
-	NVME_SC_FIRMWARE_NEEDS_RESET	= 0x10b,
+	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
-	NVME_SC_FW_NEEDS_RESET_SUBSYS	= 0x110,
+	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
+	NVME_SC_FW_NEEDS_RESET		= 0x111,
+	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
+	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113,
+	NVME_SC_OVERLAPPING_RANGE	= 0x114,
+	NVME_SC_NS_INSUFFICENT_CAP	= 0x115,
+	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
+	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
+	NVME_SC_NS_IS_PRIVATE		= 0x119,
+	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
+	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
+	NVME_SC_CTRL_LIST_INVALID	= 0x11c,

	/*
	 * I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	NVME_SC_DNR			= 0x4000,
 };
@@ -960,6 +990,7 @@ struct nvme_completion {
	__le16	status;		/* did the command fail, and if so, why? */
 };

-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+	(((major) << 16) | ((minor) << 8) | (tertiary))

 #endif /* _LINUX_NVME_H */
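The extended NVME_VS() macro above packs a third version component: the NVMe version word carries major in bits 31:16, minor in bits 15:8, and the tertiary number in bits 7:0. A quick userspace check of the packing and unpacking (macro copied from the hunk; the sample version is NVMe 1.2.1):

#include <stdio.h>

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

int main(void)
{
	unsigned int vs = NVME_VS(1, 2, 1);

	printf("VS register: 0x%06x\n", vs);		/* 0x010201 */
	printf("major %u minor %u tertiary %u\n",
	       vs >> 16, (vs >> 8) & 0xff, vs & 0xff);
	return 0;
}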
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index e4c08c1ff0c5..a1bacf1150b2 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -25,7 +25,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)

 static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-	WARN_ONCE(1, "free of protection key when disabled");
	return -EINVAL;
 }
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12

 /* forward */
 struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);

 bool qede_roce_supported(struct qede_dev *dev);

-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
 int qede_roce_dev_add(struct qede_dev *dev);
 void qede_roce_dev_event_open(struct qede_dev *dev);
 void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/random.h b/include/linux/random.h
index f7bb7a355cf7..7bd2403e4fef 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -18,9 +18,20 @@ struct random_ready_callback {
 };

 extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+	add_device_randomness((const void *)&latent_entropy,
+			      sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
-				 unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+				 unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;

 extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 663fda2887f7..cc6e23eaac91 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {

 /**
 *	skb_fclone_busy - check if fclone is busy
+ *	@sk: socket
 *	@skb: buffer
 *
 *	Returns true if skb is a fast clone, and its clone is not freed.
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0d7abb8b7315..91a740f6b884 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); -//asmlinkage long sys_pkey_get(int pkey, unsigned long flags); -//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights, -// unsigned long flags); #endif diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 45f004e9cc59..2873baf5372a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -14,17 +14,6 @@ struct timespec; struct compat_timespec; #ifdef CONFIG_THREAD_INFO_IN_TASK -struct thread_info { - unsigned long flags; /* low level flags */ -}; - -#define INIT_THREAD_INFO(tsk) \ -{ \ - .flags = 0, \ -} -#endif - -#ifdef CONFIG_THREAD_INFO_IN_TASK #define current_thread_info() ((struct thread_info *)current) #endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f2d072787947..8f998afc1384 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_mc_close(struct sock *sk); void ipv6_sock_mc_close(struct sock *sk); bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, const struct in6_addr *src_addr); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c575583b50fb..2019310cf135 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -820,9 +820,9 @@ enum station_parameters_apply_mask { * (or NULL for no change) * @supported_rates_len: number of supported rates * @sta_flags_mask: station flags that changed - * (bitmask of BIT(NL80211_STA_FLAG_...)) + * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @sta_flags_set: station flags values - * (bitmask of BIT(NL80211_STA_FLAG_...)) + * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change * @peer_aid: mesh peer AID or zero for no change @@ -3160,47 +3160,54 @@ struct ieee80211_iface_limit { * * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total: * - * struct ieee80211_iface_limit limits1[] = { - * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, - * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, }, - * }; - * struct ieee80211_iface_combination combination1 = { - * .limits = limits1, - * .n_limits = ARRAY_SIZE(limits1), - * .max_interfaces = 2, - * .beacon_int_infra_match = true, - * }; + * .. code-block:: c + * + * struct ieee80211_iface_limit limits1[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), }, + * }; + * struct ieee80211_iface_combination combination1 = { + * .limits = limits1, + * .n_limits = ARRAY_SIZE(limits1), + * .max_interfaces = 2, + * .beacon_int_infra_match = true, + * }; * * * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total: * - * struct ieee80211_iface_limit limits2[] = { - * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | - * BIT(NL80211_IFTYPE_P2P_GO), }, - * }; - * struct ieee80211_iface_combination combination2 = { - * .limits = limits2, - * .n_limits = ARRAY_SIZE(limits2), - * .max_interfaces = 8, - * .num_different_channels = 1, - * }; + * .. code-block:: c
+ * + * struct ieee80211_iface_limit limits2[] = { + * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | + * BIT(NL80211_IFTYPE_P2P_GO), }, + * }; + * struct ieee80211_iface_combination combination2 = { + * .limits = limits2, + * .n_limits = ARRAY_SIZE(limits2), + * .max_interfaces = 8, + * .num_different_channels = 1, + * }; * * * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total. * - * This allows for an infrastructure connection and three P2P connections. + * This allows for an infrastructure connection and three P2P connections. + * + * .. code-block:: c + * + * struct ieee80211_iface_limit limits3[] = { + * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, + * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | + * BIT(NL80211_IFTYPE_P2P_CLIENT), }, + * }; + * struct ieee80211_iface_combination combination3 = { + * .limits = limits3, + * .n_limits = ARRAY_SIZE(limits3), + * .max_interfaces = 4, + * .num_different_channels = 2, + * }; + * - * struct ieee80211_iface_limit limits3[] = { - * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, - * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | - * BIT(NL80211_IFTYPE_P2P_CLIENT), }, - * }; - * struct ieee80211_iface_combination combination3 = { - * .limits = limits3, - * .n_limits = ARRAY_SIZE(limits3), - * .max_interfaces = 4, - * .num_different_channels = 2, - * }; */ struct ieee80211_iface_combination { const struct ieee80211_iface_limit *limits; @@ -4120,14 +4127,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); */ /** + * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 + * @skb: the 802.11 data frame + * @ehdr: pointer to a &struct ethhdr that will get the header, instead + * of it being pushed into the SKB + * @addr: the device MAC address + * @iftype: the virtual interface type + * Return: 0 on success. Non-zero on error. + */ +int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype); + +/** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @addr: the device MAC address * @iftype: the virtual interface type * Return: 0 on success. Non-zero on error. */ -int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype); +static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) +{ + return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype); +} /** * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 @@ -4145,22 +4167,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, /** * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame * - * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of - * 802.3 frames. The @list will be empty if the decode fails. The - * @skb is consumed after the function returns. + * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. + * The @list will be empty if the decode fails. The @skb must be fully + * header-less before being passed in here; it is freed in this function. * - * @skb: The input IEEE 802.11n A-MSDU frame. + * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list.
- * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. + * @check_da: DA to check in the inner ethernet header, or NULL + * @check_sa: SA to check in the inner ethernet header, or NULL */ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, - bool has_80211_header); + const u8 *check_da, const u8 *check_sa); /** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 515352c6280a..b0576cb2ab25 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -190,8 +190,8 @@ struct inet6_dev { __u32 if_flags; int dead; + u32 desync_factor; u8 rndid[8]; - struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; diff --git a/include/net/ip.h b/include/net/ip.h index bc43c0fcae12..5413883ac47f 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -38,7 +38,7 @@ struct sock; struct inet_skb_parm { int iif; struct ip_options opt; /* Compiled IP options */ - unsigned char flags; + u16 flags; #define IPSKB_FORWARDED BIT(0) #define IPSKB_XFRM_TUNNEL_SIZE BIT(1) @@ -48,10 +48,16 @@ struct inet_skb_parm { #define IPSKB_DOREDIRECT BIT(5) #define IPSKB_FRAG_PMTU BIT(6) #define IPSKB_FRAG_SEGS BIT(7) +#define IPSKB_L3SLAVE BIT(8) u16 frag_max_size; }; +static inline bool ipv4_l3mdev_skb(u16 flags) +{ + return !!(flags & IPSKB_L3SLAVE); +} + static inline unsigned int ip_hdrlen(const struct sk_buff *skb) { return ip_hdr(skb)->ihl * 4; @@ -572,7 +578,7 @@ int ip_options_rcv_srr(struct sk_buff *skb); */ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); -void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); +void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, @@ -594,7 +600,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) { - ip_cmsg_recv_offset(msg, skb, 0); + ip_cmsg_recv_offset(msg, skb, 0, 0); } bool icmp_global_allow(void); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index fb961a576abe..a74e2aa40ef4 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -230,6 +230,8 @@ struct fib6_table { rwlock_t tb6_lock; struct fib6_node tb6_root; struct inet_peer_base tb6_peers; + unsigned int flags; +#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; #define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index e0cd318d5103..f83e78d071a3 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -32,6 +32,7 @@ struct route_info { #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 +#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040 /* We do not (yet ?) 
support IPv6 jumbograms (RFC 2675) * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b9b24abd9103..5345d358a510 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -811,14 +811,18 @@ enum mac80211_rate_control_flags { * in the control information, and it will be filled by the rate * control algorithm according to what should be sent. For example, * if this array contains, in the format { <idx>, <count> } the - * information + * information:: + * * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } + * * then this means that the frame should be transmitted * up to twice at rate 3, up to twice at rate 2, and up to four * times at rate 1 if it doesn't get acknowledged. Say it gets * acknowledged by the peer after the fifth attempt, the status - * information should then contain + * information should then contain:: + * * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... + * * since it was transmitted twice at rate 3, twice at rate 2 * and once at rate 1 after which we received an acknowledgement. */ @@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags { * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) * @vht_nss: number of streams (VHT only) - * @flag: %RX_FLAG_* - * @vht_flag: %RX_VHT_FLAG_* + * @flag: %RX_FLAG_\* + * @vht_flag: %RX_VHT_FLAG_\* * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags { * @probe_req_reg: probe requests should be reported to mac80211 for this * interface. * @drv_priv: data area for driver use, will always be aligned to - * sizeof(void *). + * sizeof(void \*). * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) */ struct ieee80211_vif { @@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates { * @wme: indicates whether the STA supports QoS/WME (if the local device does, * otherwise always false) * @drv_priv: data area for driver use, will always be aligned to - * sizeof(void *), size is determined in hw information. + * sizeof(void \*), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid * if wme is supported. The bits order is like in * IEEE80211_WMM_IE_STA_QOSINFO_AC_*. @@ -2152,12 +2156,12 @@ enum ieee80211_hw_flags { * * @radiotap_mcs_details: lists which MCS information the HW can * report, by default it is set to _MCS, _GI and _BW but doesn't - * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only + * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only * adding _BW is supported today. * * @radiotap_vht_details: lists which VHT MCS information the HW reports, * the default is _GI | _BANDWIDTH. - * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. + * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. * * @radiotap_timestamp: Information for the radiotap timestamp field; if the * 'units_pos' member is set to a non-negative value it must be set to @@ -2492,6 +2496,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * in the software stack cares about, we will, in the future, have mac80211 * tell the driver which information elements are interesting in the sense * that we want to see changes in them.
This will include + * * - a list of information element IDs * - a list of OUIs for the vendor information element * diff --git a/include/net/sock.h b/include/net/sock.h index 276489553338..f13ac87a8015 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -252,6 +252,7 @@ struct sock_common { * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes + * @sk_padding: unused element for alignment * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) @@ -302,7 +303,8 @@ struct sock_common { * @sk_backlog_rcv: callback to process the backlog * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 * @sk_reuseport_cb: reuseport group container - */ + * @sk_rcu: used during RCU grace period + */ struct sock { /* * Now struct inet_timewait_sock also uses sock_common, so please just diff --git a/include/net/tcp.h b/include/net/tcp.h index f83b7f220a65..5b82d4d94834 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -794,12 +794,23 @@ struct tcp_skb_cb { */ static inline int tcp_v6_iif(const struct sk_buff *skb) { - bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags); + bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; } #endif +/* TCP_SKB_CB reference means this can not be used from early demux */ +static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) + return true; +#endif + return false; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. */ diff --git a/include/net/udp.h b/include/net/udp.h index 18f1e6b91927..6134f37ba3ab 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -262,6 +262,7 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); int udp_rcv(struct sk_buff *skb); int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); int udp_init_sock(struct sock *sk); +int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags); unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 0255613a54a4..308adc4154f4 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -225,9 +225,9 @@ struct vxlan_config { struct vxlan_dev { struct hlist_node hlist; /* vni hash table */ struct list_head next; /* vxlan's per namespace list */ - struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */ + struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */ #if IS_ENABLED(CONFIG_IPV6) - struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */ + struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */ #endif struct net_device *dev; struct net *net; /* netns for packet i/o */ diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h new file mode 100644 index 000000000000..eaaf56df4086 --- /dev/null +++ b/include/soc/fsl/bman.h @@ -0,0 +1,129 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FSL_BMAN_H +#define __FSL_BMAN_H + +/* wrapper for 48-bit buffers */ +struct bm_buffer { + union { + struct { + __be16 bpid; /* hi 8-bits reserved */ + __be16 hi; /* High 16-bits of 48-bit address */ + __be32 lo; /* Low 32-bits of 48-bit address */ + }; + __be64 data; + }; +} __aligned(8); +/* + * Restore the 48 bit address previously stored in BMan + * hardware pools as a dma_addr_t + */ +static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) +{ + return be64_to_cpu(buf->data) & 0xffffffffffffLLU; +} + +static inline u64 bm_buffer_get64(const struct bm_buffer *buf) +{ + return be64_to_cpu(buf->data) & 0xffffffffffffLLU; +} + +static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr) +{ + buf->hi = cpu_to_be16(upper_32_bits(addr)); + buf->lo = cpu_to_be32(lower_32_bits(addr)); +} + +static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf) +{ + return be16_to_cpu(buf->bpid) & 0xff; +} + +static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid) +{ + buf->bpid = cpu_to_be16(bpid & 0xff); +} + +/* Managed portal, high-level i/face */ + +/* Portal and Buffer Pools */ +struct bman_portal; +struct bman_pool; + +#define BM_POOL_MAX 64 /* max # of buffer pools */ + +/** + * bman_new_pool - Allocates a Buffer Pool object + * + * Creates a pool object, and returns a reference to it or NULL on error. + */ +struct bman_pool *bman_new_pool(void); + +/** + * bman_free_pool - Deallocates a Buffer Pool object + * @pool: the pool object to release + */ +void bman_free_pool(struct bman_pool *pool); + +/** + * bman_get_bpid - Returns a pool object's BPID. + * @pool: the pool object + * + * The returned value is the index of the encapsulated buffer pool, + * in the range of [0, @BM_POOL_MAX-1]. 
+ */ +int bman_get_bpid(const struct bman_pool *pool); + +/** + * bman_release - Release buffer(s) to the buffer pool + * @pool: the buffer pool object to release to + * @bufs: an array of buffers to release + * @num: the number of buffers in @bufs (1-8) + * + * Adds the given buffers to RCR entries. If the RCR ring is unresponsive, + * the function will return -ETIMEDOUT. Otherwise, it returns zero. + */ +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num); + +/** + * bman_acquire - Acquire buffer(s) from a buffer pool + * @pool: the buffer pool object to acquire from + * @bufs: array for storing the acquired buffers + * @num: the number of buffers desired (@bufs is at least this big) + * + * Issues an "Acquire" command via the portal's management command interface. + * The return value will be the number of buffers obtained from the pool, or a + * negative error code if a h/w error or pool starvation was encountered. In + * the latter case, the content of @bufs is undefined. + */ +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); + +#endif /* __FSL_BMAN_H */ diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h new file mode 100644 index 000000000000..37f3eb001a16 --- /dev/null +++ b/include/soc/fsl/qman.h @@ -0,0 +1,1074 @@ +/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __FSL_QMAN_H +#define __FSL_QMAN_H + +#include <linux/bitops.h> + +/* Hardware constants */ +#define QM_CHANNEL_SWPORTAL0 0 +#define QMAN_CHANNEL_POOL1 0x21 +#define QMAN_CHANNEL_POOL1_REV3 0x401 +extern u16 qm_channel_pool1; + +/* Portal processing (interrupt) sources */ +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ +/* + * This mask contains all the interrupt sources that need handling except DQRI, + * ie. that if present should trigger slow-path processing. + */ +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ + QM_PIRQ_MRI) + +/* For qman_static_dequeue_*** APIs */ +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff +/* for n in [1,15] */ +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +/* for conversion from n of qm_channel */ +static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel) +{ + return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1); +} + +/* --- QMan data structures (and associated constants) --- */ + +/* "Frame Descriptor (FD)" */ +struct qm_fd { + union { + struct { + u8 cfg8b_w1; + u8 bpid; /* Buffer Pool ID */ + u8 cfg8b_w3; + u8 addr_hi; /* high 8-bits of 40-bit address */ + __be32 addr_lo; /* low 32-bits of 40-bit address */ + } __packed; + __be64 data; + }; + __be32 cfg; /* format, offset, length / congestion */ + union { + __be32 cmd; + __be32 status; + }; +} __aligned(8); + +#define QM_FD_FORMAT_SG BIT(31) +#define QM_FD_FORMAT_LONG BIT(30) +#define QM_FD_FORMAT_COMPOUND BIT(29) +#define QM_FD_FORMAT_MASK GENMASK(31, 29) +#define QM_FD_OFF_SHIFT 20 +#define QM_FD_OFF_MASK GENMASK(28, 20) +#define QM_FD_LEN_MASK GENMASK(19, 0) +#define QM_FD_LEN_BIG_MASK GENMASK(28, 0) + +enum qm_fd_format { + /* + * 'contig' implies a contiguous buffer, whereas 'sg' implies a + * scatter-gather table. 'big' implies a 29-bit length with no offset + * field, otherwise length is 20-bit and offset is 9-bit. 'compound' + * implies a s/g-like table, where each entry itself represents a frame + * (contiguous or scatter-gather) and the 29-bit "length" is + * interpreted purely for congestion calculations, ie. a "congestion + * weight". + */ + qm_fd_contig = 0, + qm_fd_contig_big = QM_FD_FORMAT_LONG, + qm_fd_sg = QM_FD_FORMAT_SG, + qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG, + qm_fd_compound = QM_FD_FORMAT_COMPOUND +}; + +static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd) +{ + return be64_to_cpu(fd->data) & 0xffffffffffLLU; +} + +static inline u64 qm_fd_addr_get64(const struct qm_fd *fd) +{ + return be64_to_cpu(fd->data) & 0xffffffffffLLU; +} + +static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr) +{ + fd->addr_hi = upper_32_bits(addr); + fd->addr_lo = cpu_to_be32(lower_32_bits(addr)); +} + +/* + * The 'format' field indicates the interpretation of the remaining + * 29 bits of the 32-bit word. + * If 'format' is _contig or _sg, 20b length and 9b offset. + * If 'format' is _contig_big or _sg_big, 29b length. + * If 'format' is _compound, 29b "congestion weight". 
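+ *
+ * For example (the values here are illustrative, not constants from this
+ * header): qm_fd_set_contig(fd, 64, 1500), defined below, leaves the
+ * format bits clear and stores (64 << QM_FD_OFF_SHIFT) | 1500 in 'cfg',
+ * i.e. a contiguous frame at offset 64 with a 20-bit length of 1500.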
+ */ +static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK; +} + +static inline int qm_fd_get_offset(const struct qm_fd *fd) +{ + return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT; +} + +static inline int qm_fd_get_length(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK; +} + +static inline int qm_fd_get_len_big(const struct qm_fd *fd) +{ + return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK; +} + +static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt, + int off, int len) +{ + fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) | + ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK)); +} + +#define qm_fd_set_contig(fd, off, len) \ + qm_fd_set_param(fd, qm_fd_contig, off, len) +#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len) +#define qm_fd_set_contig_big(fd, len) \ + qm_fd_set_param(fd, qm_fd_contig_big, 0, len) +#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len) + +static inline void qm_fd_clear_fd(struct qm_fd *fd) +{ + fd->data = 0; + fd->cfg = 0; + fd->cmd = 0; +} + +/* Scatter/Gather table entry */ +struct qm_sg_entry { + union { + struct { + u8 __reserved1[3]; + u8 addr_hi; /* high 8-bits of 40-bit address */ + __be32 addr_lo; /* low 32-bits of 40-bit address */ + }; + __be64 data; + }; + __be32 cfg; /* E bit, F bit, length */ + u8 __reserved2; + u8 bpid; + __be16 offset; /* 13-bit, _res[13-15]*/ +} __packed; + +#define QM_SG_LEN_MASK GENMASK(29, 0) +#define QM_SG_OFF_MASK GENMASK(12, 0) +#define QM_SG_FIN BIT(30) +#define QM_SG_EXT BIT(31) + +static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) +{ + return be64_to_cpu(sg->data) & 0xffffffffffLLU; +} + +static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg) +{ + return be64_to_cpu(sg->data) & 0xffffffffffLLU; +} + +static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr) +{ + sg->addr_hi = upper_32_bits(addr); + sg->addr_lo = cpu_to_be32(lower_32_bits(addr)); +} + +static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_FIN; +} + +static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_EXT; +} + +static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK; +} + +static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len) +{ + sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK); +} + +static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len) +{ + sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK)); +} + +static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg) +{ + return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK; +} + +/* "Frame Dequeue Response" */ +struct qm_dqrr_entry { + u8 verb; + u8 stat; + u16 seqnum; /* 15-bit */ + u8 tok; + u8 __reserved2[3]; + u32 fqid; /* 24-bit */ + u32 contextB; + struct qm_fd fd; + u8 __reserved4[32]; +} __packed; +#define QM_DQRR_VERB_VBIT 0x80 +#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */ +#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */ +#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */ +#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */ +#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */ +#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */ +#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ +#define 
QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ + +/* "ERN Message Response" */ +/* "FQ State Change Notification" */ +union qm_mr_entry { + struct { + u8 verb; + u8 __reserved[63]; + }; + struct { + u8 verb; + u8 dca; + u16 seqnum; + u8 rc; /* Rej Code: 8-bit */ + u8 orp_hi; /* ORP: 24-bit */ + u16 orp_lo; + u32 fqid; /* 24-bit */ + u32 tag; + struct qm_fd fd; + u8 __reserved1[32]; + } __packed ern; + struct { + u8 verb; + u8 fqs; /* Frame Queue Status */ + u8 __reserved1[6]; + u32 fqid; /* 24-bit */ + u32 contextB; + u8 __reserved2[48]; + } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ +}; +#define QM_MR_VERB_VBIT 0x80 +/* + * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb + * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished + * from the other MR types by noting if the 0x20 bit is unset. + */ +#define QM_MR_VERB_TYPE_MASK 0x27 +#define QM_MR_VERB_DC_ERN 0x20 +#define QM_MR_VERB_FQRN 0x21 +#define QM_MR_VERB_FQRNI 0x22 +#define QM_MR_VERB_FQRL 0x23 +#define QM_MR_VERB_FQPN 0x24 +#define QM_MR_RC_MASK 0xf0 /* contains one of; */ +#define QM_MR_RC_CGR_TAILDROP 0x00 +#define QM_MR_RC_WRED 0x10 +#define QM_MR_RC_ERROR 0x20 +#define QM_MR_RC_ORPWINDOW_EARLY 0x30 +#define QM_MR_RC_ORPWINDOW_LATE 0x40 +#define QM_MR_RC_FQ_TAILDROP 0x50 +#define QM_MR_RC_ORPWINDOW_RETIRED 0x60 +#define QM_MR_RC_ORP_ZERO 0x70 +#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */ +#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */ + +/* + * An identical structure of FQD fields is present in the "Init FQ" command and + * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type. + * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the + * latter has two inlines to assist with converting to/from the mant+exp + * representation. + */ +struct qm_fqd_stashing { + /* See QM_STASHING_EXCL_<...> */ + u8 exclusive; + /* Numbers of cachelines */ + u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */ +}; + +struct qm_fqd_oac { + /* "Overhead Accounting Control", see QM_OAC_<...> */ + u8 oac; /* oac[6-7], _res[0-5] */ + /* Two's-complement value (-128 to +127) */ + s8 oal; /* "Overhead Accounting Length" */ +}; + +struct qm_fqd { + /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */ + u8 orpc; + u8 cgid; + __be16 fq_ctrl; /* See QM_FQCTRL_<...> */ + __be16 dest_wq; /* channel[3-15], wq[0-2] */ + __be16 ics_cred; /* 15-bit */ + /* + * For "Initialize Frame Queue" commands, the write-enable mask + * determines whether 'td' or 'oac_init' is observed. For query + * commands, this field is always 'td', and 'oac_query' (below) reflects + * the Overhead ACcounting values. 
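+ *
+ * As an illustration (threshold value chosen arbitrarily): with
+ * QM_INITFQ_WE_TDTHRESH set in the write-enable mask, 'td' is observed,
+ * and qm_fqd_set_taildrop(fqd, 0x3000, 1), defined below, encodes it as
+ * mant = 0xc0, exp = 6, since 0xc0 << 6 == 0x3000.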
+ */ + union { + __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */ + struct qm_fqd_oac oac_init; + }; + __be32 context_b; + union { + /* Treat it as 64-bit opaque */ + __be64 opaque; + struct { + __be32 hi; + __be32 lo; + }; + /* Treat it as s/w portal stashing config */ + /* see "FQD Context_A field used for [...]" */ + struct { + struct qm_fqd_stashing stashing; + /* + * 48-bit address of FQ context to + * stash, must be cacheline-aligned + */ + __be16 context_hi; + __be32 context_lo; + } __packed; + } context_a; + struct qm_fqd_oac oac_query; +} __packed; + +#define QM_FQD_CHAN_OFF 3 +#define QM_FQD_WQ_MASK GENMASK(2, 0) +#define QM_FQD_TD_EXP_MASK GENMASK(4, 0) +#define QM_FQD_TD_MANT_OFF 5 +#define QM_FQD_TD_MANT_MASK GENMASK(12, 5) +#define QM_FQD_TD_MAX 0xe0000000 +#define QM_FQD_TD_MANT_MAX 0xff +#define QM_FQD_OAC_OFF 6 +#define QM_FQD_AS_OFF 4 +#define QM_FQD_DS_OFF 2 +#define QM_FQD_XS_MASK 0x3 + +/* 64-bit converters for context_hi/lo */ +static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) +{ + return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL; +} + +static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd) +{ + return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL; +} + +static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) +{ + return qm_fqd_stashing_get64(fqd); +} + +static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) +{ + fqd->context_a.context_hi = upper_32_bits(addr); + fqd->context_a.context_lo = lower_32_bits(addr); +} + +static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) +{ + fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr)); + fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr)); +} + +/* convert a threshold value into mant+exp representation */ +static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val, + int roundup) +{ + u32 e = 0; + int td, oddbit = 0; + + if (val > QM_FQD_TD_MAX) + return -ERANGE; + + while (val > QM_FQD_TD_MANT_MAX) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + + td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK; + td |= (e & QM_FQD_TD_EXP_MASK); + fqd->td = cpu_to_be16(td); + return 0; +} +/* and the other direction */ +static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd) +{ + int td = be16_to_cpu(fqd->td); + + return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF) + << (td & QM_FQD_TD_EXP_MASK); +} + +static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs) +{ + struct qm_fqd_stashing *st = &fqd->context_a.stashing; + + st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) | + ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) | + (cs & QM_FQD_XS_MASK); +} + +static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd) +{ + return fqd->context_a.stashing.cl; +} + +static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val) +{ + fqd->oac_init.oac = val << QM_FQD_OAC_OFF; +} + +static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val) +{ + fqd->oac_init.oal = val; +} + +static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq) +{ + fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) | + (wq & QM_FQD_WQ_MASK)); +} + +static inline int qm_fqd_get_chan(const struct qm_fqd *fqd) +{ + return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF; +} + +static inline int qm_fqd_get_wq(const struct qm_fqd *fqd) +{ + return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK; +} + +/* See "Frame Queue Descriptor (FQD)" */ +/* Frame Queue Descriptor (FQD) field 
'fq_ctrl' uses these constants */ +#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */ +#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */ +#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */ +#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */ +#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */ +#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */ +#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */ +#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */ +#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */ +#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */ + +/* See "FQD Context_A field used for [...] */ +/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */ +#define QM_STASHING_EXCL_ANNOTATION 0x04 +#define QM_STASHING_EXCL_DATA 0x02 +#define QM_STASHING_EXCL_CTX 0x01 + +/* See "Intra Class Scheduling" */ +/* FQD field 'OAC' (Overhead ACcounting) uses these constants */ +#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */ +#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */ + +/* + * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields + * and associated commands/responses. The WRED parameters are calculated from + * these fields as follows; + * MaxTH = MA * (2 ^ Mn) + * Slope = SA / (2 ^ Sn) + * MaxP = 4 * (Pn + 1) + */ +struct qm_cgr_wr_parm { + /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */ + u32 word; +}; +/* + * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding + * management commands, this is padded to a 16-bit structure field, so that's + * how we represent it here. The congestion state threshold is calculated from + * these fields as follows; + * CS threshold = TA * (2 ^ Tn) + */ +struct qm_cgr_cs_thres { + /* _res[13-15], TA[5-12], Tn[0-4] */ + u16 word; +}; +/* + * This identical structure of CGR fields is present in the "Init/Modify CGR" + * commands and the "Query CGR" result. It's suctioned out here into its own + * struct. 
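+ *
+ * For illustration (threshold chosen arbitrarily): qm_cgr_cs_thres_set64(),
+ * defined below, encodes a congestion state threshold of 0x4000 as
+ * TA = 0x80, Tn = 7, since 0x80 * (2 ^ 7) == 0x4000.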
+ */ +struct __qm_mc_cgr { + struct qm_cgr_wr_parm wr_parm_g; + struct qm_cgr_wr_parm wr_parm_y; + struct qm_cgr_wr_parm wr_parm_r; + u8 wr_en_g; /* boolean, use QM_CGR_EN */ + u8 wr_en_y; /* boolean, use QM_CGR_EN */ + u8 wr_en_r; /* boolean, use QM_CGR_EN */ + u8 cscn_en; /* boolean, use QM_CGR_EN */ + union { + struct { + u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ + u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ + }; + u32 cscn_targ; /* use QM_CGR_TARG_* */ + }; + u8 cstd_en; /* boolean, use QM_CGR_EN */ + u8 cs; /* boolean, only used in query response */ + struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */ + u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ +} __packed; +#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ +#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ +#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ +#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ +#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */ +#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */ +/* Convert CGR thresholds to/from "cs_thres" format */ +static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) +{ + return ((th->word >> 5) & 0xff) << (th->word & 0x1f); +} + +static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, + int roundup) +{ + u32 e = 0; + int oddbit = 0; + + while (val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if (roundup && oddbit) + val++; + } + th->word = ((val & 0xff) << 5) | (e & 0x1f); + return 0; +} + +/* "Initialize FQ" */ +struct qm_mcc_initfq { + u8 __reserved1[2]; + u16 we_mask; /* Write Enable Mask */ + u32 fqid; /* 24-bit */ + u16 count; /* Initialises 'count+1' FQDs */ + struct qm_fqd fqd; /* the FQD fields go here */ + u8 __reserved2[30]; +} __packed; +/* "Initialize/Modify CGR" */ +struct qm_mcc_initcgr { + u8 __reserve1[2]; + u16 we_mask; /* Write Enable Mask */ + struct __qm_mc_cgr cgr; /* CGR fields */ + u8 __reserved2[2]; + u8 cgid; + u8 __reserved3[32]; +} __packed; + +/* INITFQ-specific flags */ +#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ +#define QM_INITFQ_WE_OAC 0x0100 +#define QM_INITFQ_WE_ORPC 0x0080 +#define QM_INITFQ_WE_CGID 0x0040 +#define QM_INITFQ_WE_FQCTRL 0x0020 +#define QM_INITFQ_WE_DESTWQ 0x0010 +#define QM_INITFQ_WE_ICSCRED 0x0008 +#define QM_INITFQ_WE_TDTHRESH 0x0004 +#define QM_INITFQ_WE_CONTEXTB 0x0002 +#define QM_INITFQ_WE_CONTEXTA 0x0001 +/* INITCGR/MODIFYCGR-specific flags */ +#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */ +#define QM_CGR_WE_WR_PARM_G 0x0400 +#define QM_CGR_WE_WR_PARM_Y 0x0200 +#define QM_CGR_WE_WR_PARM_R 0x0100 +#define QM_CGR_WE_WR_EN_G 0x0080 +#define QM_CGR_WE_WR_EN_Y 0x0040 +#define QM_CGR_WE_WR_EN_R 0x0020 +#define QM_CGR_WE_CSCN_EN 0x0010 +#define QM_CGR_WE_CSCN_TARG 0x0008 +#define QM_CGR_WE_CSTD_EN 0x0004 +#define QM_CGR_WE_CS_THRES 0x0002 +#define QM_CGR_WE_MODE 0x0001 + +#define QMAN_CGR_FLAG_USE_INIT 0x00000001 + + /* Portal and Frame Queues */ +/* Represents a managed portal */ +struct qman_portal; + +/* + * This object type represents QMan frame queue descriptors (FQD), it is + * cacheline-aligned, and initialised by qman_create_fq(). The structure is + * defined further down. + */ +struct qman_fq; + +/* + * This object type represents a QMan congestion group, it is defined further + * down. 
+ */ +struct qman_cgr; + +/* + * This enum, and the callback type that returns it, are used when handling + * dequeued frames via DQRR. Note that for "null" callbacks registered with the + * portal object (for handling dequeues that do not demux because contextB is + * NULL), the return value *MUST* be qman_cb_dqrr_consume. + */ +enum qman_cb_dqrr_result { + /* DQRR entry can be consumed */ + qman_cb_dqrr_consume, + /* Like _consume, but requests parking - FQ must be held-active */ + qman_cb_dqrr_park, + /* Does not consume, for DCA mode only. */ + qman_cb_dqrr_defer, + /* + * Stop processing without consuming this ring entry. Exits the current + * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within + * an interrupt handler, the callback would typically call + * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value, + * otherwise the interrupt will reassert immediately. + */ + qman_cb_dqrr_stop, + /* Like qman_cb_dqrr_stop, but consumes the current entry. */ + qman_cb_dqrr_consume_stop +}; +typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr); + +/* + * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They + * are always consumed after the callback returns. + */ +typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq, + const union qm_mr_entry *msg); + +/* + * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active + + * held-active + held-suspended are just "sched". Things like "retired" will not + * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until + * then, to indicate it's completing and to gate attempts to retry the retire + * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's + * technically impossible in the case of enqueue DCAs (which refer to DQRR ring + * index rather than the FQ that ring entry corresponds to), so repeated park + * commands are allowed (if you're silly enough to try) but won't change FQ + * state, and the resulting park notifications move FQs from "sched" to + * "parked". + */ +enum qman_fq_state { + qman_fq_state_oos, + qman_fq_state_parked, + qman_fq_state_sched, + qman_fq_state_retired +}; + +#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */ +#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */ +#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */ +#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */ +#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */ +#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */ + +/* + * Frame queue objects (struct qman_fq) are stored within memory passed to + * qman_create_fq(), as this allows stashing of caller-provided demux callback + * pointers at no extra cost to stashing of (driver-internal) FQ state. If the + * caller wishes to add per-FQ state and have it benefit from dequeue-stashing, + * they should; + * + * (a) extend the qman_fq structure with their state; eg. + * + * // myfq is allocated and driver_fq callbacks filled in; + * struct my_fq { + * struct qman_fq base; + * int an_extra_field; + * [ ... add other fields to be associated with each FQ ...] + * } *myfq = some_my_fq_allocator(); + * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base); + * + * // in a dequeue callback, access extra fields from 'fq' via a cast; + * struct my_fq *myfq = (struct my_fq *)fq; + * do_something_with(myfq->an_extra_field); + * [...] 
+ * + * (b) when and if configuring the FQ for context stashing, specify however + * many cachelines are required to stash 'struct my_fq', to accelerate not + * only the QMan driver but the callback as well. + */ + +struct qman_fq_cb { + qman_cb_dqrr dqrr; /* for dequeued frames */ + qman_cb_mr ern; /* for s/w ERNs */ + qman_cb_mr fqs; /* frame-queue state changes*/ +}; + +struct qman_fq { + /* Caller of qman_create_fq() provides these demux callbacks */ + struct qman_fq_cb cb; + /* + * These are internal to the driver, don't touch. In particular, they + * may change, be removed, or extended (so you shouldn't rely on + * sizeof(qman_fq) being a constant). + */ + u32 fqid, idx; + unsigned long flags; + enum qman_fq_state state; + int cgr_groupid; +}; + +/* + * This callback type is used when handling congestion group entry/exit. + * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. + */ +typedef void (*qman_cb_cgr)(struct qman_portal *qm, + struct qman_cgr *cgr, int congested); + +struct qman_cgr { + /* Set these prior to qman_create_cgr() */ + u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/ + qman_cb_cgr cb; + /* These are private to the driver */ + u16 chan; /* portal channel this object is created on */ + struct list_head node; +}; + +/* Flags to qman_create_fq() */ +#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */ +#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */ +#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */ +#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */ + +/* Flags to qman_init_fq() */ +#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ +#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ + + /* Portal Management */ +/** + * qman_p_irqsource_add - add processing sources to be interrupt-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Adds processing sources that should be interrupt-driven (rather than + * processed via qman_poll_***() functions). + */ +void qman_p_irqsource_add(struct qman_portal *p, u32 bits); + +/** + * qman_p_irqsource_remove - remove processing sources from being int-driven + * @bits: bitmask of QM_PIRQ_**I processing sources + * + * Removes processing sources from being interrupt-driven, so that they will + * instead be processed via qman_poll_***() functions. + */ +void qman_p_irqsource_remove(struct qman_portal *p, u32 bits); + +/** + * qman_affine_cpus - return a mask of cpus that have affine portals + */ +const cpumask_t *qman_affine_cpus(void); + +/** + * qman_affine_channel - return the channel ID of a portal + * @cpu: the cpu whose affine portal is the subject of the query + * + * If @cpu is -1, the affine portal for the current CPU will be used. It is a + * bug to call this function for any value of @cpu (other than -1) that is not a + * member of the mask returned from qman_affine_cpus(). + */ +u16 qman_affine_channel(int cpu); + +/** + * qman_get_affine_portal - return the portal pointer affine to cpu + * @cpu: the cpu whose affine portal is the subject of the query + */ +struct qman_portal *qman_get_affine_portal(int cpu); + +/** + * qman_p_poll_dqrr - process DQRR (fast-path) entries + * @limit: the maximum number of DQRR entries to process + * + * Use of this function requires that DQRR processing not be interrupt-driven. + * The return value represents the number of DQRR entries processed.
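+ *
+ * A minimal, illustrative polling loop (assuming DQRI has been removed from
+ * the interrupt-driven sources via qman_p_irqsource_remove(), 'p' was
+ * obtained from qman_get_affine_portal(), and 'done' is the caller's own
+ * termination condition):
+ *
+ *   while (!done)
+ *       qman_p_poll_dqrr(p, 16);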
+ */ +int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit); + +/** + * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR + * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n) + * + * Adds a set of pool channels to the portal's static dequeue command register + * (SDQCR). The requested pools are limited to those the portal has dequeue + * access to. + */ +void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); + + /* FQ management */ +/** + * qman_create_fq - Allocates a FQ + * @fqid: the index of the FQD to encapsulate, must be "Out of Service" + * @flags: bit-mask of QMAN_FQ_FLAG_*** options + * @fq: memory for storing the 'fq', with callbacks filled in + * + * Creates a frame queue object for the given @fqid, unless the + * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is + * dynamically allocated (or the function fails if none are available). Once + * created, the caller should not touch the memory at 'fq' except as extended to + * adjacent memory for user-defined fields (see the definition of "struct + * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to + * pre-existing frame-queues that aren't to be otherwise interfered with, it + * prevents all other modifications to the frame queue. The TO_DCPORTAL flag + * causes the driver to honour any contextB modifications requested in the + * qm_init_fq() API, as this indicates the frame queue will be consumed by a + * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by + * software portals, the contextB field is controlled by the driver and can't be + * modified by the caller. + */ +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); + +/** + * qman_destroy_fq - Deallocates a FQ + * @fq: the frame queue object to release + * + * The memory for this frame queue object ('fq' provided in qman_create_fq()) is + * not deallocated but the caller regains ownership, to do with as desired. The + * FQ must be in the 'out-of-service' or in the 'parked' state. + */ +void qman_destroy_fq(struct qman_fq *fq); + +/** + * qman_fq_fqid - Queries the frame queue ID of a FQ object + * @fq: the frame queue object to query + */ +u32 qman_fq_fqid(struct qman_fq *fq); + +/** + * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled" + * @fq: the frame queue object to modify, must be 'parked' or new. + * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options + * @opts: the FQ-modification settings, as defined in the low-level API + * + * The @opts parameter comes from the low-level portal API. Select + * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled + * rather than parked. NB, @opts can be NULL. + * + * Note that some fields and options within @opts may be ignored or overwritten + * by the driver; + * 1. the 'count' and 'fqid' fields are always ignored (this operation only + * affects one frame queue: @fq). + * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated + * 'fqd' structure's 'context_b' field are sometimes overwritten; + * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is + * initialised to a value used by the driver for demux. + * - if context_b is initialised for demux, so is context_a in case stashing + * is requested (see item 4). + * (So caller control of context_b is only possible for TO_DCPORTAL frame queue + * objects.) + * 3. 
if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's + * 'dest::channel' field will be overwritten to match the portal used to issue + * the command. If the WE_DESTWQ write-enable bit had already been set by the + * caller, the channel workqueue will be left as-is, otherwise the write-enable + * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag + * isn't set, the destination channel/workqueue fields and the write-enable bit + * are left as-is. + * 4. if the driver overwrites context_a/b for demux, then if + * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite + * context_a.address fields and will leave the stashing fields provided by the + * user alone, otherwise it will zero out the context_a.stashing fields. + */ +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts); + +/** + * qman_schedule_fq - Schedules a FQ + * @fq: the frame queue object to schedule, must be 'parked' + * + * Schedules the frame queue, which must be Parked, which takes it to + * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level. + */ +int qman_schedule_fq(struct qman_fq *fq); + +/** + * qman_retire_fq - Retires a FQ + * @fq: the frame queue object to retire + * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately + * + * Retires the frame queue. This returns zero if it succeeds immediately, +1 if + * the retirement was started asynchronously, otherwise it returns negative for + * failure. When this function returns zero, @flags is set to indicate whether + * the retired FQ is empty and/or whether it has any ORL fragments (to show up + * as ERNs). Otherwise the corresponding flags will be known when a subsequent + * FQRN message shows up on the portal's message ring. + * + * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or + * Active state), the completion will be via the message ring as a FQRN - but + * the corresponding callback may occur before this function returns!! Ie. the + * caller should be prepared to accept the callback as the function is called, + * not only once it has returned. + */ +int qman_retire_fq(struct qman_fq *fq, u32 *flags); + +/** + * qman_oos_fq - Puts a FQ "out of service" + * @fq: the frame queue object to be put out-of-service, must be 'retired' + * + * The frame queue must be retired and empty, and if any order restoration list + * was released as ERNs at the time of retirement, they must all be consumed. + */ +int qman_oos_fq(struct qman_fq *fq); + +/** + * qman_enqueue - Enqueue a frame to a frame queue + * @fq: the frame queue object to enqueue to + * @fd: a descriptor of the frame to be enqueued + * + * Fills an entry in the EQCR of portal @qm to enqueue the frame described by + * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid' + * field is ignored. The return value is non-zero on error, such as ring full. + */ +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd); + +/** + * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs + * @result: is set by the API to the base FQID of the allocated range + * @count: the number of FQIDs required + * + * Returns 0 on success, or a negative error code. 
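+ *
+ * Illustrative pairing with qman_release_fqid(), defined below (error
+ * handling elided):
+ *
+ *   u32 fqid;
+ *
+ *   if (!qman_alloc_fqid(&fqid)) {
+ *       [... use fqid, e.g. with qman_create_fq() ...]
+ *       qman_release_fqid(fqid);
+ *   }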
+ */ +int qman_alloc_fqid_range(u32 *result, u32 count); +#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1) + +/** + * qman_release_fqid - Release the specified frame queue ID + * @fqid: the FQID to be released back to the resource pool + * + * This function can also be used to seed the allocator with + * FQID ranges that it can subsequently allocate from. + * Returns 0 on success, or a negative error code. + */ +int qman_release_fqid(u32 fqid); + + /* Pool-channel management */ +/** + * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs + * @result: is set by the API to the base pool-channel ID of the allocated range + * @count: the number of pool-channel IDs required + * + * Returns 0 on success, or a negative error code. + */ +int qman_alloc_pool_range(u32 *result, u32 count); +#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1) + +/** + * qman_release_pool - Release the specified pool-channel ID + * @id: the pool-chan ID to be released back to the resource pool + * + * This function can also be used to seed the allocator with + * pool-channel ID ranges that it can subsequently allocate from. + * Returns 0 on success, or a negative error code. + */ +int qman_release_pool(u32 id); + + /* CGR management */ +/** + * qman_create_cgr - Register a congestion group object + * @cgr: the 'cgr' object, with fields filled in + * @flags: QMAN_CGR_FLAG_* values + * @opts: optional state of CGR settings + * + * Registers this object to receive congestion entry/exit callbacks on the + * portal affine to the cpu on which this API is executed. If opts is + * NULL then only the callback (cgr->cb) function is registered. If @flags + * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset + * any unspecified parameters) will be used rather than a modify hw command + * (which only modifies the specified parameters). + */ +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts); + +/** + * qman_delete_cgr - Deregisters a congestion group object + * @cgr: the 'cgr' object to deregister + * + * "Unplugs" this CGR object from the portal affine to the cpu on which this API + * is executed. This must be executed on the same affine portal on which it was + * created. + */ +int qman_delete_cgr(struct qman_cgr *cgr); + +/** + * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU + * @cgr: the 'cgr' object to deregister + * + * This will select the proper CPU and run qman_delete_cgr() there. + */ +void qman_delete_cgr_safe(struct qman_cgr *cgr); + +/** + * qman_query_cgr_congested - Queries CGR's congestion status + * @cgr: the 'cgr' object to query + * @result: returns 'cgr's congestion status, 1 (true) if congested + */ +int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result); + +/** + * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs + * @result: is set by the API to the base CGR ID of the allocated range + * @count: the number of CGR IDs required + * + * Returns 0 on success, or a negative error code. + */ +int qman_alloc_cgrid_range(u32 *result, u32 count); +#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1) + +/** + * qman_release_cgrid - Release the specified CGR ID + * @id: the CGR ID to be released back to the resource pool + * + * This function can also be used to seed the allocator with + * CGR ID ranges that it can subsequently allocate from. + * Returns 0 on success, or a negative error code.
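+ *
+ * An illustrative pairing with the CGR allocator above ('cgr' and 'opts'
+ * are assumed to have been set up by the caller; error handling elided):
+ *
+ *   u32 cgrid;
+ *
+ *   if (!qman_alloc_cgrid(&cgrid)) {
+ *       cgr.cgrid = cgrid;
+ *       if (qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
+ *           qman_release_cgrid(cgrid);
+ *   }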
+ */
+int qman_release_cgrid(u32 id);
+
+#endif /* __FSL_QMAN_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb8e3b6febdf..c2119008990a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
 	TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED	= R(0x15),
 	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
 	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
+	TCM_COPY_TARGET_DEVICE_NOT_REACHABLE	= R(0x18),
 #undef R
 };
 
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
new file mode 100644
index 000000000000..ab68640a18d0
--- /dev/null
+++ b/include/trace/events/cgroup.h
@@ -0,0 +1,163 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+
+#if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cgroup_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root),
+
+	TP_STRUCT__entry(
+		__field(	int,		root			)
+		__field(	u16,		ss_mask			)
+		__string(	name,		root->name		)
+	),
+
+	TP_fast_assign(
+		__entry->root = root->hierarchy_id;
+		__entry->ss_mask = root->subsys_mask;
+		__assign_str(name, root->name);
+	),
+
+	TP_printk("root=%d ss_mask=%#x name=%s",
+		  __entry->root, __entry->ss_mask, __get_str(name))
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_setup_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_destroy_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_remount,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DECLARE_EVENT_CLASS(cgroup,
+
+	TP_PROTO(struct cgroup *cgrp),
+
+	TP_ARGS(cgrp),
+
+	TP_STRUCT__entry(
+		__field(	int,		root			)
+		__field(	int,		id			)
+		__field(	int,		level			)
+		__dynamic_array(char,		path,
+				cgrp->kn ? cgroup_path(cgrp, NULL, 0) + 1
+					 : strlen("(null)"))
+	),
+
+	TP_fast_assign(
+		__entry->root = cgrp->root->hierarchy_id;
+		__entry->id = cgrp->id;
+		__entry->level = cgrp->level;
+		if (cgrp->kn)
+			cgroup_path(cgrp, __get_dynamic_array(path),
+				    __get_dynamic_array_len(path));
+		else
+			__assign_str(path, "(null)");
+	),
+
+	TP_printk("root=%d id=%d level=%d path=%s",
+		  __entry->root, __entry->id, __entry->level, __get_str(path))
+);
+
+DEFINE_EVENT(cgroup, cgroup_mkdir,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rmdir,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_release,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rename,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DECLARE_EVENT_CLASS(cgroup_migrate,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup),
+
+	TP_STRUCT__entry(
+		__field(	int,		dst_root		)
+		__field(	int,		dst_id			)
+		__field(	int,		dst_level		)
+		__dynamic_array(char,		dst_path,
+				dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1
+					     : strlen("(null)"))
+		__field(	int,		pid			)
+		__string(	comm,		task->comm		)
+	),
+
+	TP_fast_assign(
+		__entry->dst_root = dst_cgrp->root->hierarchy_id;
+		__entry->dst_id = dst_cgrp->id;
+		__entry->dst_level = dst_cgrp->level;
+		if (dst_cgrp->kn)
+			cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
+				    __get_dynamic_array_len(dst_path));
+		else
+			__assign_str(dst_path, "(null)");
+		__entry->pid = task->pid;
+		__assign_str(comm, task->comm);
+	),
+
+	TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+		  __entry->dst_root, __entry->dst_id, __entry->dst_level,
+		  __get_str(dst_path), __entry->pid, __get_str(comm))
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+#endif /* _TRACE_CGROUP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index dbfee7e86ba6..9b1462e38b82 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,10 +730,6 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
 __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
 #define __NR_pkey_free 290
 __SYSCALL(__NR_pkey_free, sys_pkey_free)
-#define __NR_pkey_get 291
-//__SYSCALL(__NR_pkey_get, sys_pkey_get)
-#define __NR_pkey_set 292
-//__SYSCALL(__NR_pkey_set, sys_pkey_set)
 
 #undef __NR_syscalls
 #define __NR_syscalls 291
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6965d0909554..cd2be1c8e9fb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -75,6 +75,7 @@ header-y += bpf_perf_event.h
 header-y += bpf.h
 header-y += bpqether.h
 header-y += bsg.h
+header-y += bt-bmc.h
 header-y += btrfs.h
 header-y += can.h
 header-y += capability.h
diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h
new file mode 100644
index 000000000000..d9ec766a63d0
--- /dev/null
+++ b/include/uapi/linux/bt-bmc.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_BT_BMC_H
+#define _UAPI_LINUX_BT_BMC_H
+
+#include <linux/ioctl.h>
+
+#define __BT_BMC_IOCTL_MAGIC	0xb1
+#define BT_BMC_IOCTL_SMS_ATN	_IO(__BT_BMC_IOCTL_MAGIC, 0x00)
+
+#endif /* _UAPI_LINUX_BT_BMC_H */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ac5eacd3055b..db4c253f8011 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -239,7 +239,17 @@ struct btrfs_ioctl_fs_info_args {
  * Used by:
  * struct btrfs_ioctl_feature_flags
  */
-#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE	(1ULL << 0)
+/*
+ * Older kernels (< 4.9) on big-endian systems produced broken free space tree
+ * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
+ * < 4.7.3). If this bit is clear, then the free space tree cannot be trusted.
+ * btrfs-progs can also intentionally clear this bit to ask the kernel to
+ * rebuild the free space tree; however, this might not work on older kernels
+ * that do not know about this bit. If unsure, clear the cache manually on
+ * first mount when booting older kernel versions.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID	(1ULL << 1)
 
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 099a4200732c..8e547231c1b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -119,8 +119,7 @@ struct ethtool_cmd {
 static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
 					 __u32 speed)
 {
-
-	ep->speed = (__u16)speed;
+	ep->speed = (__u16)(speed & 0xFFFF);
 	ep->speed_hi = (__u16)(speed >> 16);
 }
 
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index d812172d1d7b..e5a2e68b2236 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -612,6 +612,8 @@
  */
 #define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI		0x00000020 /* Alternative Routing-ID */
+#define  PCI_EXP_DEVCAP2_ATOMIC_ROUTE	0x00000040 /* Atomic Op routing */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP64	0x00000100 /* Atomic 64-bit compare */
 #define  PCI_EXP_DEVCAP2_LTR		0x00000800 /* Latency tolerance reporting */
 #define  PCI_EXP_DEVCAP2_OBFF_MASK	0x000c0000 /* OBFF support mechanism */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG	0x00040000 /* New message signaling */
@@ -619,6 +621,7 @@
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
 #define  PCI_EXP_DEVCTL2_COMP_TIMEOUT	0x000f	/* Completion Timeout Value */
 #define  PCI_EXP_DEVCTL2_ARI		0x0020	/* Alternative Routing-ID */
+#define  PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040	/* Set Atomic requests */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN	0x0100	/* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN	0x0200	/* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN		0x0400	/* Enable LTR mechanism */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
 #define RTNH_F_OFFLOAD		8	/* offloaded route */
 #define RTNH_F_LINKDOWN		16	/* carrier-down on nexthop */
 
-#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
 
 /* Macros to handle nexthops */
 
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
new file mode 100644
index 000000000000..75c270d839c8
--- /dev/null
+++ b/include/uapi/rdma/qedr-abi.h
@@ -0,0 +1,106 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_USER_H__
+#define __QEDR_USER_H__
+
+#include <linux/types.h>
+
+#define QEDR_ABI_VERSION	(8)
+
+/* user-kernel communication data structures. */
+
+struct qedr_alloc_ucontext_resp {
+	__u64 db_pa;
+	__u32 db_size;
+
+	__u32 max_send_wr;
+	__u32 max_recv_wr;
+	__u32 max_srq_wr;
+	__u32 sges_per_send_wr;
+	__u32 sges_per_recv_wr;
+	__u32 sges_per_srq_wr;
+	__u32 max_cqes;
+};
+
+struct qedr_alloc_pd_ureq {
+	__u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+	__u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+	__u64 addr;
+	__u64 len;
+};
+
+struct qedr_create_cq_uresp {
+	__u32 db_offset;
+	__u16 icid;
+};
+
+struct qedr_create_qp_ureq {
+	__u32 qp_handle_hi;
+	__u32 qp_handle_lo;
+
+	/* SQ */
+	/* user space virtual address of SQ buffer */
+	__u64 sq_addr;
+
+	/* length of SQ buffer */
+	__u64 sq_len;
+
+	/* RQ */
+	/* user space virtual address of RQ buffer */
+	__u64 rq_addr;
+
+	/* length of RQ buffer */
+	__u64 rq_len;
+};
+
+struct qedr_create_qp_uresp {
+	__u32 qp_id;
+	__u32 atomic_supported;
+
+	/* SQ */
+	__u32 sq_db_offset;
+	__u16 sq_icid;
+
+	/* RQ */
+	__u32 rq_db_offset;
+	__u16 rq_icid;
+
+	__u32 rq_db2_offset;
+};
+
+#endif /* __QEDR_USER_H__ */
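
To make the qman frame queue lifecycle documented above concrete, here is a minimal kernel-side sketch. It is not part of this merge; it assumes qman_create_fq() and the callback plumbing declared earlier in the same header, that the header is visible as <soc/fsl/qman.h>, and it abbreviates error recovery (in particular, an asynchronous retirement would have to wait for the FQRN before qman_oos_fq() may legally be called).

/* Hypothetical FQ round trip: allocate an FQID, create/init/schedule the
 * FQ, enqueue one frame, then retire and take the FQ out of service. */
#include <linux/printk.h>
#include <soc/fsl/qman.h>

static int example_fq_roundtrip(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qm_mcc_initfq opts = {};
	u32 fqid, flags;
	int ret;

	ret = qman_alloc_fqid(&fqid);		/* reserve a single FQID */
	if (ret)
		return ret;

	ret = qman_create_fq(fqid, 0, fq);	/* declared earlier in the header */
	if (ret)
		goto out_release;

	/* LOCAL: let the driver point dest::channel at the issuing portal. */
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, &opts);
	if (ret)
		goto out_release;

	ret = qman_schedule_fq(fq);		/* Parked -> (Tentatively-)Scheduled */
	if (ret)
		goto out_release;

	ret = qman_enqueue(fq, fd);		/* non-zero e.g. when the EQCR is full */
	if (ret)
		pr_warn("example: enqueue failed (%d)\n", ret);

	ret = qman_retire_fq(fq, &flags);	/* 0, +1 (async via FQRN) or < 0 */
	if (ret == 0)
		ret = qman_oos_fq(fq);		/* legal only once retired and empty */
out_release:
	qman_release_fqid(fqid);
	return ret;
}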
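
Likewise for the CGR API, a hypothetical registration/teardown sequence. The 'cgrid' and 'cb' members of struct qman_cgr and the qman_cb_cgr callback signature are assumed from the earlier, unshown part of the header; this is a sketch, not code from the commit.

/* Allocate a CGR ID, register congestion callbacks on the affine portal,
 * query the initial state, then tear everything down again. */
#include <linux/printk.h>
#include <soc/fsl/qman.h>

static void example_cgr_cb(struct qman_portal *p, struct qman_cgr *cgr,
			   int congested)
{
	pr_info("cgr %u %s congestion\n", cgr->cgrid,
		congested ? "entered" : "exited");
}

static int example_cgr_setup(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr opts = {};	/* unspecified fields get reset */
	bool congested;
	u32 cgrid;
	int ret;

	ret = qman_alloc_cgrid(&cgrid);
	if (ret)
		return ret;

	cgr->cgrid = cgrid;
	cgr->cb = example_cgr_cb;

	/* USE_INIT: issue an init hw command rather than a modify. */
	ret = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		qman_release_cgrid(cgrid);
		return ret;
	}

	if (!qman_query_cgr_congested(cgr, &congested))
		pr_info("cgr %u initially %scongested\n", cgrid,
			congested ? "" : "not ");

	qman_delete_cgr_safe(cgr);		/* safe from any CPU */
	qman_release_cgrid(cgrid);
	return 0;
}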
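
The new cgroup tracepoints are consumed like any other trace events. A hedged userspace sketch follows; the tracefs mount point (/sys/kernel/debug/tracing) is an assumption that varies between systems, while the event name and the record layout come straight from the TP_printk above.

/* Enable the cgroup_mkdir event, then stream records from trace_pipe. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/cgroup/cgroup_mkdir/enable";
	char buf[4096];
	ssize_t n;
	int fd = open(enable, O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable cgroup_mkdir");
		return 1;
	}
	close(fd);

	/* Each record ends in "root=%d id=%d level=%d path=%s" per TP_printk. */
	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	return 0;
}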
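
The new bt-bmc ioctl takes no argument, so a userspace caller only needs the header and an open device node. The node name /dev/ipmi-bt-host below is an assumption (the driver, not this header, chooses it).

/* Raise the BT SMS_ATN bit from the BMC side. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bt-bmc.h>

int main(void)
{
	int fd = open("/dev/ipmi-bt-host", O_RDWR);	/* node name assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BT_BMC_IOCTL_SMS_ATN) < 0)	/* _IO(0xb1, 0x00) */
		perror("BT_BMC_IOCTL_SMS_ATN");
	close(fd);
	return 0;
}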
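
A userspace check for the new btrfs COMPAT_RO bit can reuse the existing BTRFS_IOC_GET_FEATURES ioctl from the same header. A sketch, assuming the filesystem is mounted at the path given on the command line:

/* Report a free space tree that is present but flagged untrustworthy. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_feature_flags flags;
	int fd = open(argc > 1 ? argv[1] : "/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FEATURES, &flags) < 0) {
		perror("BTRFS_IOC_GET_FEATURES");
		return 1;
	}
	if ((flags.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE) &&
	    !(flags.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID))
		puts("free space tree present but not trusted");
	close(fd);
	return 0;
}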
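
The ethtool change only makes the existing 16-bit split explicit; behavior is unchanged. A self-checking userspace sketch, using the ethtool_cmd_speed() getter that accompanies the setter in the same header:

/* Round-trip the split: low half in 'speed', high half in 'speed_hi'. */
#include <assert.h>
#include <string.h>
#include <linux/ethtool.h>

int main(void)
{
	struct ethtool_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	ethtool_cmd_speed_set(&cmd, 100000);	/* 100Gb/s = 0x186a0 */
	assert(cmd.speed == 0x86a0 && cmd.speed_hi == 0x0001);
	assert(ethtool_cmd_speed(&cmd) == 100000);
	return 0;
}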
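
The new PCI AtomicOp bits pair up: DEVCAP2 advertises support, DEVCTL2 enables requests. A deliberately simplified kernel-side sketch using the standard pcie_capability_*() accessors; real code would also have to verify routing support along the whole path to the completer, which this does not do.

/* Enable AtomicOp requests if the device's DEVCAP2 advertises routing
 * and 64-bit compare-and-swap support. */
#include <linux/errno.h>
#include <linux/pci.h>

static int example_enable_atomic_requests(struct pci_dev *dev)
{
	u32 cap = 0;
	int ret;

	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (ret)
		return ret;

	if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
	    !(cap & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		return -EOPNOTSUPP;

	/* Turn on the AtomicOp requester enable in Device Control 2. */
	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
					PCI_EXP_DEVCTL2_ATOMIC_REQ);
}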