Diffstat (limited to 'include')
567 files changed, 16460 insertions, 3807 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 204f5819d464..cd84b12d1e60 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -126,8 +126,9 @@ struct acpi_exception_info { #define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B) #define AE_NOT_CONFIGURED EXCEP_ENV (0x001C) #define AE_ACCESS EXCEP_ENV (0x001D) +#define AE_IO_ERROR EXCEP_ENV (0x001E) -#define AE_CODE_ENV_MAX 0x001D +#define AE_CODE_ENV_MAX 0x001E /* * Programmer exceptions @@ -263,7 +264,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = { "There are no more Owner IDs available for ACPI tables or control methods"), EXCEP_TXT("AE_NOT_CONFIGURED", "The interface is not part of the current subsystem configuration"), - EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation") + EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"), + EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred") }; static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ad0a5ff3d4cd..14362a84c78e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -87,6 +87,8 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func, .package.elements = (eles) \ } +bool acpi_dev_present(const char *hid); + #ifdef CONFIG_ACPI #include <linux/proc_fs.h> @@ -631,7 +633,9 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev) static inline bool acpi_device_can_poweroff(struct acpi_device *adev) { - return adev->power.states[ACPI_STATE_D3_COLD].flags.valid; + return adev->power.states[ACPI_STATE_D3_COLD].flags.valid || + ((acpi_gbl_FADT.header.revision < 6) && + adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } #else /* CONFIG_ACPI */ diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index fbc2baf2b9dc..0d824a28522d 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -349,12 +349,28 @@ void acpi_os_redirect_output(void *destination); #endif /* - * Debug input + * Debug IO */ #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read); #endif +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals +acpi_status acpi_os_initialize_command_signals(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals +void acpi_os_terminate_command_signals(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready +acpi_status acpi_os_wait_command_ready(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_notify_command_complete +acpi_status acpi_os_notify_command_complete(void); +#endif + /* * Obtain ACPI table(s) */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 3aaaa8630735..012b2eed7a93 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20150930 +#define ACPI_CA_VERSION 0x20151218 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -190,6 +190,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); /* + * Optionally support group module level code. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE); + +/* * Optionally use 32-bit FADT addresses if and when there is a conflict * (address mismatch) between the 32-bit and 64-bit versions of the * address. 
Although ACPICA adheres to the ACPI specification which @@ -263,6 +268,19 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT); ACPI_INIT_GLOBAL(u32, acpi_dbg_level, ACPI_DEBUG_DEFAULT); ACPI_INIT_GLOBAL(u32, acpi_dbg_layer, 0); +/* Optionally enable timer output with Debug Object output */ + +ACPI_INIT_GLOBAL(u8, acpi_gbl_display_debug_timer, FALSE); + +/* + * Debugger command handshake globals. Host OSes need to access these + * variables to implement their own command handshake mechanism. + */ +#ifdef ACPI_DEBUGGER +ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE); +ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]); +#endif + /* * Other miscellaneous globals */ @@ -366,6 +384,29 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); #endif /* ACPI_APPLICATION */ +/* + * Debugger prototypes + * + * All interfaces used by debugger will be configured + * out of the ACPICA build unless the ACPI_DEBUGGER + * flag is defined. + */ +#ifdef ACPI_DEBUGGER +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + ACPI_EXTERNAL_RETURN_OK(prototype) + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + ACPI_EXTERNAL_RETURN_VOID(prototype) + +#else +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + static ACPI_INLINE prototype {return(AE_OK);} + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_DEBUGGER */ + /***************************************************************************** * * ACPICA public interface prototypes @@ -822,17 +863,9 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_set_firmware_waking_vectors + acpi_set_firmware_waking_vector (acpi_physical_address physical_address, acpi_physical_address physical_address64)) -ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_set_firmware_waking_vector(u32 - physical_address)) -#if ACPI_MACHINE_WIDTH == 64 -ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_set_firmware_waking_vector64(u64 - physical_address)) -#endif /* * ACPI Timer interfaces */ @@ -929,6 +962,8 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status void **data, void (*callback)(void *))) +void acpi_run_debugger(char *batch_buffer); + void acpi_set_debugger_thread_id(acpi_thread_id thread_id); #endif /* __ACXFACE_H__ */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index f914958c4adb..9633f606d89e 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -1148,7 +1148,7 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); #define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */ -/* Structures used for device/processor HID, UID, CID, and SUB */ +/* Structures used for device/processor HID, UID, CID */ struct acpi_pnp_device_id { u32 length; /* Length of string + null */ @@ -1178,7 +1178,6 @@ struct acpi_device_info { u64 address; /* _ADR value */ struct acpi_pnp_device_id hardware_id; /* _HID value */ struct acpi_pnp_device_id unique_id; /* _UID value */ - struct acpi_pnp_device_id subsystem_id; /* _SUB value */ struct acpi_pnp_device_id class_code; /* _CLS value */ struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */ }; @@ -1193,13 +1192,12 @@ struct acpi_device_info { #define ACPI_VALID_ADR 0x0002 #define ACPI_VALID_HID 0x0004 #define ACPI_VALID_UID 0x0008 -#define ACPI_VALID_SUB 0x0010 #define ACPI_VALID_CID 0x0020 #define ACPI_VALID_CLS 0x0040 #define ACPI_VALID_SXDS 
0x0100 #define ACPI_VALID_SXWS 0x0200 -/* Flags for _STA return value (current_status above) */ +/* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 323e5daece54..e21857d2ec05 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -150,6 +150,8 @@ */ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals /* * OSL interfaces used by utilities diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index fd6d70fe1219..f903fe64259a 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -129,6 +129,16 @@ static inline u8 acpi_os_readable(void *pointer, acpi_size length) return TRUE; } +static inline acpi_status acpi_os_initialize_command_signals(void) +{ + return AE_OK; +} + +static inline void acpi_os_terminate_command_signals(void) +{ + return; +} + /* * OSL interfaces added by Linux */ diff --git a/include/acpi/video.h b/include/acpi/video.h index c62392d9b52a..f11d342b4567 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -2,6 +2,7 @@ #define __ACPI_VIDEO_H #include <linux/errno.h> /* for ENODEV */ +#include <linux/types.h> /* for bool */ struct acpi_device; @@ -31,6 +32,7 @@ extern int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, void **edid); extern enum acpi_backlight_type acpi_video_get_backlight_type(void); extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); +extern bool acpi_video_handles_brightness_key_presses(void); #else static inline int acpi_video_register(void) { return 0; } static inline void acpi_video_unregister(void) { return; } @@ -46,6 +48,10 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void) static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) { } +static inline bool acpi_video_handles_brightness_key_presses(void) +{ + return false; +} #endif #endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index b42afada1280..0f45f93ef692 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -93,7 +93,7 @@ #endif /* CONFIG_SMP */ #ifndef smp_store_mb -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) #endif #ifndef smp_mb__before_atomic diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 4b4b056a6eb0..5148150cc80b 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -1,6 +1,8 @@ #ifndef __ASM_MEMORY_MODEL_H #define __ASM_MEMORY_MODEL_H +#include <linux/pfn.h> + #ifndef __ASSEMBLY__ #if defined(CONFIG_FLATMEM) @@ -72,7 +74,7 @@ /* * Convert a physical address to a Page Frame Number and back */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __phys_to_pfn(paddr) PHYS_PFN(paddr) #define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 14b0ff32fb9f..0b3c0d39ef75 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1,6 +1,8 @@ #ifndef 
_ASM_GENERIC_PGTABLE_H #define _ASM_GENERIC_PGTABLE_H +#include <linux/pfn.h> + #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU @@ -207,11 +209,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif -#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH -extern void pmdp_splitting_flush(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); -#endif - #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, @@ -554,7 +551,7 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, * by vm_insert_pfn(). */ static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn) + pfn_t pfn) { return 0; } @@ -569,7 +566,7 @@ static inline int track_pfn_copy(struct vm_area_struct *vma) } /* - * untrack_pfn_vma is called while unmapping a pfnmap for a region. + * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case pfn, size are zero). */ @@ -577,15 +574,23 @@ static inline void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { } + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} #else extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size); extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn); + pfn_t pfn); extern int track_pfn_copy(struct vm_area_struct *vma); extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size); +extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif #ifdef __HAVE_COLOR_ZERO_PAGE @@ -619,10 +624,6 @@ static inline int pmd_trans_huge(pmd_t pmd) { return 0; } -static inline int pmd_trans_splitting(pmd_t pmd) -{ - return 0; -} #ifndef __HAVE_ARCH_PMD_WRITE static inline int pmd_write(pmd_t pmd) { diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index e2aadbc7151f..39e1cb201b8e 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -12,8 +12,9 @@ * GNU General Public License for more details. * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. 
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP * - * Authors: Waiman Long <waiman.long@hp.com> + * Authors: Waiman Long <waiman.long@hpe.com> */ #ifndef __ASM_GENERIC_QSPINLOCK_H #define __ASM_GENERIC_QSPINLOCK_H @@ -62,7 +63,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock) static __always_inline int queued_spin_trylock(struct qspinlock *lock) { if (!atomic_read(&lock->val) && - (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0)) + (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0)) return 1; return 0; } @@ -77,7 +78,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) { u32 val; - val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL); + val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL); if (likely(val == 0)) return; queued_spin_lock_slowpath(lock, val); @@ -93,7 +94,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) /* * smp_mb__before_atomic() in order to guarantee release semantics */ - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_sub(_Q_LOCKED_VAL, &lock->val); } #endif diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index b58fd667f87b..af0254c09424 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -4,6 +4,7 @@ /* References to section boundaries */ #include <linux/compiler.h> +#include <linux/types.h> /* * Usage guidelines: @@ -63,4 +64,68 @@ static inline int arch_is_kernel_data(unsigned long addr) } #endif +/** + * memory_contains - checks if an object is contained within a memory region + * @begin: virtual address of the beginning of the memory region + * @end: virtual address of the end of the memory region + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if the object specified by @virt and @size is entirely + * contained within the memory region defined by @begin and @end, false + * otherwise. + */ +static inline bool memory_contains(void *begin, void *end, void *virt, + size_t size) +{ + return virt >= begin && virt + size <= end; +} + +/** + * memory_intersects - checks if the region occupied by an object intersects + * with another memory region + * @begin: virtual address of the beginning of the memory regien + * @end: virtual address of the end of the memory region + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if an object's memory region, specified by @virt and @size, + * intersects with the region specified by @begin and @end, false otherwise. + */ +static inline bool memory_intersects(void *begin, void *end, void *virt, + size_t size) +{ + void *vend = virt + size; + + return (virt >= begin && virt < end) || (vend >= begin && vend < end); +} + +/** + * init_section_contains - checks if an object is contained within the init + * section + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if the object specified by @virt and @size is entirely + * contained within the init section, false otherwise. 
+ */ +static inline bool init_section_contains(void *virt, size_t size) +{ + return memory_contains(__init_begin, __init_end, virt, size); +} + +/** + * init_section_intersects - checks if the region occupied by an object + * intersects with the init section + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if an object's memory region, specified by @virt and @size, + * intersects with the init section, false otherwise. + */ +static inline bool init_section_intersects(void *virt, size_t size) +{ + return memory_intersects(__init_begin, __init_end, virt, size); +} + #endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 9916d0e4eff5..25d0914481a2 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -23,6 +23,12 @@ #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) +#define CNTHCTL_EL1PCTEN (1 << 0) +#define CNTHCTL_EL1PCEN (1 << 1) +#define CNTHCTL_EVNTEN (1 << 2) +#define CNTHCTL_EVNTDIR (1 << 3) +#define CNTHCTL_EVNTI (0xF << 4) + enum arch_timer_reg { ARCH_TIMER_REG_CTRL, ARCH_TIMER_REG_TVAL, diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 077cae1e6b51..84d13b11ad7b 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -128,6 +128,7 @@ struct aead_request { * @exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @init, used to remove various changes set in * @init. + * @base: Definition of a generic crypto cipher algorithm. * * All fields except @ivsize is mandatory and must be filled. */ diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 45cd5b328040..354de15cea6b 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -21,9 +21,9 @@ * @src: Source data * @dst: Destination data * @src_len: Size of the input buffer - * @dst_len: Size of the output buffer. It needs to be at leaset + * @dst_len: Size of the output buffer. It needs to be at least * as big as the expected result depending on the operation - * After operation it will be updated with the acctual size of the + * After operation it will be updated with the actual size of the * result. * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. @@ -59,7 +59,7 @@ struct crypto_akcipher { * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation - * @encrypt: Function performs an encrytp operation as defined by public key + * @encrypt: Function performs an encrypt operation as defined by public key * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation @@ -73,7 +73,7 @@ struct crypto_akcipher { * @set_priv_key: Function invokes the algorithm specific set private key * function, which knows how to decode and interpret * the BER encoded private key - * @max_size: Function returns dest buffer size reqired for a given key. + * @max_size: Function returns dest buffer size required for a given key. * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. 
This function is called only once at @@ -232,7 +232,7 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, } /** - * akcipher_request_set_crypt() -- Sets reqest parameters + * akcipher_request_set_crypt() -- Sets request parameters * * Sets parameters required by crypto operation * diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index e1e5a3e5dd1b..56f217d41f12 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -34,6 +34,9 @@ #define TGR160_DIGEST_SIZE 20 #define TGR192_DIGEST_SIZE 24 +/* not defined in include/crypto/ */ +#define SM3256_DIGEST_SIZE 32 + extern const char *const hash_algo_name[HASH_ALGO__LAST]; extern const int hash_digest_size[HASH_ALGO__LAST]; diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 9a2bda15e454..479a0078f0f7 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -13,6 +13,22 @@ #ifndef _CRYPTO_AKCIPHER_INT_H #define _CRYPTO_AKCIPHER_INT_H #include <crypto/akcipher.h> +#include <crypto/algapi.h> + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; /* * Transform internal helpers. @@ -38,6 +54,56 @@ static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; } +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + /** * crypto_register_akcipher() -- Register public key algorithm * @@ -57,4 +123,16 @@ int crypto_register_akcipher(struct akcipher_alg *alg); * @alg: algorithm definition */ void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Unregister public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct 
akcipher_instance *inst); #endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index f997e2d29b5a..c7585bdecbc2 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); void rsa_free_key(struct rsa_key *rsa_key); + +extern struct crypto_template rsa_pkcs1pad_tmpl; #endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h index 146af825eedb..327deac963c0 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -13,6 +13,8 @@ #define MD5_H2 0x98badcfeUL #define MD5_H3 0x10325476UL +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + struct md5_state { u32 hash[MD5_HASH_WORDS]; u32 block[MD5_BLOCK_WORDS]; diff --git a/include/crypto/sha.h b/include/crypto/sha.h index dd7905a3c22e..c94d3eb1cefd 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -64,6 +64,12 @@ #define SHA512_H6 0x1f83d9abfb41bd6bULL #define SHA512_H7 0x5be0cd19137e2179ULL +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + struct sha1_state { u32 state[SHA1_DIGEST_SIZE / 4]; u64 count; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0a271ca1f7c7..d7162cf1c3e1 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -349,6 +349,8 @@ struct drm_file { struct list_head event_list; int event_space; + struct mutex event_read_lock; + struct drm_prime_file_private prime; }; @@ -585,6 +587,13 @@ struct drm_driver { int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + /** + * Hook for allocating the GEM object struct, for use by core + * helpers. 
+ */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + /* prime: */ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, @@ -1059,7 +1068,7 @@ void drm_dev_ref(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); +int drm_dev_set_unique(struct drm_device *dev, const char *name); struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); @@ -1108,6 +1117,7 @@ static inline int drm_pci_set_busid(struct drm_device *dev, #define DRM_PCIE_SPEED_80 4 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); +extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw); /* platform section */ extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); @@ -1121,4 +1131,7 @@ static __inline__ bool drm_can_sleep(void) return true; } +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + #endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 4b74c97d297a..d3eaa5df187a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -130,10 +130,6 @@ int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); -int -drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc); - void drm_atomic_legacy_backoff(struct drm_atomic_state *state); void @@ -149,7 +145,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((connector) = (state)->connectors[__i], \ (connector_state) = (state)->connector_states[__i], 1); \ (__i)++) \ - if (connector) + for_each_if (connector) #define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ for ((__i) = 0; \ @@ -157,7 +153,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((crtc) = (state)->crtcs[__i], \ (crtc_state) = (state)->crtc_states[__i], 1); \ (__i)++) \ - if (crtc_state) + for_each_if (crtc_state) #define for_each_plane_in_state(state, plane, plane_state, __i) \ for ((__i) = 0; \ @@ -165,7 +161,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((plane) = (state)->planes[__i], \ (plane_state) = (state)->plane_states[__i], 1); \ (__i)++) \ - if (plane_state) + for_each_if (plane_state) static inline bool drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) { diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 8cba54a2a0a0..89d008dc08e2 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -62,6 +62,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, + bool atomic); void drm_atomic_helper_swap_state(struct drm_device *dev, struct drm_atomic_state *state); @@ -81,6 +83,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set); int __drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_atomic_state *state); 
+int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state); + int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); @@ -118,6 +126,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); void drm_atomic_helper_connector_reset(struct drm_connector *connector); void __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 3f0c6909dda1..c65a212db77e 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -85,7 +85,11 @@ static inline uint64_t I642U64(int64_t val) return (uint64_t)*((uint64_t *)&val); } -/* rotation property bits */ +/* + * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the + * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and + * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation + */ #define DRM_ROTATE_MASK 0x0f #define DRM_ROTATE_0 0 #define DRM_ROTATE_90 1 @@ -158,23 +162,60 @@ struct drm_tile_group { u8 group_data[8]; }; +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ struct drm_framebuffer_funcs { - /* note: use drm_framebuffer_remove() */ + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by ->fb_create() in + * &drm_mode_config_funcs. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. + */ void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in struct &drm_file. This is used by + * the core to implement the GETFB IOCTL, which returns (for + * sufficiently priviledged user) also a native buffer handle. This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); - /* - * Optional callback for the dirty fb ioctl. + /** + * @dirty: + * + * Optional callback for the dirty fb IOCTL. + * + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. 
* - * Userspace can notify the driver via this callback - * that a area of the framebuffer has changed and should - * be flushed to the display hardware. + * RETURNS: * - * See documentation in drm_mode.h for the struct - * drm_mode_fb_dirty_cmd for more information as all - * the semantics and arguments have a one to one mapping - * on this function. + * 0 on success or a negative error code on failure. */ int (*dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, @@ -250,6 +291,11 @@ struct drm_plane; struct drm_bridge; struct drm_atomic_state; +struct drm_crtc_helper_funcs; +struct drm_encoder_helper_funcs; +struct drm_connector_helper_funcs; +struct drm_plane_helper_funcs; + /** * struct drm_crtc_state - mutable CRTC state * @crtc: backpointer to the CRTC @@ -260,6 +306,7 @@ struct drm_atomic_state; * @active_changed: crtc_state->active has been toggled. * @connectors_changed: connectors to this crtc have been updated * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes + * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @last_vblank_count: for helpers and drivers to capture the vblank of the * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings @@ -293,6 +340,8 @@ struct drm_crtc_state { */ u32 plane_mask; + u32 connector_mask; + /* last_vblank_count: for vblank waits before cleanup */ u32 last_vblank_count; @@ -311,23 +360,6 @@ struct drm_crtc_state { /** * struct drm_crtc_funcs - control CRTCs for a given device - * @save: save CRTC state - * @restore: restore CRTC state - * @reset: reset CRTC after state has been invalidated (e.g. resume) - * @cursor_set: setup the cursor - * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set - * @cursor_move: move the cursor - * @gamma_set: specify color ramp for CRTC - * @destroy: deinit and free object - * @set_property: called when a property is changed - * @set_config: apply a new CRTC configuration - * @page_flip: initiate a page flip - * @atomic_duplicate_state: duplicate the atomic state for this CRTC - * @atomic_destroy_state: destroy an atomic state for this CRTC - * @atomic_set_property: set a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_set_property()) - * @atomic_get_property: get a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_get_property()) * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name @@ -339,54 +371,317 @@ struct drm_crtc_state { * bus accessors. */ struct drm_crtc_funcs { - /* Save CRTC state */ - void (*save)(struct drm_crtc *crtc); /* suspend? */ - /* Restore CRTC state */ - void (*restore)(struct drm_crtc *crtc); /* resume? */ - /* Reset CRTC state */ + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_crtc *crtc); - /* cursor controls */ + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. 
+ * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_move)(struct drm_crtc *crtc, int x, int y); - /* Set gamma on the CRTC */ + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * NOTE: + * + * Drivers that support gamma tables and also fbdev emulation through + * the provided helper library need to take care to fill out the gamma + * hooks for both. Currently there's a bit an unfortunate duplication + * going on, which should eventually be unified to just one set of + * hooks. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); - /* Object destroy routine */ + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_crtc *crtc); + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * struct &drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_config)(struct drm_mode_set *set); - /* - * Flip to the given framebuffer. This implements the page - * flip ioctl described in drm_mode.h, specifically, the - * implementation must return immediately and block all - * rendering to the current fb until the flip has completed. 
- * If userspace set the event flag in the ioctl, the event - * argument will point to an event to send back when the flip - * completes, otherwise it will be NULL. + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls the CRTC ->page_flip() operation with a + * pointer to the new frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a struct &drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * FIXME: + * + * Up to that point drivers need to manage events themselves and can use + * even->base.list freely for that. Specifically they need to ensure + * that they don't send out page flip (or vblank) events for which the + * corresponding drm file has been closed already. The drm core + * unfortunately does not (yet) take care of that. Therefore drivers + * currently must clean up and release pending events in their + * ->preclose driver function. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * ->page_flip() operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. 
+ * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_crtc_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_crtc should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before crtc->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_crtc *crtc, struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. 
+ * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC IOCTL. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). + */ int (*atomic_get_property)(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, @@ -416,7 +711,7 @@ struct drm_crtc_funcs { * @properties: property tracking for this CRTC * @state: current atomic state for this CRTC * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. @@ -426,6 +721,8 @@ struct drm_crtc { struct device_node *port; struct list_head head; + char *name; + /* * crtc mutex * @@ -463,14 +760,14 @@ struct drm_crtc { uint16_t *gamma_store; /* if you are using the helper */ - const void *helper_private; + const struct drm_crtc_helper_funcs *helper_private; struct drm_object_properties properties; struct drm_crtc_state *state; /* - * For legacy crtc ioctls so that atomic drivers can get at the locking + * For legacy crtc IOCTLs so that atomic drivers can get at the locking * acquire context. */ struct drm_modeset_acquire_ctx *acquire_ctx; @@ -495,54 +792,239 @@ struct drm_connector_state { /** * struct drm_connector_funcs - control connectors on a given device - * @dpms: set power state - * @save: save connector state - * @restore: restore connector state - * @reset: reset connector after state has been invalidated (e.g. resume) - * @detect: is this connector active? - * @fill_modes: fill mode list for this connector - * @set_property: property for this connector may need an update - * @destroy: make object go away - * @force: notify the driver that the connector is forced on - * @atomic_duplicate_state: duplicate the atomic state for this connector - * @atomic_destroy_state: destroy an atomic state for this connector - * @atomic_set_property: set a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_set_property()) - * @atomic_get_property: get a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_get_property()) * * Each CRTC may have one or more connectors attached to it. The functions * below allow the core DRM code to control connectors, enumerate available modes, * etc. */ struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. 
Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_dpms() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*dpms)(struct drm_connector *connector, int mode); - void (*save)(struct drm_connector *connector); - void (*restore)(struct drm_connector *connector); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_connector *connector); - /* Check to see if anything is attached to the connector. - * @force is set to false whilst polling, true when checking the - * connector due to user request. @force can be used by the driver - * to avoid expensive, destructive operations during automated - * probing. + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * RETURNS: + * + * drm_connector_status indicating the connector's status. */ enum drm_connector_status (*detect)(struct drm_connector *connector, bool force); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ + void (*force)(struct drm_connector *connector); + + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to connector->modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * connector->modes. Furthermore it must update connector->status and + * connector->edid. If no EDID has been received for this output + * connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() or + * drm_helper_probe_single_connector_modes_nomerge() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into connector->modes. 
+ */ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_connector *connector, struct drm_property *property, uint64_t val); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ void (*destroy)(struct drm_connector *connector); - void (*force)(struct drm_connector *connector); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before connector->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_connector *connector, struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. 
+ * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR IOCTL. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ int (*atomic_get_property)(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, @@ -551,13 +1033,26 @@ struct drm_connector_funcs { /** * struct drm_encoder_funcs - encoder controls - * @reset: reset state (e.g. at init or resume time) - * @destroy: cleanup and free associated data * * Encoders sit between CRTCs and connectors. */ struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + */ void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ void (*destroy)(struct drm_encoder *encoder); }; @@ -593,7 +1088,7 @@ struct drm_encoder { struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; - const void *helper_private; + const struct drm_encoder_helper_funcs *helper_private; }; /* should we poll this connector for connects and disconnects */ @@ -698,7 +1193,7 @@ struct drm_connector { /* requested DPMS state */ int dpms; - const void *helper_private; + const struct drm_connector_helper_funcs *helper_private; /* forced on connector */ struct drm_cmdline_mode cmdline_mode; @@ -778,40 +1273,203 @@ struct drm_plane_state { /** * struct drm_plane_funcs - driver plane control functions - * @update_plane: update the plane configuration - * @disable_plane: shut down the plane - * @destroy: clean up plane resources - * @reset: reset plane after state has been invalidated (e.g. 
resume) - * @set_property: called when a property is changed - * @atomic_duplicate_state: duplicate the atomic state for this plane - * @atomic_destroy_state: destroy an atomic state for this plane - * @atomic_set_property: set a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_set_property()) - * @atomic_get_property: get a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_get_property()) */ struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb paramters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -<errorname>EINVAL</errorname> error. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane. The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*disable_plane)(struct drm_plane *plane); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_plane *plane); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_plane_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. 
+ */ int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before plane->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_plane *plane, struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. 
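When a driver does carry private plane state, the subclassing pattern described here looks roughly like the sketch below. Everything with a foo_ prefix (including the "sharpness" property) is invented for illustration; __drm_atomic_helper_plane_duplicate_state() is the shared-state helper mentioned above.

#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

struct foo_plane {
	struct drm_plane base;
	struct drm_property *sharpness_property;
};
#define to_foo_plane(p) container_of(p, struct foo_plane, base)

struct foo_plane_state {
	struct drm_plane_state base;
	unsigned int sharpness;		/* decoded driver-private property */
};
#define to_foo_plane_state(s) container_of(s, struct foo_plane_state, base)

static struct drm_plane_state *foo_plane_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *old = to_foo_plane_state(plane->state);
	struct foo_plane_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Copies the base state and grabs the framebuffer reference. */
	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	state->sharpness = old->sharpness;

	return &state->base;
}

static int foo_plane_atomic_set_property(struct drm_plane *plane,
					 struct drm_plane_state *state,
					 struct drm_property *property,
					 uint64_t val)
{
	struct foo_plane *fp = to_foo_plane(plane);

	/* Only decode and store; range and consistency checks belong in
	 * the atomic_check phase, as explained above. */
	if (property == fp->sharpness_property) {
		to_foo_plane_state(state)->sharpness = val;
		return 0;
	}

	return -EINVAL;
}

The matching @atomic_destroy_state implementation would then release the references taken during duplication and kfree() the subclassed state.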
+ */ int (*atomic_set_property)(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE IOCTL. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). + */ int (*atomic_get_property)(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, @@ -824,6 +1482,7 @@ enum drm_plane_type { DRM_PLANE_TYPE_CURSOR, }; + /** * struct drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to @@ -846,6 +1505,8 @@ struct drm_plane { struct drm_device *dev; struct list_head head; + char *name; + struct drm_modeset_lock mutex; struct drm_mode_object base; @@ -866,7 +1527,7 @@ struct drm_plane { enum drm_plane_type type; - const void *helper_private; + const struct drm_plane_helper_funcs *helper_private; struct drm_plane_state *state; }; @@ -874,24 +1535,114 @@ struct drm_plane { /** * struct drm_bridge_funcs - drm_bridge control functions * @attach: Called during drm_bridge_attach - * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge - * @disable: Called right before encoder prepare, disables the bridge - * @post_disable: Called right after encoder prepare, for lockstepped disable - * @mode_set: Set this mode to the bridge - * @pre_enable: Called right before encoder commit, for lockstepped commit - * @enable: Called right after encoder commit, enables the bridge */ struct drm_bridge_funcs { int (*attach)(struct drm_bridge *bridge); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The paramater + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or the next + * &drm_bridge. The parameter adjusted_mode is the input mode the bridge + * requires. It can be modified by this callback and does not need to + * match mode. + * + * This is the only hook that allows a bridge to reject a modeset. If + * this function passes all other callbacks must succeed for this + * configuration. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Drivers MUST + * NOT touch any persistent state (hardware or software) or data + * structures except the passed in @state parameter. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ bool (*mode_fixup)(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->disable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. 
+ * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + */ void (*disable)(struct drm_bridge *bridge); + + /** + * @post_disable: + * + * This callback should disable the bridge. It is called right after + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->post_disable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + */ void (*post_disable)(struct drm_bridge *bridge); + + /** + * @mode_set: + * + * This callback should set the given mode on the bridge. It is called + * after the ->mode_set() callback for the preceding element in the + * display pipeline has been called already. The display pipe (i.e. + * clocks and timing signals) is off when this function is called. + */ void (*mode_set)(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->pre_enable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + */ void (*pre_enable)(struct drm_bridge *bridge); + + /** + * @enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->enable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one.
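The enable/disable ordering spelled out above is easiest to see as a skeleton. A hypothetical foo bridge (all foo_ names are invented for illustration) would provide something like:

#include <drm/drm_crtc.h>

static void foo_bridge_pre_enable(struct drm_bridge *bridge)
{
	/* Pipe not running yet: power up, but keep the link to the next
	 * element in the chain off. */
}

static void foo_bridge_enable(struct drm_bridge *bridge)
{
	/* Pipe is running: start driving the link to the next element. */
}

static void foo_bridge_disable(struct drm_bridge *bridge)
{
	/* Pipe is still running: stop driving the output link. */
}

static void foo_bridge_post_disable(struct drm_bridge *bridge)
{
	/* Pipe is off now: complete the power-down. */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	/* .attach, .mode_fixup and .mode_set omitted for brevity */
	.pre_enable	= foo_bridge_pre_enable,
	.enable		= foo_bridge_enable,
	.disable	= foo_bridge_disable,
	.post_disable	= foo_bridge_post_disable,
};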
+ */ void (*enable)(struct drm_bridge *bridge); }; @@ -922,7 +1673,7 @@ struct drm_bridge { * struct drm_atomic_state - the global state object for atomic updates * @dev: parent DRM device * @allow_modeset: allow full modeset - * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @planes: pointer to array of plane pointers * @plane_states: pointer to array of plane states pointers * @crtcs: pointer to array of CRTC pointers @@ -977,31 +1728,265 @@ struct drm_mode_set { /** * struct drm_mode_config_funcs - basic driver provided mode setting functions - * @fb_create: create a new framebuffer object - * @output_poll_changed: function to handle output configuration changes - * @atomic_check: check whether a given atomic state update is possible - * @atomic_commit: commit an atomic state update previously verified with - * atomic_check() - * @atomic_state_alloc: allocate a new atomic state - * @atomic_state_clear: clear the atomic state - * @atomic_state_free: free the atomic state * * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that * involve drivers. */ struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * struct &drm_mode_fb_cmd2 for details. + * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_changed from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks + * there's no reason this is a core function. + */ void (*output_poll_changed)(struct drm_device *dev); + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. 
For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the + * ->atomic_check() stage? + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ int (*atomic_check)(struct drm_device *dev, - struct drm_atomic_state *a); + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Asynchronous commits (as indicated with the async parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. 
But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. Asynchronous commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a struct &drm_event in the event + * member of each CRTC's &drm_crtc_state structure. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the atomic commit. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if an asynchronous updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). 
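For a driver that leans on the atomic helpers, the resulting vtable is small. A sketch, where foo_fb_create() and foo_output_poll_changed() are hypothetical driver functions and the two helpers are the ones named in the comments above:

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      const struct drm_mode_fb_cmd2 *mode_cmd);
static void foo_output_poll_changed(struct drm_device *dev);

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	/* Allocates the driver's framebuffer subclass, fills the metadata
	 * with drm_helper_mode_fill_fb_struct() and registers it with
	 * drm_framebuffer_init(). Note the const mode_cmd, matching the
	 * updated prototype in this header. */
	.fb_create		= foo_fb_create,
	.output_poll_changed	= foo_output_poll_changed,
	/* Validation and commit are delegated to the atomic helpers. */
	.atomic_check		= drm_atomic_helper_check,
	.atomic_commit		= drm_atomic_helper_commit,
};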
+ */ int (*atomic_commit)(struct drm_device *dev, - struct drm_atomic_state *a, + struct drm_atomic_state *state, bool async); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_lock_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + */ void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs driver private resources and the &drm_atomic_state + * itself. Note that the core first calls drm_atomic_state_clear() to + * avoid code duplicate between the clear and free hooks. + * + * Drivers that implement this must call drm_atomic_state_default_free() + * to release common resources. + */ void (*atomic_state_free)(struct drm_atomic_state *state); }; @@ -1010,7 +1995,7 @@ struct drm_mode_config_funcs { * @mutex: mutex protecting KMS related lists and structures * @connection_mutex: ww mutex protecting connector state and routing * @acquire_ctx: global implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * @idr_mutex: mutex for KMS ID allocation and management * @crtc_idr: main KMS ID tracking object * @fb_lock: mutex to protect fb state and lists @@ -1166,7 +2151,7 @@ struct drm_mode_config { */ #define drm_for_each_plane_mask(plane, dev, plane_mask) \ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ - if ((plane_mask) & (1 << drm_plane_index(plane))) + for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) @@ -1183,11 +2168,13 @@ struct drm_prop_enum_list { char *name; }; -extern int drm_crtc_init_with_planes(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_plane *primary, - struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs); +extern __printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); extern void drm_crtc_cleanup(struct drm_crtc *crtc); extern unsigned int drm_crtc_index(struct drm_crtc *crtc); @@ -1233,10 +2220,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, void drm_bridge_pre_enable(struct drm_bridge *bridge); void drm_bridge_enable(struct drm_bridge *bridge); -extern int drm_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - const struct drm_encoder_funcs *funcs, - int encoder_type); +extern __printf(5, 6) +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const 
struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? @@ -1251,13 +2239,15 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); } -extern int drm_universal_plane_init(struct drm_device *dev, - struct drm_plane *plane, - unsigned long possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, - unsigned int format_count, - enum drm_plane_type type); +extern __printf(8, 9) +int drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + enum drm_plane_type type, + const char *name, ...); extern int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, @@ -1543,7 +2533,7 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev, /* Plane list iterator for legacy (overlay only) planes. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ - if (plane->type == DRM_PLANE_TYPE_OVERLAY) + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 3febb4b9fce9..4b37afa2b73b 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -40,148 +40,7 @@ #include <linux/fb.h> #include <drm/drm_crtc.h> - -enum mode_set_atomic { - LEAVE_ATOMIC_MODE_SET, - ENTER_ATOMIC_MODE_SET, -}; - -/** - * struct drm_crtc_helper_funcs - helper operations for CRTCs - * @dpms: set power state - * @prepare: prepare the CRTC, called before @mode_set - * @commit: commit changes to CRTC, called after @mode_set - * @mode_fixup: try to fixup proposed mode for this CRTC - * @mode_set: set this mode - * @mode_set_nofb: set mode only (no scanout buffer attached) - * @mode_set_base: update the scanout buffer - * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) - * @load_lut: load color palette - * @disable: disable CRTC when no longer in use - * @enable: enable CRTC - * @atomic_check: check for validity of an atomic state - * @atomic_begin: begin atomic update - * @atomic_flush: flush atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). - */ -struct drm_crtc_helper_funcs { - /* - * Control power levels on the CRTC. If the mode passed in is - * unsupported, the provider must use the next lowest power level. 
- */ - void (*dpms)(struct drm_crtc *crtc, int mode); - void (*prepare)(struct drm_crtc *crtc); - void (*commit)(struct drm_crtc *crtc); - - /* Provider can fixup or change mode timings before modeset occurs */ - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - /* Actually set the mode */ - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - /* Actually set the mode for atomic helpers, optional */ - void (*mode_set_nofb)(struct drm_crtc *crtc); - - /* Move the crtc on the current fb to the given position *optional* */ - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb); - int (*mode_set_base_atomic)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y, - enum mode_set_atomic); - - /* reload the current crtc LUT */ - void (*load_lut)(struct drm_crtc *crtc); - - void (*disable)(struct drm_crtc *crtc); - void (*enable)(struct drm_crtc *crtc); - - /* atomic helpers */ - int (*atomic_check)(struct drm_crtc *crtc, - struct drm_crtc_state *state); - void (*atomic_begin)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); - void (*atomic_flush)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); -}; - -/** - * struct drm_encoder_helper_funcs - helper operations for encoders - * @dpms: set power state - * @save: save connector state - * @restore: restore connector state - * @mode_fixup: try to fixup proposed mode for this connector - * @prepare: part of the disable sequence, called before the CRTC modeset - * @commit: called after the CRTC modeset - * @mode_set: set this mode, optional for atomic helpers - * @get_crtc: return CRTC that the encoder is currently attached to - * @detect: connection status detection - * @disable: disable encoder when not in use (overrides DPMS off) - * @enable: enable encoder - * @atomic_check: check for validity of an atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). 
- */ -struct drm_encoder_helper_funcs { - void (*dpms)(struct drm_encoder *encoder, int mode); - void (*save)(struct drm_encoder *encoder); - void (*restore)(struct drm_encoder *encoder); - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - void (*prepare)(struct drm_encoder *encoder); - void (*commit)(struct drm_encoder *encoder); - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); - /* detect for DAC style encoders */ - enum drm_connector_status (*detect)(struct drm_encoder *encoder, - struct drm_connector *connector); - void (*disable)(struct drm_encoder *encoder); - - void (*enable)(struct drm_encoder *encoder); - - /* atomic helpers */ - int (*atomic_check)(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state); -}; - -/** - * struct drm_connector_helper_funcs - helper operations for connectors - * @get_modes: get mode list for this connector - * @mode_valid: is this mode valid on the given connector? (optional) - * @best_encoder: return the preferred encoder for this connector - * @atomic_best_encoder: atomic version of @best_encoder - * - * The helper operations are called by the mid-layer CRTC helper. - */ -struct drm_connector_helper_funcs { - int (*get_modes)(struct drm_connector *connector); - enum drm_mode_status (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, - struct drm_connector_state *connector_state); -}; +#include <drm/drm_modeset_helper_vtables.h> extern void drm_helper_disable_unused_functions(struct drm_device *dev); extern int drm_crtc_helper_set_config(struct drm_mode_set *set); @@ -197,25 +56,7 @@ extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode); extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd); - -static inline void drm_crtc_helper_add(struct drm_crtc *crtc, - const struct drm_crtc_helper_funcs *funcs) -{ - crtc->helper_private = funcs; -} - -static inline void drm_encoder_helper_add(struct drm_encoder *encoder, - const struct drm_encoder_helper_funcs *funcs) -{ - encoder->helper_private = funcs; -} - -static inline void drm_connector_helper_add(struct drm_connector *connector, - const struct drm_connector_helper_funcs *funcs) -{ - connector->helper_private = funcs; -} + const struct drm_mode_fb_cmd2 *mode_cmd); extern void drm_helper_resume_force_mode(struct drm_device *dev); @@ -229,10 +70,6 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); -extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector - *connector, - uint32_t maxX, - uint32_t maxY); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern bool drm_helper_hpd_irq_event(struct drm_device *dev); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index bb9d0deca07c..1252108da0ef 100644 --- a/include/drm/drm_dp_helper.h +++ 
b/include/drm/drm_dp_helper.h @@ -455,16 +455,52 @@ # define DP_EDP_14 0x03 #define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) #define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) #define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) #define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 #define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) #define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 5340099741ae..24ab1787b771 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -94,6 +94,7 @@ struct drm_dp_mst_port { struct drm_dp_mst_topology_mgr *mgr; struct edid *cached_edid; /* for DP logical ports - make tiling work */ + bool has_audio; }; /** @@ -215,13 +216,13 @@ struct drm_dp_sideband_msg_rx { struct drm_dp_sideband_msg_hdr initial_hdr; }; - +#define DRM_DP_MAX_SDP_STREAMS 16 struct drm_dp_allocate_payload { u8 port_number; u8 number_sdp_streams; u8 vcpi; u16 pbn; - u8 sdp_stream_sink[8]; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; }; struct drm_dp_allocate_payload_ack_reply { @@ -420,7 +421,7 @@ struct drm_dp_payload { struct drm_dp_mst_topology_mgr { struct device *dev; - struct drm_dp_mst_topology_cbs *cbs; + const struct drm_dp_mst_topology_cbs *cbs; int max_dpcd_transaction_bytes; struct drm_dp_aux *aux; /* auxch for this topology mgr to use */ int max_payloads; @@ -450,9 +451,7 @@ struct drm_dp_mst_topology_mgr { the mstb tx_slots and txmsg->state once they are queued */ struct mutex qlock; struct list_head tx_msg_downq; - struct list_head tx_msg_upq; bool tx_down_in_progress; - bool tx_up_in_progress; /* payload 
info + lock for it */ struct mutex payload_lock; @@ -484,6 +483,8 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 8b9cc3671858..82cdf611393d 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -95,7 +95,7 @@ struct drm_encoder_slave_funcs { struct drm_encoder_slave { struct drm_encoder base; - struct drm_encoder_slave_funcs *slave_funcs; + const struct drm_encoder_slave_funcs *slave_funcs; void *slave_priv; void *bus_priv; }; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index c54cf3d4a03f..be62bd321e75 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -18,7 +18,7 @@ void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 87b090c4b730..d8a40dff0d1d 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -34,6 +34,11 @@ struct drm_fb_helper; #include <linux/kgdb.h> +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + struct drm_fb_offset { int x, y; }; @@ -74,25 +79,76 @@ struct drm_fb_helper_surface_size { /** * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library - * @gamma_set: Set the given gamma lut register on the given crtc. - * @gamma_get: Read the given gamma lut register on the given crtc, used to - * save the current lut when force-restoring the fbdev for e.g. - * kdbg. - * @fb_probe: Driver callback to allocate and initialize the fbdev info - * structure. Furthermore it also needs to allocate the drm - * framebuffer used to back the fbdev. - * @initial_config: Setup an initial fbdev display configuration * * Driver callbacks used by the fbdev emulation helper library. */ struct drm_fb_helper_funcs { + /** + * @gamma_set: + * + * Set the given gamma LUT register on the given CRTC. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); + /** + * @gamma_get: + * + * Read the given gamma LUT register on the given CRTC, used to save the + * current LUT when force-restoring the fbdev for e.g. kdbg. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. 
+ */ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. + */ int (*fb_probe)(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); + + /** + * @initial_config: + * + * Driver callback to setup an initial fbdev display configuration. + * Drivers can use this callback to tell the fbdev emulation what the + * preferred initial configuration is. This is useful to implement + * smooth booting where the fbdev (and subsequently all userspace) never + * changes the mode, but always inherits the existing configuration. + * + * This callback is optional. + * + * RETURNS: + * + * The driver should return true if a suitable initial configuration has + * been filled out and false when the fbdev helper should fall back to + * the default probing logic. + */ bool (*initial_config)(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, @@ -105,18 +161,22 @@ struct drm_fb_helper_connector { }; /** - * struct drm_fb_helper - helper to emulate fbdev on top of kms - * @fb: Scanout framebuffer object - * @dev: DRM device + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS + * @fb: Scanout framebuffer object + * @dev: DRM device * @crtc_count: number of possible CRTCs * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) * @connector_count: number of connected connectors * @connector_info_alloc_count: size of connector_info + * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors - * @kernel_fb_list: list_head in kernel_fb_helper_list - * @delayed_hotplug: was there a hotplug while kms master active? + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embedded this into their overall driver structure. + * Drivers must also fill out a struct &drm_fb_helper_funcs with a few + * operations. */ struct drm_fb_helper { struct drm_framebuffer *fb; @@ -129,10 +189,21 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ struct list_head kernel_fb_list; - /* we got a hotplug but fbdev wasn't running the console - delay until next set_par */ + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. another KMS master was active. The output configuration + * needs to be reprobe when fbdev is in control again. + */ bool delayed_hotplug; /** diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 15e7f007380f..0b3e11ab8757 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,76 +35,129 @@ */ /** - * This structure defines the drm_mm memory object, which will be used by the - * DRM for its buffer objects. + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. 
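The @vma_node documentation below mentions drm_gem_create_mmap_offset() and drm_vma_node_offset_addr(); together they are essentially what a driver's map-offset path (for example a dumb_map_offset implementation) boils down to. A small sketch, with foo_gem_map_offset() as a hypothetical helper:

#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

static int foo_gem_map_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Allocates obj->vma_node in the device's mmap offset space. */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	/* Userspace passes this fake offset to mmap() on the DRM fd;
	 * drm_gem_mmap() translates it back to this object. */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}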
*/ struct drm_gem_object { - /** Reference count of this object */ + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_reference() to acquire and + * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked() + * to release a reference to a GEM buffer object. + */ struct kref refcount; /** - * handle_count - gem file_priv handle count of this object + * @handle_count: + * + * This is the GEM file_priv handle count of this object. * * Each handle also holds a reference. Note that when the handle_count * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * * Protected by dev->object_name_lock. - * */ + */ unsigned handle_count; - /** Related drm device */ + /** + * @dev: DRM dev this object belongs to. + */ struct drm_device *dev; - /** File representing the shmem storage */ + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous CMA memory, special reserved blocks). In this + * case @filp is NULL. + */ struct file *filp; - /* Mapping info for this object */ + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ struct drm_vma_offset_node vma_node; /** + * @size: + * * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** + * @name: + * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by the object_name_lock in the related drm_device + * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK + * and GEM_OPEN ioctls. */ int name; /** - * Memory domains. These monitor which caches contain read/write data + * @read_domains: + * + * Read memory domains. These monitor which caches contain read/write data * related to the object. When transitioning from one set of domains * to another, the driver is called to ensure that caches are suitably - * flushed and invalidated + * flushed and invalidated. */ uint32_t read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. + */ uint32_t write_domain; /** + * @pending_read_domains: + * * While validating an exec operation, the * new read/write domain values are computed here. * They will be transferred to the above values * at the point that any cache flushing occurs */ uint32_t pending_read_domains; + + /** + * @pending_write_domain: Write domain similar to @pending_read_domains. + */ uint32_t pending_write_domain; /** - * dma_buf - dma buf associated with this GEM object + * @dma_buf: + * + * dma-buf associated with this GEM object. * * Pointer to the dma-buf associated with this gem object (either * through importing or exporting). We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by obj->object_name_lock + * Protected by obj->object_name_lock. */ struct dma_buf *dma_buf; /** - * import_attach - dma buf attachment backing this object + * @import_attach: + * + * dma-buf attachment backing this object. * * Any foreign dma_buf imported as a gem object has this set to the * attachment point for the device. 
This is invariant over the lifetime @@ -133,12 +186,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +/** + * drm_gem_object_reference - acquire a GEM BO reference + * @obj: GEM buffer object + * + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ static inline void drm_gem_object_reference(struct drm_gem_object *obj) { kref_get(&obj->refcount); } +/** + * drm_gem_object_unreference - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must hold the dev->struct_mutex + * lock when calling this function, even when the driver doesn't use + * dev->struct_mutex for anything. + * + * For drivers not encumbered with legacy locking use + * drm_gem_object_unreference_unlocked() instead. + */ static inline void drm_gem_object_unreference(struct drm_gem_object *obj) { @@ -149,6 +220,13 @@ drm_gem_object_unreference(struct drm_gem_object *obj) } } +/** + * drm_gem_object_unreference_unlocked - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must not hold the + * dev->struct_mutex lock when calling this function. + */ static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index f1d8d0dbb4f1..1b3b1f8c8cdf 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -163,9 +163,36 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) return container_of(dev, struct mipi_dsi_device, dev); } +/** + * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. + */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); int mipi_dsi_attach(struct mipi_dsi_device *dsi); int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 0de6290df4da..fc65118e5077 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -148,8 +148,7 @@ static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { - return list_entry(hole_node->node_list.next, - struct drm_mm_node, node_list)->start; + return list_next_entry(hole_node, node_list)->start; } /** @@ -180,6 +179,14 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) &(mm)->head_node.node_list, \ node_list) +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ + for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ + &entry->hole_stack != &(mm)->hole_stack ? 
\ + hole_start = drm_mm_hole_node_start(entry), \ + hole_end = drm_mm_hole_node_end(entry), \ + 1 : 0; \ + entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + /** * drm_mm_for_each_hole - iterator to walk over all holes * @entry: drm_mm_node used internally to track progress @@ -200,20 +207,7 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) * going backwards. */ #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ - for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack)) - -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ - for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0) /* * Basic range manager support (drm_mm.c) diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 08a8cac9e555..625966a906f2 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -35,46 +35,91 @@ * structures). */ +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal horizontal timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_STALE: mode has become stale + * 
@MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the driver/hardware + * combination. + */ enum drm_mode_status { - MODE_OK = 0, /* Mode OK */ - MODE_HSYNC, /* hsync out of range */ - MODE_VSYNC, /* vsync out of range */ - MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_BAD_WIDTH, /* requires an unsupported linepitch */ - MODE_NOMODE, /* no mode with a matching name */ - MODE_NO_INTERLACE, /* interlaced mode not supported */ - MODE_NO_DBLESCAN, /* doublescan mode not supported */ - MODE_NO_VSCAN, /* multiscan mode not supported */ - MODE_MEM, /* insufficient video memory */ - MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ - MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ - MODE_MEM_VIRT, /* insufficient video memory given virtual size */ - MODE_NOCLOCK, /* no fixed clock available */ - MODE_CLOCK_HIGH, /* clock required is too high */ - MODE_CLOCK_LOW, /* clock required is too low */ - MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ - MODE_BAD_HVALUE, /* horizontal timing was out of range */ - MODE_BAD_VVALUE, /* vertical timing was out of range */ - MODE_BAD_VSCAN, /* VScan value out of range */ - MODE_HSYNC_NARROW, /* horizontal sync too narrow */ - MODE_HSYNC_WIDE, /* horizontal sync too wide */ - MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ - MODE_HBLANK_WIDE, /* horizontal blanking too wide */ - MODE_VSYNC_NARROW, /* vertical sync too narrow */ - MODE_VSYNC_WIDE, /* vertical sync too wide */ - MODE_VBLANK_NARROW, /* vertical blanking too narrow */ - MODE_VBLANK_WIDE, /* vertical blanking too wide */ - MODE_PANEL, /* exceeds panel dimensions */ - MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ - MODE_ONE_WIDTH, /* only one width is supported */ - MODE_ONE_HEIGHT, /* only one height is supported */ - MODE_ONE_SIZE, /* only one resolution is supported */ - MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ - MODE_NO_STEREO, /* stereo modes not supported */ - MODE_UNVERIFIED = -3, /* mode needs to reverified */ - MODE_BAD = -2, /* unspecified reason */ - MODE_ERROR = -1 /* error condition */ + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_STALE = -3, + MODE_BAD = -2, + MODE_ERROR = -1 }; #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ @@ -96,17 +141,125 @@ enum drm_mode_status { #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! 
+ * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! + * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! + * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * The horizontal and vertical timings are defined per the following diagram. + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ + * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + */ struct drm_display_mode { - /* Header */ + /** + * @head: + * + * struct list_head for mode lists. + */ struct list_head head; + + /** + * @base: + * + * A display mode is a normal modeset object, possibly including public + * userspace id. + * + * FIXME: + * + * This can probably be removed since the entire concept of userspace + * managing modes explicitly has never landed in upstream kernel mode + * setting support. + */ struct drm_mode_object base; + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ char name[DRM_DISPLAY_MODE_LEN]; + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ enum drm_mode_status status; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively + * unused. + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI: + * + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. 
+ * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige + * from older kms designs where userspace had to first add a custom + * mode to the kernel's mode list before it could use it. Don't use. + */ unsigned int type; - /* Proposed mode values */ + /** + * @clock: + * + * Pixel clock in kHz. + */ int clock; /* in kHz */ int hdisplay; int hsync_start; @@ -118,14 +271,74 @@ struct drm_display_mode { int vsync_end; int vtotal; int vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: not used? + * - DRM_MODE_FLAG_PIXMUX: not used? + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ unsigned int flags; - /* Addressable image size (may be 0 for projectors, etc.) */ + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int height_mm; - /* Actual mode we give to hw */ - int crtc_clock; /* in KHz */ + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; int crtc_hdisplay; int crtc_hblank_start; int crtc_hblank_end; @@ -140,12 +353,48 @@ struct drm_display_mode { int crtc_vsync_end; int crtc_vtotal; - /* Driver private mode info */ + /** + * @private: + * + * Pointer for driver private data. This can only be used for mode + * objects passed to drivers in modeset operations. It shouldn't be used + * by atomic drivers since they can store any additional data by + * subclassing state structures. 
+ */ int *private; + + /** + * @private_flags: + * + * Similar to @private, but just an integer. + */ int private_flags; - int vrefresh; /* in Hz */ - int hsync; /* in kHz */ + /** + * @vrefresh: + * + * Vertical refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in Hz. + */ + int vrefresh; + + /** + * @hsync: + * + * Horizontal refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in kHz. + */ + int hsync; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. + */ enum hdmi_picture_aspect picture_aspect_ratio; }; @@ -222,6 +471,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); @@ -232,7 +483,7 @@ enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); -void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); +void drm_mode_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 000000000000..a126a0d7aed4 --- /dev/null +++ b/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,928 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include <drm/drm_crtc.h> + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. 
Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_crtc_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ + void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ + void (*commit)(struct drm_crtc *crtc); + + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge + * ->mode_fixup() functions. If the CRTC cannot support the requested + * conversion from mode to adjusted_mode it should reject the modeset. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. 
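As a rough illustration of the ->mode_fixup() contract described above, a hypothetical driver could validate and adjust the mode as in the sketch below; the foo_ name, the 300 MHz limit and the 25 kHz rounding are invented for the example and not taken from the patch.

static bool foo_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Reject modes beyond the assumed 300 MHz pixel clock limit (kHz). */
	if (mode->clock > 300000)
		return false;

	/* Only adjusted_mode may be touched in the check phase. */
	adjusted_mode->clock = roundup(mode->clock, 25);

	return true;
}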
+ * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. + * + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @enable callback. + * + * This callback is optional. + */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. If it is not present + * drm_crtc_helper_set_config() will fall back to a full modeset, using + * the ->mode_set() callback. 
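A hypothetical ->mode_set_nofb() along the lines documented above might just program timing registers from the CRTC state's adjusted mode; foo_write() and the FOO_* register names below are invented for this sketch.

static void foo_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	const struct drm_display_mode *m = &crtc->state->adjusted_mode;

	/* Program hardware timings; plane and framebuffer setup happen elsewhere. */
	foo_write(crtc->dev, FOO_HTOTAL, m->crtc_htotal);
	foo_write(crtc->dev, FOO_HSYNC_START, m->crtc_hsync_start);
	foo_write(crtc->dev, FOO_VTOTAL, m->crtc_vtotal);
	foo_write(crtc->dev, FOO_VSYNC_START, m->crtc_vsync_start);
}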
Since it can't update other planes it's + * incompatible with atomic modeset support. + * + * This callback is only used by the CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_base_atomic: + * + * This callback is used by the fbdev helpers to set a new framebuffer + * and scanout without sleeping, i.e. from an atomic calling context. It + * is only used to implement kgdb support. + * + * This callback is optional and only needed for kgdb support in the fbdev + * helpers. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base_atomic)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic); + + /** + * @load_lut: + * + * Load a LUT prepared with the @gamma_set functions from + * &drm_fb_helper_funcs. + * + * This callback is optional and is only used by the fbdev emulation + * helpers. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ + void (*load_lut)(struct drm_crtc *crtc); + + /** + * @disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own ->disable hook. If that + * sequence is too simple drivers can just add their own hooks and call + * it from this CRTC callback here by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the CRTC level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * CRTC: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_crtc *crtc); + + /** + * @enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own ->enable hook. If that sequence is + * too simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the CRTC level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. 
+ */
+ void (*enable)(struct drm_crtc *crtc);
+
+ /**
+ * @atomic_check:
+ *
+ * Drivers should check plane-update related CRTC constraints in this
+ * hook. They can also check mode related limitations but need to be
+ * aware of the calling order, since this hook is used by
+ * drm_atomic_helper_check_planes() whereas the preparations needed to
+ * check output routing and the display mode are done in
+ * drm_atomic_helper_check_modeset(). Therefore drivers that want to
+ * check output routing and display mode constraints in this callback
+ * must ensure that drm_atomic_helper_check_modeset() has been called
+ * beforehand. This is the calling order used by the default helper
+ * implementation in drm_atomic_helper_check().
+ *
+ * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check()
+ * hooks are called after the ones for planes, which allows drivers to
+ * assign shared resources requested by planes in the CRTC callback
+ * here. For more complicated dependencies the driver can call the provided
+ * check helpers multiple times until the computed state has a final
+ * configuration and everything has been checked.
+ *
+ * This function is also allowed to inspect any other object's state and
+ * can add more state objects to the atomic commit if needed. Care must
+ * be taken though to ensure that state check&compute functions for
+ * these added states are all called, and derived state in other objects
+ * all updated. Again the recommendation is to just call check helpers
+ * until a maximal configuration is reached.
+ *
+ * This callback is used by the atomic modeset helpers and by the
+ * transitional plane helpers, but it is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
+ */
+ int (*atomic_check)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+ /**
+ * @atomic_begin:
+ *
+ * Drivers should prepare for an atomic update of multiple planes on
+ * a CRTC in this hook. Depending upon hardware this might be vblank
+ * evasion, blocking updates by setting bits or doing preparatory work
+ * for e.g. manual update display.
+ *
+ * This hook is called before any plane commit functions are called.
+ *
+ * Note that the power state of the display pipe when this function is
+ * called depends upon the exact helpers and calling sequence the driver
+ * has picked. See drm_atomic_commit_planes() for a discussion of the
+ * tradeoffs and variants of plane commit helpers.
+ *
+ * This callback is used by the atomic modeset helpers and by the
+ * transitional plane helpers, but it is optional.
+ */
+ void (*atomic_begin)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
+ /**
+ * @atomic_flush:
+ *
+ * Drivers should finalize an atomic update of multiple planes on
+ * a CRTC in this hook. Depending upon hardware this might include
+ * checking that vblank evasion was successful, unblocking updates by
+ * setting bits or setting the GO bit to flush out all updates.
+ * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); +}; + +/** + * drm_crtc_helper_add - sets the helper vtable for a crtc + * @crtc: DRM CRTC + * @funcs: helper vtable to set for @crtc + */ +static inline void drm_crtc_helper_add(struct drm_crtc *crtc, + const struct drm_crtc_helper_funcs *funcs) +{ + crtc->helper_private = funcs; +} + +/** + * struct drm_encoder_helper_funcs - helper operations for encoders + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_encoder_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the encoder. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy encoder helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable an encoder by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling an encoder to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or a &drm_bridge. + * The parameter adjusted_mode is the input mode the encoder requires. It + * can be modified by this callback and does not need to match mode. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. + * + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. 
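To connect the CRTC hooks documented earlier, a driver fills a drm_crtc_helper_funcs table and registers it with drm_crtc_helper_add(); this is a sketch rather than part of the patch, and the foo_crtc_* callbacks stand in for whatever subset a driver actually implements.

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_set_nofb	= foo_crtc_mode_set_nofb,
	.atomic_check	= foo_crtc_atomic_check,
	.atomic_begin	= foo_crtc_atomic_begin,
	.atomic_flush	= foo_crtc_atomic_flush,
	.enable		= foo_crtc_enable,
	.disable	= foo_crtc_disable,
};

/* ...and from the driver's init path: drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs); */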
It
+ * is therefore possible for userspace to pass in a mode that was
+ * previously filtered out using ->mode_valid() or add a custom mode
+ * that wasn't probed from EDID or similar to begin with. Even though
+ * this is an advanced feature and rarely used nowadays, some users rely
+ * on being able to specify modes manually so drivers must be prepared
+ * to deal with it. Specifically this means that all drivers need not
+ * only validate modes in ->mode_valid() but also in ->mode_fixup() to
+ * make sure invalid modes passed in from userspace are rejected.
+ *
+ * RETURNS:
+ *
+ * True if an acceptable configuration is possible, false if the modeset
+ * operation should be rejected.
+ */
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @prepare:
+ *
+ * This callback should prepare the encoder for a subsequent modeset,
+ * which in practice means the driver should disable the encoder if it
+ * is running. Most drivers ended up implementing this by calling their
+ * @dpms hook with DRM_MODE_DPMS_OFF.
+ *
+ * This callback is used by the legacy CRTC helpers. Atomic helpers
+ * also support using this hook for disabling an encoder to facilitate
+ * transitions to atomic, but it is deprecated. Instead @disable should
+ * be used.
+ */
+ void (*prepare)(struct drm_encoder *encoder);
+
+ /**
+ * @commit:
+ *
+ * This callback should commit the new mode on the encoder after a modeset,
+ * which in practice means the driver should enable the encoder. Most
+ * drivers ended up implementing this by calling their @dpms hook with
+ * DRM_MODE_DPMS_ON.
+ *
+ * This callback is used by the legacy CRTC helpers. Atomic helpers
+ * also support using this hook for enabling an encoder to facilitate
+ * transitions to atomic, but it is deprecated. Instead @enable should
+ * be used.
+ */
+ void (*commit)(struct drm_encoder *encoder);
+
+ /**
+ * @mode_set:
+ *
+ * This callback is used to update the display mode of an encoder.
+ *
+ * Note that the display pipe is completely off when this function is
+ * called. Drivers which need hardware to be running before they program
+ * the new display mode (because they implement runtime PM) should not
+ * use this hook, because the helper library calls it only once and not
+ * every time the display pipeline is suspended using either DPMS or the
+ * new "ACTIVE" property. Such drivers should instead move all their
+ * encoder setup into the ->enable() callback.
+ *
+ * This callback is used both by the legacy CRTC helpers and the atomic
+ * modeset helpers. It is optional in the atomic helpers.
+ */
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @get_crtc:
+ *
+ * This callback is used by the legacy CRTC helpers to work around
+ * deficiencies in its own book-keeping.
+ *
+ * Do not use, use atomic helpers instead, which get the book keeping
+ * right.
+ *
+ * FIXME:
+ *
+ * Currently only nouveau is using this, and as soon as nouveau is
+ * atomic we can ditch this hook.
+ */
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+
+ /**
+ * @detect:
+ *
+ * This callback can be used by drivers who want to do detection on the
+ * encoder object instead of in connector functions.
+ *
+ * It is not used by any helper and therefore has purely driver-specific
+ * semantics. New drivers shouldn't use this and instead just implement
+ * their own private callbacks.
+ *
+ * FIXME:
+ *
+ * This should just be converted into a pile of driver vfuncs.
+ * Currently radeon, amdgpu and nouveau are using it.
+ */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+
+ /**
+ * @disable:
+ *
+ * This callback should be used to disable the encoder. With the atomic
+ * drivers it is called before this encoder's CRTC has been shut off
+ * using the CRTC's own ->disable hook. If that sequence is too simple
+ * drivers can just add their own driver private encoder hooks and call
+ * them from CRTC's callback by looping over all encoders connected to
+ * it using for_each_encoder_on_crtc().
+ *
+ * This hook is used both by legacy CRTC helpers and atomic helpers.
+ * Atomic drivers don't need to implement it if there's no need to
+ * disable anything at the encoder level. To ensure that runtime PM
+ * handling (using either DPMS or the new "ACTIVE" property) works
+ * @disable must be the inverse of @enable for atomic drivers.
+ *
+ * NOTE:
+ *
+ * With legacy CRTC helpers there's a big semantic difference between
+ * @disable and other hooks (like @prepare or @dpms) used to shut down an
+ * encoder: @disable is only called when also logically disabling the
+ * display pipeline and needs to release any resources acquired in
+ * @mode_set (like shared PLLs, or again release pinned framebuffers).
+ *
+ * Therefore @disable must be the inverse of @mode_set plus @commit for
+ * drivers still using legacy CRTC helpers, which is different from the
+ * rules under atomic.
+ */
+ void (*disable)(struct drm_encoder *encoder);
+
+ /**
+ * @enable:
+ *
+ * This callback should be used to enable the encoder. With the atomic
+ * drivers it is called after this encoder's CRTC has been enabled using
+ * the CRTC's own ->enable hook. If that sequence is too simple drivers
+ * can just add their own driver private encoder hooks and call them
+ * from CRTC's callback by looping over all encoders connected to it
+ * using for_each_encoder_on_crtc().
+ *
+ * This hook is used only by atomic helpers, for symmetry with @disable.
+ * Atomic drivers don't need to implement it if there's no need to
+ * enable anything at the encoder level. To ensure that runtime PM handling
+ * (using either DPMS or the new "ACTIVE" property) works
+ * @enable must be the inverse of @disable for atomic drivers.
+ */
+ void (*enable)(struct drm_encoder *encoder);
+
+ /**
+ * @atomic_check:
+ *
+ * This callback is used to validate encoder state for atomic drivers.
+ * Since the encoder is the object connecting the CRTC and connector it
+ * gets passed both states, to be able to validate interactions and
+ * update the CRTC to match what the encoder needs for the requested
+ * connector.
+ *
+ * This function is used by the atomic helpers, but it is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
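Sticking with the symmetry requirement spelled out for @disable and @enable above, a hypothetical encoder might implement the two hooks as exact inverses; foo_write() and the FOO_TMDS_* names below are invented for this sketch.

static void foo_encoder_enable(struct drm_encoder *encoder)
{
	/* Turn the (made-up) TMDS output on. */
	foo_write(encoder->dev, FOO_TMDS_CTL, FOO_TMDS_ENABLE);
}

static void foo_encoder_disable(struct drm_encoder *encoder)
{
	/* Inverse of foo_encoder_enable(). */
	foo_write(encoder->dev, FOO_TMDS_CTL, 0);
}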
+ */
+ int (*atomic_check)(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+};
+
+/**
+ * drm_encoder_helper_add - sets the helper vtable for an encoder
+ * @encoder: DRM encoder
+ * @funcs: helper vtable to set for @encoder
+ */
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = funcs;
+}
+
+/**
+ * struct drm_connector_helper_funcs - helper operations for connectors
+ *
+ * These functions are used by the atomic and legacy modeset helpers and by the
+ * probe helpers.
+ */
+struct drm_connector_helper_funcs {
+ /**
+ * @get_modes:
+ *
+ * This function should fill in all modes currently valid for the sink
+ * into the connector->probed_modes list. It should also update the
+ * EDID property by calling drm_mode_connector_update_edid_property().
+ *
+ * The usual way to implement this is to cache the EDID retrieved in the
+ * probe callback somewhere in the driver-private connector structure.
+ * In this function drivers then parse the modes in the EDID and add
+ * them by calling drm_add_edid_modes(). But connectors that drive a
+ * fixed panel can also manually add specific modes using
+ * drm_mode_probed_add(). Drivers which manually add modes should also
+ * make sure that the @display_info, @width_mm and @height_mm fields of the
+ * struct #drm_connector are filled in.
+ *
+ * Virtual drivers that just want some standard VESA mode with a given
+ * resolution can call drm_add_modes_noedid(), and mark the preferred
+ * one using drm_set_preferred_mode().
+ *
+ * Finally drivers that support audio probably want to update the ELD
+ * data, too, using drm_edid_to_eld().
+ *
+ * This function is only called after the ->detect() hook has indicated
+ * that a sink is connected and when the EDID isn't overridden through
+ * sysfs or the kernel commandline.
+ *
+ * This callback is used by the probe helpers in e.g.
+ * drm_helper_probe_single_connector_modes().
+ *
+ * RETURNS:
+ *
+ * The number of modes added by calling drm_mode_probed_add().
+ */
+ int (*get_modes)(struct drm_connector *connector);
+
+ /**
+ * @mode_valid:
+ *
+ * Callback to validate a mode for a connector, irrespective of the
+ * specific display configuration.
+ *
+ * This callback is used by the probe helpers to filter the mode list
+ * (which is usually derived from the EDID data block from the sink).
+ * See e.g. drm_helper_probe_single_connector_modes().
+ *
+ * NOTE:
+ *
+ * This only filters the mode list supplied to userspace in the
+ * GETCONNECTOR IOCTL. Userspace is free to create modes of its own and
+ * ask the kernel to use them. In this case the atomic helpers or legacy
+ * CRTC helpers will not call this function. Drivers therefore must
+ * still fully validate any mode passed in in a modeset request.
+ *
+ * RETURNS:
+ *
+ * Either MODE_OK or one of the failure reasons in enum
+ * &drm_mode_status.
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ /**
+ * @best_encoder:
+ *
+ * This function should select the best encoder for the given connector.
+ *
+ * This function is used by both the atomic helpers (in the
+ * drm_atomic_helper_check_modeset() function) and in the legacy CRTC
+ * helpers.
+ *
+ * NOTE:
+ *
+ * In atomic drivers this function is called in the check phase of an
+ * atomic update.
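A hypothetical connector implementation following the @get_modes and @mode_valid recipes above could look like the sketch below; foo_get_cached_edid() stands in for however the driver caches the EDID from its ->detect() path, and the 165 MHz limit is an assumed hardware constraint.

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = foo_get_cached_edid(connector);

	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);

	return drm_add_edid_modes(connector, edid);
}

static enum drm_mode_status foo_connector_mode_valid(struct drm_connector *connector,
						     struct drm_display_mode *mode)
{
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}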
The driver is not allowed to change or inspect
+ * anything outside of arguments passed-in. Atomic drivers which need to
+ * inspect dynamic configuration state should instead use
+ * @atomic_best_encoder.
+ *
+ * RETURNS:
+ *
+ * Encoder that should be used for the given connector and connector
+ * state, or NULL if no suitable encoder exists. Note that the helpers
+ * will ensure that encoders aren't used twice, drivers should not check
+ * for this.
+ */
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+
+ /**
+ * @atomic_best_encoder:
+ *
+ * This is the atomic version of @best_encoder for atomic drivers which
+ * need to select the best encoder depending upon the desired
+ * configuration and can't select it statically.
+ *
+ * This function is used by drm_atomic_helper_check_modeset() and either
+ * this or @best_encoder is required.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * Encoder that should be used for the given connector and connector
+ * state, or NULL if no suitable encoder exists. Note that the helpers
+ * will ensure that encoders aren't used twice, drivers should not check
+ * for this.
+ */
+ struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+ struct drm_connector_state *connector_state);
+};
+
+/**
+ * drm_connector_helper_add - sets the helper vtable for a connector
+ * @connector: DRM connector
+ * @funcs: helper vtable to set for @connector
+ */
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = funcs;
+}
+
+/**
+ * struct drm_plane_helper_funcs - helper operations for planes
+ *
+ * These functions are used by the atomic helpers and by the transitional plane
+ * helpers.
+ */
+struct drm_plane_helper_funcs {
+ /**
+ * @prepare_fb:
+ *
+ * This hook is used to prepare a framebuffer for scanout by e.g. pinning
+ * its backing storage or relocating it into a contiguous block of
+ * VRAM. Other possible preparatory work includes flushing caches.
+ *
+ * This function must not block for outstanding rendering, since it is
+ * called in the context of the atomic IOCTL even for async commits to
+ * be able to return any errors to userspace. Instead the recommended
+ * way is to fill out the fence member of the passed-in
+ * &drm_plane_state. If the driver doesn't support native fences then
+ * equivalent functionality should be implemented through private
+ * members in the plane structure.
+ *
+ * The helpers will call @cleanup_fb with matching arguments for every
+ * successful call to this hook.
+ *
+ * This callback is used by the atomic modeset helpers and by the
+ * transitional plane helpers, but it is optional.
+ *
+ * RETURNS:
+ *
+ * 0 on success or one of the following negative error codes allowed by
+ * the atomic_commit hook in &drm_mode_config_funcs. When using helpers
+ * this callback is the only one which can fail an atomic commit,
+ * everything else must complete successfully.
+ */
+ int (*prepare_fb)(struct drm_plane *plane,
+ const struct drm_plane_state *new_state);
+ /**
+ * @cleanup_fb:
+ *
+ * This hook is called to clean up any resources allocated for the given
+ * framebuffer and plane configuration in @prepare_fb.
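As a sketch of the @prepare_fb/@cleanup_fb contract above: pin the backing storage without waiting for rendering, and undo exactly that work in cleanup. to_foo_fb(), foo_bo_pin() and foo_bo_unpin() are invented driver helpers for the example.

static int foo_plane_prepare_fb(struct drm_plane *plane,
				const struct drm_plane_state *new_state)
{
	if (!new_state->fb)
		return 0;

	/* Pin the buffer object backing the framebuffer into VRAM. */
	return foo_bo_pin(to_foo_fb(new_state->fb)->bo);
}

static void foo_plane_cleanup_fb(struct drm_plane *plane,
				 const struct drm_plane_state *old_state)
{
	if (old_state->fb)
		foo_bo_unpin(to_foo_fb(old_state->fb)->bo);
}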
+ * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*cleanup_fb)(struct drm_plane *plane, + const struct drm_plane_state *old_state); + + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes() plane's ->atomic_check() + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check&compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); + /** + * @atomic_disable: + * + * Drivers should use this function to unconditionally disable a plane. + * This hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to + * @atomic_update, which will be called for disabling planes, too, if + * the @atomic_disable hook isn't implemented. + * + * This hook is also useful to disable planes in preparation of a modeset, + * by calling drm_atomic_helper_disable_planes_on_crtc() from the + * ->disable() hook in &drm_crtc_helper_funcs. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. 
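Pulling the plane hooks documented above together (a sketch with invented foo_plane_* names): the driver fills a drm_plane_helper_funcs table and registers it with drm_plane_helper_add(), which is defined just below.

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb	= foo_plane_prepare_fb,
	.cleanup_fb	= foo_plane_cleanup_fb,
	.atomic_check	= foo_plane_atomic_check,
	.atomic_update	= foo_plane_atomic_update,
	.atomic_disable	= foo_plane_atomic_disable,
};

/* From the driver's plane init path: drm_plane_helper_add(plane, &foo_plane_helper_funcs); */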
+ */ + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); +}; + +/** + * drm_plane_helper_add - sets the helper vtable for a plane + * @plane: DRM plane + * @funcs: helper vtable to set for @plane + */ +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = funcs; +} + +#endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 94938d89347c..c5576fbcb909 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); struct drm_modeset_acquire_ctx * drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); -int drm_modeset_lock_all_crtcs(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); #endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 5a7f9d4efb1d..4421f3f4ca8d 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -26,6 +26,7 @@ #include <drm/drm_rect.h> #include <drm/drm_crtc.h> +#include <drm/drm_modeset_helper_vtables.h> /* * Drivers that don't allow primary plane scaling may pass this macro in place @@ -36,46 +37,9 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -/** - * DOC: plane helpers - * - * Helper functions to assist with creation and handling of CRTC primary - * planes. - */ - int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); -/** - * drm_plane_helper_funcs - helper operations for CRTCs - * @prepare_fb: prepare a framebuffer for use by the plane - * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane - * @atomic_check: check that a given atomic state is valid and can be applied - * @atomic_update: apply an atomic state to the plane (mandatory) - * @atomic_disable: disable the plane - * - * The helper operations are called by the mid-layer CRTC helper. 
- */ -struct drm_plane_helper_funcs { - int (*prepare_fb)(struct drm_plane *plane, - const struct drm_plane_state *new_state); - void (*cleanup_fb)(struct drm_plane *plane, - const struct drm_plane_state *old_state); - - int (*atomic_check)(struct drm_plane *plane, - struct drm_plane_state *state); - void (*atomic_update)(struct drm_plane *plane, - struct drm_plane_state *old_state); - void (*atomic_disable)(struct drm_plane *plane, - struct drm_plane_state *old_state); -}; - -static inline void drm_plane_helper_add(struct drm_plane *plane, - const struct drm_plane_helper_funcs *funcs) -{ - plane->helper_private = funcs; -} - int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 26bb55e9e8b6..83bb156d4356 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -162,7 +162,8 @@ int drm_rect_calc_hscale_relaxed(struct drm_rect *src, int drm_rect_calc_vscale_relaxed(struct drm_rect *src, struct drm_rect *dst, int min_vscale, int max_vscale); -void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); void drm_rect_rotate(struct drm_rect *r, int width, int height, unsigned int rotation); diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 30d89e0da2c6..b46fa0ef3005 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -31,47 +31,94 @@ #define MAX_PORTS 5 /** - * struct i915_audio_component_ops - callbacks defined in gfx driver - * @owner: the module owner - * @get_power: get the POWER_DOMAIN_AUDIO power well - * @put_power: put the POWER_DOMAIN_AUDIO power well - * @codec_wake_override: Enable/Disable generating the codec wake signal - * @get_cdclk_freq: get the Core Display Clock in KHz - * @sync_audio_rate: set n/cts based on the sample rate + * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver */ struct i915_audio_component_ops { + /** + * @owner: i915 module + */ struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + */ void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ int (*sync_audio_rate)(struct device *, int port, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. 
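Given the return-value semantics documented above for get_eld(), an audio driver consuming the hook might handle the result as in this hypothetical sketch; acomp and port are assumed to come from the component binding, and the 128-byte buffer size is an assumption.

static void foo_hda_refresh_eld(struct i915_audio_component *acomp, int port)
{
	unsigned char buf[128];
	bool enabled;
	int size;

	size = acomp->ops->get_eld(acomp->dev, port, &enabled, buf, sizeof(buf));
	if (size < 0)
		return;			/* querying i915 failed */
	if (size == 0 || !enabled)
		return;			/* no valid ELD, or pipe not active */
	if (size > (int)sizeof(buf))
		size = sizeof(buf);	/* ELD was truncated to the buffer */

	/* ... parse the first 'size' bytes of buf ... */
}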
+ */ + int (*get_eld)(struct device *, int port, bool *enabled, + unsigned char *buf, int max_bytes); }; +/** + * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver + */ struct i915_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ void *audio_ptr; /** - * Call from i915 driver, notifying the HDA driver that - * pin sense and/or ELD information has changed. - * @audio_ptr: HDA driver object - * @port: Which port has changed (PORTA / PORTB / PORTC etc) + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the i915 driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). */ void (*pin_eld_notify)(void *audio_ptr, int port); }; /** - * struct i915_audio_component - used for audio video interaction - * @dev: the device from gfx driver - * @aud_sample_rate: the array of audio sample rate per port - * @ops: callback for audio driver calling - * @audio_ops: Call from i915 driver + * struct i915_audio_component - Used for direct communication between i915 and hda drivers */ struct i915_audio_component { + /** + * @dev: i915 device, used as parameter for ops + */ struct device *dev; + /** + * @aud_sample_rate: the array of audio sample rate per port + */ int aud_sample_rate[MAX_PORTS]; - + /** + * @ops: Ops implemented by i915 driver, called by hda driver + */ const struct i915_audio_component_ops *ops; - + /** + * @audio_ops: Ops implemented by hda driver, called by i915 driver + */ const struct i915_audio_component_audio_ops *audio_ops; }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 17c445612e01..f97020904717 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -279,16 +279,59 @@ #define INTEL_SKL_GT3_IDS(info) \ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \ + INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ -#define INTEL_SKL_IDS(info) \ +#define INTEL_SKL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ + +#define INTEL_SKL_IDS(info) \ INTEL_SKL_GT1_IDS(info), \ INTEL_SKL_GT2_IDS(info), \ - INTEL_SKL_GT3_IDS(info) + INTEL_SKL_GT3_IDS(info), \ + INTEL_SKL_GT4_IDS(info) #define INTEL_BXT_IDS(info) \ INTEL_VGA_DEVICE(0x0A84, info), \ INTEL_VGA_DEVICE(0x1A84, info), \ INTEL_VGA_DEVICE(0x5A84, info) +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + 
+#define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */ + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c768ddfbe53c..afae2316bd43 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -383,6 +383,16 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); */ extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +/** + * ttm_bo_move_to_lru_tail + * + * @bo: The buffer object. + * + * Move this BO to the tail of all lru lists used to lookup and reserve an + * object. This function must be called with struct ttm_bo_global::lru_lock + * held, and is used to make a BO less likely to be considered for eviction. + */ +extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 813042cede57..3d4bf08aa21f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -826,10 +826,10 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * reserved, the validation sequence is checked against the validation * sequence of the process currently reserving the buffer, * and if the current validation sequence is greater than that of the process - * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps + * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps * waiting for the buffer to become unreserved, after which it retries * reserving. - * The caller should, when receiving an -EAGAIN error + * The caller should, when receiving an -EDEADLK error * release all its buffer reservations, wait for @bo to become unreserved, and * then rerun the validation with the same validation sequence. This procedure * will always guarantee that the process with the lowest validation sequence diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h new file mode 100644 index 000000000000..d91156e2658d --- /dev/null +++ b/include/dt-bindings/clock/bcm2835-aux.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define BCM2835_AUX_CLOCK_UART 0 +#define BCM2835_AUX_CLOCK_SPI1 1 +#define BCM2835_AUX_CLOCK_SPI2 2 +#define BCM2835_AUX_CLOCK_COUNT 3 diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index d323efac7edf..61f1d20c2a67 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -43,5 +43,6 @@ #define BCM2835_CLOCK_TSENS 27 #define BCM2835_CLOCK_EMMC 28 #define BCM2835_CLOCK_PERI_IMAGE 29 +#define BCM2835_CLOCK_PWM 30 -#define BCM2835_CLOCK_COUNT 30 +#define BCM2835_CLOCK_COUNT 31 diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 99da0d117a7d..7699ee9c16c0 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -25,6 +25,8 @@ #define CLK_FOUT_MPLL 10 #define CLK_FOUT_BPLL 11 #define CLK_FOUT_KPLL 12 +#define CLK_ARM_CLK 13 +#define CLK_KFC_CLK 14 /* gate for special clocks (sclk) */ #define CLK_SCLK_UART0 128 @@ -210,6 +212,8 @@ #define CLK_MOUT_SW_ACLK300 649 #define CLK_MOUT_USER_ACLK400_DISP1 650 #define CLK_MOUT_SW_ACLK400 651 +#define CLK_MOUT_USER_ACLK300_GSCL 652 +#define CLK_MOUT_SW_ACLK300_GSCL 653 /* divider clocks */ #define CLK_DOUT_PIXEL 768 diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index a4a7a9ce3457..edca8985c50e 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -447,5 +447,6 @@ #define IMX7D_SEMA4_HS_ROOT_CLK 434 #define IMX7D_PLL_DRAM_TEST_DIV 435 #define IMX7D_ADC_ROOT_CLK 436 -#define IMX7D_CLK_END 437 +#define IMX7D_CLK_ARM 437 +#define IMX7D_CLK_END 438 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/lpc32xx-clock.h b/include/dt-bindings/clock/lpc32xx-clock.h new file mode 100644 index 000000000000..bcb1c9a73519 --- /dev/null +++ b/include/dt-bindings/clock/lpc32xx-clock.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015 Vladimir Zapolskiy <vz@mleia.com> + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +#ifndef __DT_BINDINGS_LPC32XX_CLOCK_H +#define __DT_BINDINGS_LPC32XX_CLOCK_H + +/* LPC32XX System Control Block clocks */ +#define LPC32XX_CLK_RTC 1 +#define LPC32XX_CLK_DMA 2 +#define LPC32XX_CLK_MLC 3 +#define LPC32XX_CLK_SLC 4 +#define LPC32XX_CLK_LCD 5 +#define LPC32XX_CLK_MAC 6 +#define LPC32XX_CLK_SD 7 +#define LPC32XX_CLK_DDRAM 8 +#define LPC32XX_CLK_SSP0 9 +#define LPC32XX_CLK_SSP1 10 +#define LPC32XX_CLK_UART3 11 +#define LPC32XX_CLK_UART4 12 +#define LPC32XX_CLK_UART5 13 +#define LPC32XX_CLK_UART6 14 +#define LPC32XX_CLK_IRDA 15 +#define LPC32XX_CLK_I2C1 16 +#define LPC32XX_CLK_I2C2 17 +#define LPC32XX_CLK_TIMER0 18 +#define LPC32XX_CLK_TIMER1 19 +#define LPC32XX_CLK_TIMER2 20 +#define LPC32XX_CLK_TIMER3 21 +#define LPC32XX_CLK_TIMER4 22 +#define LPC32XX_CLK_TIMER5 23 +#define LPC32XX_CLK_WDOG 24 +#define LPC32XX_CLK_I2S0 25 +#define LPC32XX_CLK_I2S1 26 +#define LPC32XX_CLK_SPI1 27 +#define LPC32XX_CLK_SPI2 28 +#define LPC32XX_CLK_MCPWM 29 +#define LPC32XX_CLK_HSTIMER 30 +#define LPC32XX_CLK_KEY 31 +#define LPC32XX_CLK_PWM1 32 +#define LPC32XX_CLK_PWM2 33 +#define LPC32XX_CLK_ADC 34 + +/* LPC32XX USB clocks */ +#define LPC32XX_USB_CLK_I2C 1 +#define LPC32XX_USB_CLK_DEVICE 2 +#define LPC32XX_USB_CLK_HOST 3 + +#endif /* __DT_BINDINGS_LPC32XX_CLOCK_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h new file mode 100644 index 000000000000..888e75ce8fec --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8996_H +#define _DT_BINDINGS_CLK_MSM_GCC_8996_H + +#define GPLL0_EARLY 0 +#define GPLL0 1 +#define GPLL1_EARLY 2 +#define GPLL1 3 +#define GPLL2_EARLY 4 +#define GPLL2 5 +#define GPLL3_EARLY 6 +#define GPLL3 7 +#define GPLL4_EARLY 8 +#define GPLL4 9 +#define SYSTEM_NOC_CLK_SRC 10 +#define CONFIG_NOC_CLK_SRC 11 +#define PERIPH_NOC_CLK_SRC 12 +#define MMSS_BIMC_GFX_CLK_SRC 13 +#define USB30_MASTER_CLK_SRC 14 +#define USB30_MOCK_UTMI_CLK_SRC 15 +#define USB3_PHY_AUX_CLK_SRC 16 +#define USB20_MASTER_CLK_SRC 17 +#define USB20_MOCK_UTMI_CLK_SRC 18 +#define SDCC1_APPS_CLK_SRC 19 +#define SDCC1_ICE_CORE_CLK_SRC 20 +#define SDCC2_APPS_CLK_SRC 21 +#define SDCC3_APPS_CLK_SRC 22 +#define SDCC4_APPS_CLK_SRC 23 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 24 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 25 +#define BLSP1_UART1_APPS_CLK_SRC 26 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 27 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 30 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 31 +#define BLSP1_UART3_APPS_CLK_SRC 32 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 33 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 34 +#define BLSP1_UART4_APPS_CLK_SRC 35 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 36 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 37 +#define BLSP1_UART5_APPS_CLK_SRC 38 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 39 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 40 +#define BLSP1_UART6_APPS_CLK_SRC 41 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 42 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 43 +#define BLSP2_UART1_APPS_CLK_SRC 44 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 45 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 46 +#define BLSP2_UART2_APPS_CLK_SRC 47 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 48 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 49 +#define BLSP2_UART3_APPS_CLK_SRC 50 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 51 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 52 +#define BLSP2_UART4_APPS_CLK_SRC 53 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 54 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 55 +#define BLSP2_UART5_APPS_CLK_SRC 56 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 57 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 58 +#define BLSP2_UART6_APPS_CLK_SRC 59 +#define PDM2_CLK_SRC 60 +#define TSIF_REF_CLK_SRC 61 +#define CE1_CLK_SRC 62 +#define GCC_SLEEP_CLK_SRC 63 +#define BIMC_CLK_SRC 64 +#define HMSS_AHB_CLK_SRC 65 +#define BIMC_HMSS_AXI_CLK_SRC 66 +#define HMSS_RBCPR_CLK_SRC 67 +#define HMSS_GPLL0_CLK_SRC 68 +#define GP1_CLK_SRC 69 +#define GP2_CLK_SRC 70 +#define GP3_CLK_SRC 71 +#define PCIE_AUX_CLK_SRC 72 +#define UFS_AXI_CLK_SRC 73 +#define UFS_ICE_CORE_CLK_SRC 74 +#define QSPI_SER_CLK_SRC 75 +#define GCC_SYS_NOC_AXI_CLK 76 +#define GCC_SYS_NOC_HMSS_AHB_CLK 77 +#define GCC_SNOC_CNOC_AHB_CLK 78 +#define GCC_SNOC_PNOC_AHB_CLK 79 +#define GCC_SYS_NOC_AT_CLK 80 +#define GCC_SYS_NOC_USB3_AXI_CLK 81 +#define GCC_SYS_NOC_UFS_AXI_CLK 82 +#define GCC_CFG_NOC_AHB_CLK 83 +#define GCC_PERIPH_NOC_AHB_CLK 84 +#define GCC_PERIPH_NOC_USB20_AHB_CLK 85 +#define GCC_TIC_CLK 86 +#define GCC_IMEM_AXI_CLK 87 +#define GCC_MMSS_SYS_NOC_AXI_CLK 88 +#define GCC_MMSS_NOC_CFG_AHB_CLK 89 +#define GCC_MMSS_BIMC_GFX_CLK 90 +#define GCC_USB30_MASTER_CLK 91 +#define GCC_USB30_SLEEP_CLK 92 +#define GCC_USB30_MOCK_UTMI_CLK 93 +#define GCC_USB3_PHY_AUX_CLK 94 +#define GCC_USB3_PHY_PIPE_CLK 95 +#define GCC_USB20_MASTER_CLK 96 +#define GCC_USB20_SLEEP_CLK 97 +#define GCC_USB20_MOCK_UTMI_CLK 98 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 99 +#define GCC_SDCC1_APPS_CLK 100 +#define GCC_SDCC1_AHB_CLK 101 +#define GCC_SDCC1_ICE_CORE_CLK 102 +#define GCC_SDCC2_APPS_CLK 103 
+#define GCC_SDCC2_AHB_CLK 104 +#define GCC_SDCC3_APPS_CLK 105 +#define GCC_SDCC3_AHB_CLK 106 +#define GCC_SDCC4_APPS_CLK 107 +#define GCC_SDCC4_AHB_CLK 108 +#define GCC_BLSP1_AHB_CLK 109 +#define GCC_BLSP1_SLEEP_CLK 110 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 111 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 112 +#define GCC_BLSP1_UART1_APPS_CLK 113 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 114 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 115 +#define GCC_BLSP1_UART2_APPS_CLK 116 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 117 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 118 +#define GCC_BLSP1_UART3_APPS_CLK 119 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 120 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 121 +#define GCC_BLSP1_UART4_APPS_CLK 122 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 123 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 124 +#define GCC_BLSP1_UART5_APPS_CLK 125 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 126 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 127 +#define GCC_BLSP1_UART6_APPS_CLK 128 +#define GCC_BLSP2_AHB_CLK 129 +#define GCC_BLSP2_SLEEP_CLK 130 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 131 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 132 +#define GCC_BLSP2_UART1_APPS_CLK 133 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 134 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 135 +#define GCC_BLSP2_UART2_APPS_CLK 136 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 137 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 138 +#define GCC_BLSP2_UART3_APPS_CLK 139 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 140 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 141 +#define GCC_BLSP2_UART4_APPS_CLK 142 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 143 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 144 +#define GCC_BLSP2_UART5_APPS_CLK 145 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 146 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 147 +#define GCC_BLSP2_UART6_APPS_CLK 148 +#define GCC_PDM_AHB_CLK 149 +#define GCC_PDM_XO4_CLK 150 +#define GCC_PDM2_CLK 151 +#define GCC_PRNG_AHB_CLK 152 +#define GCC_TSIF_AHB_CLK 153 +#define GCC_TSIF_REF_CLK 154 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 155 +#define GCC_TCSR_AHB_CLK 156 +#define GCC_BOOT_ROM_AHB_CLK 157 +#define GCC_MSG_RAM_AHB_CLK 158 +#define GCC_TLMM_AHB_CLK 159 +#define GCC_TLMM_CLK 160 +#define GCC_MPM_AHB_CLK 161 +#define GCC_SPMI_SER_CLK 162 +#define GCC_SPMI_CNOC_AHB_CLK 163 +#define GCC_CE1_CLK 164 +#define GCC_CE1_AXI_CLK 165 +#define GCC_CE1_AHB_CLK 166 +#define GCC_BIMC_HMSS_AXI_CLK 167 +#define GCC_BIMC_GFX_CLK 168 +#define GCC_HMSS_AHB_CLK 169 +#define GCC_HMSS_SLV_AXI_CLK 170 +#define GCC_HMSS_MSTR_AXI_CLK 171 +#define GCC_HMSS_RBCPR_CLK 172 +#define GCC_GP1_CLK 173 +#define GCC_GP2_CLK 174 +#define GCC_GP3_CLK 175 +#define GCC_PCIE_0_SLV_AXI_CLK 176 +#define GCC_PCIE_0_MSTR_AXI_CLK 177 +#define GCC_PCIE_0_CFG_AHB_CLK 178 +#define GCC_PCIE_0_AUX_CLK 179 +#define GCC_PCIE_0_PIPE_CLK 180 +#define GCC_PCIE_1_SLV_AXI_CLK 181 +#define GCC_PCIE_1_MSTR_AXI_CLK 182 +#define GCC_PCIE_1_CFG_AHB_CLK 183 +#define GCC_PCIE_1_AUX_CLK 184 +#define GCC_PCIE_1_PIPE_CLK 185 +#define GCC_PCIE_2_SLV_AXI_CLK 186 +#define GCC_PCIE_2_MSTR_AXI_CLK 187 +#define GCC_PCIE_2_CFG_AHB_CLK 188 +#define GCC_PCIE_2_AUX_CLK 189 +#define GCC_PCIE_2_PIPE_CLK 190 +#define GCC_PCIE_PHY_CFG_AHB_CLK 191 +#define GCC_PCIE_PHY_AUX_CLK 192 +#define GCC_UFS_AXI_CLK 193 +#define GCC_UFS_AHB_CLK 194 +#define GCC_UFS_TX_CFG_CLK 195 +#define GCC_UFS_RX_CFG_CLK 196 +#define GCC_UFS_TX_SYMBOL_0_CLK 197 +#define GCC_UFS_RX_SYMBOL_0_CLK 198 +#define GCC_UFS_RX_SYMBOL_1_CLK 199 +#define GCC_UFS_UNIPRO_CORE_CLK 200 +#define GCC_UFS_ICE_CORE_CLK 201 +#define GCC_UFS_SYS_CLK_CORE_CLK 202 +#define GCC_UFS_TX_SYMBOL_CLK_CORE_CLK 203 +#define 
GCC_AGGRE0_SNOC_AXI_CLK 204 +#define GCC_AGGRE0_CNOC_AHB_CLK 205 +#define GCC_SMMU_AGGRE0_AXI_CLK 206 +#define GCC_SMMU_AGGRE0_AHB_CLK 207 +#define GCC_AGGRE1_PNOC_AHB_CLK 208 +#define GCC_AGGRE2_UFS_AXI_CLK 209 +#define GCC_AGGRE2_USB3_AXI_CLK 210 +#define GCC_QSPI_AHB_CLK 211 +#define GCC_QSPI_SER_CLK 212 +#define GCC_USB3_CLKREF_CLK 213 +#define GCC_HDMI_CLKREF_CLK 214 +#define GCC_UFS_CLKREF_CLK 215 +#define GCC_PCIE_CLKREF_CLK 216 +#define GCC_RX2_USB2_CLKREF_CLK 217 +#define GCC_RX1_USB2_CLKREF_CLK 218 + +#define GCC_SYSTEM_NOC_BCR 0 +#define GCC_CONFIG_NOC_BCR 1 +#define GCC_PERIPH_NOC_BCR 2 +#define GCC_IMEM_BCR 3 +#define GCC_MMSS_BCR 4 +#define GCC_PIMEM_BCR 5 +#define GCC_QDSS_BCR 6 +#define GCC_USB_30_BCR 7 +#define GCC_USB_20_BCR 8 +#define GCC_QUSB2PHY_PRIM_BCR 9 +#define GCC_QUSB2PHY_SEC_BCR 10 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SDCC2_BCR 13 +#define GCC_SDCC3_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_BLSP1_BCR 16 +#define GCC_BLSP1_QUP1_BCR 17 +#define GCC_BLSP1_UART1_BCR 18 +#define GCC_BLSP1_QUP2_BCR 19 +#define GCC_BLSP1_UART2_BCR 20 +#define GCC_BLSP1_QUP3_BCR 21 +#define GCC_BLSP1_UART3_BCR 22 +#define GCC_BLSP1_QUP4_BCR 23 +#define GCC_BLSP1_UART4_BCR 24 +#define GCC_BLSP1_QUP5_BCR 25 +#define GCC_BLSP1_UART5_BCR 26 +#define GCC_BLSP1_QUP6_BCR 27 +#define GCC_BLSP1_UART6_BCR 28 +#define GCC_BLSP2_BCR 29 +#define GCC_BLSP2_QUP1_BCR 30 +#define GCC_BLSP2_UART1_BCR 31 +#define GCC_BLSP2_QUP2_BCR 32 +#define GCC_BLSP2_UART2_BCR 33 +#define GCC_BLSP2_QUP3_BCR 34 +#define GCC_BLSP2_UART3_BCR 35 +#define GCC_BLSP2_QUP4_BCR 36 +#define GCC_BLSP2_UART4_BCR 37 +#define GCC_BLSP2_QUP5_BCR 38 +#define GCC_BLSP2_UART5_BCR 39 +#define GCC_BLSP2_QUP6_BCR 40 +#define GCC_BLSP2_UART6_BCR 41 +#define GCC_PDM_BCR 42 +#define GCC_PRNG_BCR 43 +#define GCC_TSIF_BCR 44 +#define GCC_TCSR_BCR 45 +#define GCC_BOOT_ROM_BCR 46 +#define GCC_MSG_RAM_BCR 47 +#define GCC_TLMM_BCR 48 +#define GCC_MPM_BCR 49 +#define GCC_SEC_CTRL_BCR 50 +#define GCC_SPMI_BCR 51 +#define GCC_SPDM_BCR 52 +#define GCC_CE1_BCR 53 +#define GCC_BIMC_BCR 54 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 55 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_SNOC_BUS_TIMEOUT1_BCR 57 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 58 +#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 59 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 60 +#define GCC_PNOC_BUS_TIMEOUT1_BCR 61 +#define GCC_PNOC_BUS_TIMEOUT2_BCR 62 +#define GCC_PNOC_BUS_TIMEOUT3_BCR 63 +#define GCC_PNOC_BUS_TIMEOUT4_BCR 64 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 65 +#define GCC_CNOC_BUS_TIMEOUT1_BCR 66 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 67 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 68 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 69 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 70 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 71 +#define GCC_CNOC_BUS_TIMEOUT7_BCR 72 +#define GCC_CNOC_BUS_TIMEOUT8_BCR 73 +#define GCC_CNOC_BUS_TIMEOUT9_BCR 74 +#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 75 +#define GCC_APB2JTAG_BCR 76 +#define GCC_RBCPR_CX_BCR 77 +#define GCC_RBCPR_MX_BCR 78 +#define GCC_PCIE_0_BCR 79 +#define GCC_PCIE_0_PHY_BCR 80 +#define GCC_PCIE_1_BCR 81 +#define GCC_PCIE_1_PHY_BCR 82 +#define GCC_PCIE_2_BCR 83 +#define GCC_PCIE_2_PHY_BCR 84 +#define GCC_PCIE_PHY_BCR 85 +#define GCC_DCD_BCR 86 +#define GCC_OBT_ODT_BCR 87 +#define GCC_UFS_BCR 88 +#define GCC_SSC_BCR 89 +#define GCC_VS_BCR 90 +#define GCC_AGGRE0_NOC_BCR 91 +#define GCC_AGGRE1_NOC_BCR 92 +#define GCC_AGGRE2_NOC_BCR 93 +#define GCC_DCC_BCR 94 +#define GCC_IPA_BCR 95 +#define GCC_QSPI_BCR 96 +#define GCC_SKL_BCR 97 +#define GCC_MSMPU_BCR 98 +#define 
GCC_MSS_Q6_BCR 99 +#define GCC_QREFS_VBG_CAL_BCR 100 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h new file mode 100644 index 000000000000..9b81ca65fcec --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8996_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8996_H + +#define MMPLL0_EARLY 0 +#define MMPLL0_PLL 1 +#define MMPLL1_EARLY 2 +#define MMPLL1_PLL 3 +#define MMPLL2_EARLY 4 +#define MMPLL2_PLL 5 +#define MMPLL3_EARLY 6 +#define MMPLL3_PLL 7 +#define MMPLL4_EARLY 8 +#define MMPLL4_PLL 9 +#define MMPLL5_EARLY 10 +#define MMPLL5_PLL 11 +#define MMPLL8_EARLY 12 +#define MMPLL8_PLL 13 +#define MMPLL9_EARLY 14 +#define MMPLL9_PLL 15 +#define AHB_CLK_SRC 16 +#define AXI_CLK_SRC 17 +#define MAXI_CLK_SRC 18 +#define DSA_CORE_CLK_SRC 19 +#define GFX3D_CLK_SRC 20 +#define RBBMTIMER_CLK_SRC 21 +#define ISENSE_CLK_SRC 22 +#define RBCPR_CLK_SRC 23 +#define VIDEO_CORE_CLK_SRC 24 +#define VIDEO_SUBCORE0_CLK_SRC 25 +#define VIDEO_SUBCORE1_CLK_SRC 26 +#define PCLK0_CLK_SRC 27 +#define PCLK1_CLK_SRC 28 +#define MDP_CLK_SRC 29 +#define EXTPCLK_CLK_SRC 30 +#define VSYNC_CLK_SRC 31 +#define HDMI_CLK_SRC 32 +#define BYTE0_CLK_SRC 33 +#define BYTE1_CLK_SRC 34 +#define ESC0_CLK_SRC 35 +#define ESC1_CLK_SRC 36 +#define CAMSS_GP0_CLK_SRC 37 +#define CAMSS_GP1_CLK_SRC 38 +#define MCLK0_CLK_SRC 39 +#define MCLK1_CLK_SRC 40 +#define MCLK2_CLK_SRC 41 +#define MCLK3_CLK_SRC 42 +#define CCI_CLK_SRC 43 +#define CSI0PHYTIMER_CLK_SRC 44 +#define CSI1PHYTIMER_CLK_SRC 45 +#define CSI2PHYTIMER_CLK_SRC 46 +#define CSIPHY0_3P_CLK_SRC 47 +#define CSIPHY1_3P_CLK_SRC 48 +#define CSIPHY2_3P_CLK_SRC 49 +#define JPEG0_CLK_SRC 50 +#define JPEG2_CLK_SRC 51 +#define JPEG_DMA_CLK_SRC 52 +#define VFE0_CLK_SRC 53 +#define VFE1_CLK_SRC 54 +#define CPP_CLK_SRC 55 +#define CSI0_CLK_SRC 56 +#define CSI1_CLK_SRC 57 +#define CSI2_CLK_SRC 58 +#define CSI3_CLK_SRC 59 +#define FD_CORE_CLK_SRC 60 +#define MMSS_CXO_CLK 61 +#define MMSS_SLEEPCLK_CLK 62 +#define MMSS_MMAGIC_AHB_CLK 63 +#define MMSS_MMAGIC_CFG_AHB_CLK 64 +#define MMSS_MISC_AHB_CLK 65 +#define MMSS_MISC_CXO_CLK 66 +#define MMSS_BTO_AHB_CLK 67 +#define MMSS_MMAGIC_AXI_CLK 68 +#define MMSS_S0_AXI_CLK 69 +#define MMSS_MMAGIC_MAXI_CLK 70 +#define DSA_CORE_CLK 71 +#define DSA_NOC_CFG_AHB_CLK 72 +#define MMAGIC_CAMSS_AXI_CLK 73 +#define MMAGIC_CAMSS_NOC_CFG_AHB_CLK 74 +#define THROTTLE_CAMSS_CXO_CLK 75 +#define THROTTLE_CAMSS_AHB_CLK 76 +#define THROTTLE_CAMSS_AXI_CLK 77 +#define SMMU_VFE_AHB_CLK 78 +#define SMMU_VFE_AXI_CLK 79 +#define SMMU_CPP_AHB_CLK 80 +#define SMMU_CPP_AXI_CLK 81 +#define SMMU_JPEG_AHB_CLK 82 +#define SMMU_JPEG_AXI_CLK 83 +#define MMAGIC_MDSS_AXI_CLK 84 +#define MMAGIC_MDSS_NOC_CFG_AHB_CLK 85 +#define THROTTLE_MDSS_CXO_CLK 86 +#define THROTTLE_MDSS_AHB_CLK 87 +#define THROTTLE_MDSS_AXI_CLK 88 +#define SMMU_ROT_AHB_CLK 89 +#define SMMU_ROT_AXI_CLK 90 +#define SMMU_MDP_AHB_CLK 91 +#define SMMU_MDP_AXI_CLK 92 
+#define MMAGIC_VIDEO_AXI_CLK 93 +#define MMAGIC_VIDEO_NOC_CFG_AHB_CLK 94 +#define THROTTLE_VIDEO_CXO_CLK 95 +#define THROTTLE_VIDEO_AHB_CLK 96 +#define THROTTLE_VIDEO_AXI_CLK 97 +#define SMMU_VIDEO_AHB_CLK 98 +#define SMMU_VIDEO_AXI_CLK 99 +#define MMAGIC_BIMC_AXI_CLK 100 +#define MMAGIC_BIMC_NOC_CFG_AHB_CLK 101 +#define GPU_GX_GFX3D_CLK 102 +#define GPU_GX_RBBMTIMER_CLK 103 +#define GPU_AHB_CLK 104 +#define GPU_AON_ISENSE_CLK 105 +#define VMEM_MAXI_CLK 106 +#define VMEM_AHB_CLK 107 +#define MMSS_RBCPR_CLK 108 +#define MMSS_RBCPR_AHB_CLK 109 +#define VIDEO_CORE_CLK 110 +#define VIDEO_AXI_CLK 111 +#define VIDEO_MAXI_CLK 112 +#define VIDEO_AHB_CLK 113 +#define VIDEO_SUBCORE0_CLK 114 +#define VIDEO_SUBCORE1_CLK 115 +#define MDSS_AHB_CLK 116 +#define MDSS_HDMI_AHB_CLK 117 +#define MDSS_AXI_CLK 118 +#define MDSS_PCLK0_CLK 119 +#define MDSS_PCLK1_CLK 120 +#define MDSS_MDP_CLK 121 +#define MDSS_EXTPCLK_CLK 122 +#define MDSS_VSYNC_CLK 123 +#define MDSS_HDMI_CLK 124 +#define MDSS_BYTE0_CLK 125 +#define MDSS_BYTE1_CLK 126 +#define MDSS_ESC0_CLK 127 +#define MDSS_ESC1_CLK 128 +#define CAMSS_TOP_AHB_CLK 129 +#define CAMSS_AHB_CLK 130 +#define CAMSS_MICRO_AHB_CLK 131 +#define CAMSS_GP0_CLK 132 +#define CAMSS_GP1_CLK 133 +#define CAMSS_MCLK0_CLK 134 +#define CAMSS_MCLK1_CLK 135 +#define CAMSS_MCLK2_CLK 136 +#define CAMSS_MCLK3_CLK 137 +#define CAMSS_CCI_CLK 138 +#define CAMSS_CCI_AHB_CLK 139 +#define CAMSS_CSI0PHYTIMER_CLK 140 +#define CAMSS_CSI1PHYTIMER_CLK 141 +#define CAMSS_CSI2PHYTIMER_CLK 142 +#define CAMSS_CSIPHY0_3P_CLK 143 +#define CAMSS_CSIPHY1_3P_CLK 144 +#define CAMSS_CSIPHY2_3P_CLK 145 +#define CAMSS_JPEG0_CLK 146 +#define CAMSS_JPEG2_CLK 147 +#define CAMSS_JPEG_DMA_CLK 148 +#define CAMSS_JPEG_AHB_CLK 149 +#define CAMSS_JPEG_AXI_CLK 150 +#define CAMSS_VFE_AHB_CLK 151 +#define CAMSS_VFE_AXI_CLK 152 +#define CAMSS_VFE0_CLK 153 +#define CAMSS_VFE0_STREAM_CLK 154 +#define CAMSS_VFE0_AHB_CLK 155 +#define CAMSS_VFE1_CLK 156 +#define CAMSS_VFE1_STREAM_CLK 157 +#define CAMSS_VFE1_AHB_CLK 158 +#define CAMSS_CSI_VFE0_CLK 159 +#define CAMSS_CSI_VFE1_CLK 160 +#define CAMSS_CPP_VBIF_AHB_CLK 161 +#define CAMSS_CPP_AXI_CLK 162 +#define CAMSS_CPP_CLK 163 +#define CAMSS_CPP_AHB_CLK 164 +#define CAMSS_CSI0_CLK 165 +#define CAMSS_CSI0_AHB_CLK 166 +#define CAMSS_CSI0PHY_CLK 167 +#define CAMSS_CSI0RDI_CLK 168 +#define CAMSS_CSI0PIX_CLK 169 +#define CAMSS_CSI1_CLK 170 +#define CAMSS_CSI1_AHB_CLK 171 +#define CAMSS_CSI1PHY_CLK 172 +#define CAMSS_CSI1RDI_CLK 173 +#define CAMSS_CSI1PIX_CLK 174 +#define CAMSS_CSI2_CLK 175 +#define CAMSS_CSI2_AHB_CLK 176 +#define CAMSS_CSI2PHY_CLK 177 +#define CAMSS_CSI2RDI_CLK 178 +#define CAMSS_CSI2PIX_CLK 179 +#define CAMSS_CSI3_CLK 180 +#define CAMSS_CSI3_AHB_CLK 181 +#define CAMSS_CSI3PHY_CLK 182 +#define CAMSS_CSI3RDI_CLK 183 +#define CAMSS_CSI3PIX_CLK 184 +#define CAMSS_ISPIF_AHB_CLK 185 +#define FD_CORE_CLK 186 +#define FD_CORE_UAR_CLK 187 +#define FD_AHB_CLK 188 +#define MMSS_SPDM_CSI0_CLK 189 +#define MMSS_SPDM_JPEG_DMA_CLK 190 +#define MMSS_SPDM_CPP_CLK 191 +#define MMSS_SPDM_PCLK0_CLK 192 +#define MMSS_SPDM_AHB_CLK 193 +#define MMSS_SPDM_GFX3D_CLK 194 +#define MMSS_SPDM_PCLK1_CLK 195 +#define MMSS_SPDM_JPEG2_CLK 196 +#define MMSS_SPDM_DEBUG_CLK 197 +#define MMSS_SPDM_VFE1_CLK 198 +#define MMSS_SPDM_VFE0_CLK 199 +#define MMSS_SPDM_VIDEO_CORE_CLK 200 +#define MMSS_SPDM_AXI_CLK 201 +#define MMSS_SPDM_MDP_CLK 202 +#define MMSS_SPDM_JPEG0_CLK 203 +#define MMSS_SPDM_RM_AXI_CLK 204 +#define MMSS_SPDM_RM_MAXI_CLK 205 + +#define MMAGICAHB_BCR 0 +#define MMAGIC_CFG_BCR 1 +#define 
MISC_BCR 2 +#define BTO_BCR 3 +#define MMAGICAXI_BCR 4 +#define MMAGICMAXI_BCR 5 +#define DSA_BCR 6 +#define MMAGIC_CAMSS_BCR 7 +#define THROTTLE_CAMSS_BCR 8 +#define SMMU_VFE_BCR 9 +#define SMMU_CPP_BCR 10 +#define SMMU_JPEG_BCR 11 +#define MMAGIC_MDSS_BCR 12 +#define THROTTLE_MDSS_BCR 13 +#define SMMU_ROT_BCR 14 +#define SMMU_MDP_BCR 15 +#define MMAGIC_VIDEO_BCR 16 +#define THROTTLE_VIDEO_BCR 17 +#define SMMU_VIDEO_BCR 18 +#define MMAGIC_BIMC_BCR 19 +#define GPU_GX_BCR 20 +#define GPU_BCR 21 +#define GPU_AON_BCR 22 +#define VMEM_BCR 23 +#define MMSS_RBCPR_BCR 24 +#define VIDEO_BCR 25 +#define MDSS_BCR 26 +#define CAMSS_TOP_BCR 27 +#define CAMSS_AHB_BCR 28 +#define CAMSS_MICRO_BCR 29 +#define CAMSS_CCI_BCR 30 +#define CAMSS_PHY0_BCR 31 +#define CAMSS_PHY1_BCR 32 +#define CAMSS_PHY2_BCR 33 +#define CAMSS_CSIPHY0_3P_BCR 34 +#define CAMSS_CSIPHY1_3P_BCR 35 +#define CAMSS_CSIPHY2_3P_BCR 36 +#define CAMSS_JPEG_BCR 37 +#define CAMSS_VFE_BCR 38 +#define CAMSS_VFE0_BCR 39 +#define CAMSS_VFE1_BCR 40 +#define CAMSS_CSI_VFE0_BCR 41 +#define CAMSS_CSI_VFE1_BCR 42 +#define CAMSS_CPP_TOP_BCR 43 +#define CAMSS_CPP_BCR 44 +#define CAMSS_CSI0_BCR 45 +#define CAMSS_CSI0RDI_BCR 46 +#define CAMSS_CSI0PIX_BCR 47 +#define CAMSS_CSI1_BCR 48 +#define CAMSS_CSI1RDI_BCR 49 +#define CAMSS_CSI1PIX_BCR 50 +#define CAMSS_CSI2_BCR 51 +#define CAMSS_CSI2RDI_BCR 52 +#define CAMSS_CSI2PIX_BCR 53 +#define CAMSS_CSI3_BCR 54 +#define CAMSS_CSI3RDI_BCR 55 +#define CAMSS_CSI3PIX_BCR 56 +#define CAMSS_ISPIF_BCR 57 +#define FD_BCR 58 +#define MMSS_SPDM_RM_BCR 59 + +#endif diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h new file mode 100644 index 000000000000..ebc7a7b43f52 --- /dev/null +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Xing Zheng <zhengxing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_GPLL 3 +#define ARMCLK 4 + +/* sclk gates (special clocks) */ +#define SCLK_GPU 64 +#define SCLK_SPI 65 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_NANDC 76 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S 82 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_OTGPHY0 93 +#define SCLK_LCDC 100 +#define SCLK_HDMI 109 +#define SCLK_HEVC 111 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_PVTM_VIDEO 125 +#define SCLK_MAC 151 +#define SCLK_MACREF 152 +#define SCLK_SFC 160 + +/* aclk gates */ +#define ACLK_DMAC2 194 +#define ACLK_LCDC 197 +#define ACLK_VIO 203 +#define ACLK_VCODEC 208 +#define ACLK_CPU 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_SPI 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_HDMI 360 +#define PCLK_CPU 362 +#define PCLK_PERI 363 +#define PCLK_DDRUPCTL 364 +#define PCLK_WDT 368 +#define PCLK_ACODEC 369 + +/* hclk gates */ +#define HCLK_OTG0 449 +#define HCLK_OTG1 450 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_I2S 462 +#define HCLK_LCDC 465 +#define HCLK_ROM 467 +#define HCLK_VIO_BUS 472 +#define HCLK_VCODEC 476 +#define HCLK_CPU 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0 0 +#define SRST_CORE1 1 +#define SRST_CORE0_DBG 4 +#define SRST_CORE1_DBG 5 +#define SRST_CORE0_POR 8 +#define SRST_CORE1_POR 9 +#define SRST_L2C 12 +#define SRST_TOPDBG 13 +#define SRST_STRC_SYS_A 14 +#define SRST_PD_CORE_NIU 15 + +#define SRST_TIMER2 16 +#define SRST_CPUSYS_H 17 +#define SRST_AHB2APB_H 19 +#define SRST_TIMER3 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_PERI_NIU 23 +#define SRST_I2S 24 +#define SRST_DDR_PLL 25 +#define SRST_GPU_DLL 26 +#define SRST_TIMER0 27 +#define SRST_TIMER1 28 +#define SRST_CORE_DLL 29 +#define SRST_EFUSE_P 30 +#define SRST_ACODEC_P 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_SFC 47 + +#define SRST_PWM0 48 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_GRF 55 +#define SRST_PERIPHSYS_A 57 +#define SRST_PERIPHSYS_H 58 +#define SRST_PERIPHSYS_P 59 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA2 64 +#define SRST_MAC 66 +#define SRST_NANDC 68 +#define SRST_USBOTG0 69 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_OTGC1 74 +#define SRST_DDRMSCH 79 + +#define SRST_MMC0 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI0 84 +#define SRST_WDT 86 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 + +#define 
SRST_HDMI_P 96 +#define SRST_VIO_BUS_H 99 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_USBPOR 105 + +#define SRST_VCODEC_A 112 +#define SRST_VCODEC_H 113 +#define SRST_VIO1_A 114 +#define SRST_HEVC 115 +#define SRST_VCODEC_NIU_A 116 +#define SRST_LCDC1_A 117 +#define SRST_LCDC1_H 118 +#define SRST_LCDC1_D 119 +#define SRST_GPU 120 +#define SRST_GPU_NIU_A 122 + +#define SRST_DBG_P 131 + +#endif diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h new file mode 100644 index 000000000000..a78dd891e24a --- /dev/null +++ b/include/dt-bindings/clock/rk3228-cru.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Jeffy Chen <jeffy.chen@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define ARMCLK 5 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S0 80 +#define SCLK_I2S1 81 +#define SCLK_I2S2 82 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 + +/* aclk gates */ +#define ACLK_DMAC 194 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_SPI0 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_PERI 363 + +/* hclk gates */ +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_ACLK_CORE 13 +#define SRST_NOC 14 +#define SRST_L2C 15 + +#define SRST_CPUSYS_H 18 +#define SRST_BUSSYS_H 19 +#define SRST_SPDIF 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_OTG_ADP 23 +#define SRST_I2S0 24 +#define SRST_I2S1 25 +#define SRST_I2S2 26 +#define SRST_ACODEC_P 27 +#define SRST_DFIMON 28 +#define SRST_MSCH 29 +#define SRST_EFUSE1024 30 +#define SRST_EFUSE256 31 + +#define SRST_GPIO0 32 +#define 
SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_PERIPH_NOC_A 36 +#define SRST_PERIPH_NOC_BUS_H 37 +#define SRST_PERIPH_NOC_P 38 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_PHYNOC 42 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 + +#define SRST_PWM 48 +#define SRST_A53_GIC 49 +#define SRST_DAP 51 +#define SRST_DAP_NOC 52 +#define SRST_CRYPTO 53 +#define SRST_SGRF 54 +#define SRST_GRF 55 +#define SRST_GMAC 56 +#define SRST_PERIPH_NOC_H 58 +#define SRST_MACPHY 63 + +#define SRST_DMA 64 +#define SRST_NANDC 68 +#define SRST_USBOTG 69 +#define SRST_OTGC 70 +#define SRST_USBHOST0 71 +#define SRST_HOST_CTRL0 72 +#define SRST_USBHOST1 73 +#define SRST_HOST_CTRL1 74 +#define SRST_USBHOST2 75 +#define SRST_HOST_CTRL2 76 +#define SRST_USBPOR0 77 +#define SRST_USBPOR1 78 +#define SRST_DDRMSCH 79 + +#define SRST_SMART_CARD 80 +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI 84 +#define SRST_TSP_H 85 +#define SRST_TSP 86 +#define SRST_TSADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 +#define SRST_HOST0_ECHI 92 +#define SRST_HOST1_ECHI 93 +#define SRST_HOST2_ECHI 94 +#define SRST_VOP_NOC_A 95 + +#define SRST_HDMI_P 96 +#define SRST_VIO_ARBI_H 97 +#define SRST_IEP_NOC_A 98 +#define SRST_VIO_NOC_H 99 +#define SRST_VOP_A 100 +#define SRST_VOP_H 101 +#define SRST_VOP_D 102 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_UTMI2 105 +#define SRST_UTMI3 106 +#define SRST_RGA 107 +#define SRST_RGA_NOC_A 108 +#define SRST_RGA_A 109 +#define SRST_RGA_H 110 +#define SRST_HDCP_A 111 + +#define SRST_VPU_A 112 +#define SRST_VPU_H 113 +#define SRST_VPU_NOC_A 116 +#define SRST_VPU_NOC_H 117 +#define SRST_RKVDEC_A 118 +#define SRST_RKVDEC_NOC_A 119 +#define SRST_RKVDEC_H 120 +#define SRST_RKVDEC_NOC_H 121 +#define SRST_RKVDEC_CORE 122 +#define SRST_RKVDEC_CABAC 123 +#define SRST_IEP_A 124 +#define SRST_IEP_H 125 +#define SRST_GPU_A 126 +#define SRST_GPU_NOC_A 127 + +#define SRST_CORE_DBG 128 +#define SRST_DBG_P 129 +#define SRST_TIMER0 130 +#define SRST_TIMER1 131 +#define SRST_TIMER2 132 +#define SRST_TIMER3 133 +#define SRST_TIMER4 134 +#define SRST_TIMER5 135 +#define SRST_VIO_H2P 136 +#define SRST_HDMIPHY 139 +#define SRST_VDAC 140 +#define SRST_TIMER_6CH_P 141 + +#endif diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index c719aacef14f..9a586e2d9c91 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -86,6 +86,8 @@ #define SCLK_USBPHY480M_SRC 122 #define SCLK_PVTM_CORE 123 #define SCLK_PVTM_GPU 124 +#define SCLK_CRYPTO 125 +#define SCLK_MIPIDSI_24M 126 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -164,6 +166,8 @@ #define PCLK_DDRUPCTL1 366 #define PCLK_PUBL1 367 #define PCLK_WDT 368 +#define PCLK_EFUSE256 369 +#define PCLK_EFUSE1024 370 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h new file mode 100644 index 000000000000..6f45aea49e4f --- /dev/null +++ b/include/dt-bindings/clock/tegra210-car.h @@ -0,0 +1,401 @@ +/* + * This header provides constants for binding nvidia,tegra210-car. + * + * The first 224 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. 
In + * this case, those clocks are assigned IDs above 224 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 224 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA210_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA210_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA210_CLK_ISPB 3 +#define TEGRA210_CLK_RTC 4 +#define TEGRA210_CLK_TIMER 5 +#define TEGRA210_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA210_CLK_GPIO 8 +#define TEGRA210_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA210_CLK_I2S1 11 +#define TEGRA210_CLK_I2C1 12 +/* 13 */ +#define TEGRA210_CLK_SDMMC1 14 +#define TEGRA210_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA210_CLK_PWM 17 +#define TEGRA210_CLK_I2S2 18 +/* 19 */ +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA210_CLK_USBD 22 +#define TEGRA210_CLK_ISP 23 +/* 24 */ +/* 25 */ +#define TEGRA210_CLK_DISP2 26 +#define TEGRA210_CLK_DISP1 27 +#define TEGRA210_CLK_HOST1X 28 +/* 29 */ +#define TEGRA210_CLK_I2S0 30 +/* 31 */ + +#define TEGRA210_CLK_MC 32 +#define TEGRA210_CLK_AHBDMA 33 +#define TEGRA210_CLK_APBDMA 34 +/* 35 */ +/* 36 */ +/* 37 */ +#define TEGRA210_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA210_CLK_KFUSE 40 +#define TEGRA210_CLK_SBC1 41 +/* 42 */ +/* 43 */ +#define TEGRA210_CLK_SBC2 44 +/* 45 */ +#define TEGRA210_CLK_SBC3 46 +#define TEGRA210_CLK_I2C5 47 +#define TEGRA210_CLK_DSIA 48 +/* 49 */ +/* 50 */ +/* 51 */ +#define TEGRA210_CLK_CSI 52 +/* 53 */ +#define TEGRA210_CLK_I2C2 54 +#define TEGRA210_CLK_UARTC 55 +#define TEGRA210_CLK_MIPI_CAL 56 +#define TEGRA210_CLK_EMC 57 +#define TEGRA210_CLK_USB2 58 +/* 59 */ +/* 60 */ +/* 61 */ +/* 62 */ +#define TEGRA210_CLK_BSEV 63 + +/* 64 */ +#define TEGRA210_CLK_UARTD 65 +/* 66 */ +#define TEGRA210_CLK_I2C3 67 +#define TEGRA210_CLK_SBC4 68 +#define TEGRA210_CLK_SDMMC3 69 +#define TEGRA210_CLK_PCIE 70 +#define TEGRA210_CLK_OWR 71 +#define TEGRA210_CLK_AFI 72 +#define TEGRA210_CLK_CSITE 73 +/* 74 */ +/* 75 */ +/* 76 */ +/* 77 */ +#define TEGRA210_CLK_SOC_THERM 78 +#define TEGRA210_CLK_DTV 79 +/* 80 */ +#define TEGRA210_CLK_I2CSLOW 81 +#define TEGRA210_CLK_DSIB 82 +#define TEGRA210_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA210_CLK_XUSB_HOST 89 +/* 90 */ +/* 91 */ +#define TEGRA210_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA210_CLK_MSELECT 99 +#define TEGRA210_CLK_TSENSOR 100 +#define TEGRA210_CLK_I2S3 101 +#define TEGRA210_CLK_I2S4 102 +#define TEGRA210_CLK_I2C4 103 +/* 104 */ +/* 105 */ +#define TEGRA210_CLK_D_AUDIO 106 +/* 107 ( affects abp -> ape) */ +/* 108 */ +/* 109 */ +/* 110 */ +#define TEGRA210_CLK_HDA2CODEC_2X 111 +/* 112 */ +/* 113 */ +/* 114 */ +/* 115 */ +/* 116 */ +/* 117 */ +#define TEGRA210_CLK_SPDIF_2X 118 +#define TEGRA210_CLK_ACTMON 119 +#define TEGRA210_CLK_EXTERN1 120 +#define TEGRA210_CLK_EXTERN2 121 +#define TEGRA210_CLK_EXTERN3 122 +#define TEGRA210_CLK_SATA_OOB 123 +#define TEGRA210_CLK_SATA 124 +#define TEGRA210_CLK_HDA 125 +/* 126 */ +/* 127 */ + +#define TEGRA210_CLK_HDA2HDMI 128 +/* 129 */ +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +/* 136 */ +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 
(bit affects xusb_falcon_src, xusb_fs_src, xusb_host_src and xusb_ss_src) */ +#define TEGRA210_CLK_XUSB_GATE 143 +#define TEGRA210_CLK_CILAB 144 +#define TEGRA210_CLK_CILCD 145 +#define TEGRA210_CLK_CILE 146 +#define TEGRA210_CLK_DSIALP 147 +#define TEGRA210_CLK_DSIBLP 148 +#define TEGRA210_CLK_ENTROPY 149 +/* 150 */ +/* 151 */ +/* 152 */ +/* 153 */ +/* 154 */ +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA210_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +#define TEGRA210_CLK_DMIC1 161 +#define TEGRA210_CLK_DMIC2 162 +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA210_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA210_CLK_VIM2_CLK 171 +/* 172 */ +#define TEGRA210_CLK_MIPIBIF 173 +/* 174 */ +/* 175 */ +/* 176 */ +#define TEGRA210_CLK_CLK72MHZ 177 +#define TEGRA210_CLK_VIC03 178 +/* 179 */ +/* 180 */ +#define TEGRA210_CLK_DPAUX 181 +#define TEGRA210_CLK_SOR0 182 +#define TEGRA210_CLK_SOR1 183 +#define TEGRA210_CLK_GPU 184 +#define TEGRA210_CLK_DBGAPB 185 +/* 186 */ +#define TEGRA210_CLK_PLL_P_OUT_ADSP 187 +/* 188 */ +#define TEGRA210_CLK_PLL_G_REF 189 +/* 190 */ +/* 191 */ + +/* 192 */ +#define TEGRA210_CLK_SDMMC_LEGACY 193 +#define TEGRA210_CLK_NVDEC 194 +#define TEGRA210_CLK_NVJPG 195 +/* 196 */ +#define TEGRA210_CLK_DMIC3 197 +#define TEGRA210_CLK_APE 198 +/* 199 */ +/* 200 */ +/* 201 */ +#define TEGRA210_CLK_MAUD 202 +/* 203 */ +/* 204 */ +/* 205 */ +#define TEGRA210_CLK_TSECB 206 +#define TEGRA210_CLK_DPAUX1 207 +#define TEGRA210_CLK_VI_I2C 208 +#define TEGRA210_CLK_HSIC_TRK 209 +#define TEGRA210_CLK_USB2_TRK 210 +#define TEGRA210_CLK_QSPI 211 +#define TEGRA210_CLK_UARTAPE 212 +/* 213 */ +/* 214 */ +/* 215 */ +/* 216 */ +/* 217 */ +/* 218 */ +#define TEGRA210_CLK_NVENC 219 +/* 220 */ +/* 221 */ +#define TEGRA210_CLK_SOR_SAFE 222 +#define TEGRA210_CLK_PLL_P_OUT_CPU 223 + + +#define TEGRA210_CLK_UARTB 224 +#define TEGRA210_CLK_VFIR 225 +#define TEGRA210_CLK_SPDIF_IN 226 +#define TEGRA210_CLK_SPDIF_OUT 227 +#define TEGRA210_CLK_VI 228 +#define TEGRA210_CLK_VI_SENSOR 229 +#define TEGRA210_CLK_FUSE 230 +#define TEGRA210_CLK_FUSE_BURN 231 +#define TEGRA210_CLK_CLK_32K 232 +#define TEGRA210_CLK_CLK_M 233 +#define TEGRA210_CLK_CLK_M_DIV2 234 +#define TEGRA210_CLK_CLK_M_DIV4 235 +#define TEGRA210_CLK_PLL_REF 236 +#define TEGRA210_CLK_PLL_C 237 +#define TEGRA210_CLK_PLL_C_OUT1 238 +#define TEGRA210_CLK_PLL_C2 239 +#define TEGRA210_CLK_PLL_C3 240 +#define TEGRA210_CLK_PLL_M 241 +#define TEGRA210_CLK_PLL_M_OUT1 242 +#define TEGRA210_CLK_PLL_P 243 +#define TEGRA210_CLK_PLL_P_OUT1 244 +#define TEGRA210_CLK_PLL_P_OUT2 245 +#define TEGRA210_CLK_PLL_P_OUT3 246 +#define TEGRA210_CLK_PLL_P_OUT4 247 +#define TEGRA210_CLK_PLL_A 248 +#define TEGRA210_CLK_PLL_A_OUT0 249 +#define TEGRA210_CLK_PLL_D 250 +#define TEGRA210_CLK_PLL_D_OUT0 251 +#define TEGRA210_CLK_PLL_D2 252 +#define TEGRA210_CLK_PLL_D2_OUT0 253 +#define TEGRA210_CLK_PLL_U 254 +#define TEGRA210_CLK_PLL_U_480M 255 + +#define TEGRA210_CLK_PLL_U_60M 256 +#define TEGRA210_CLK_PLL_U_48M 257 +/* 258 */ +#define TEGRA210_CLK_PLL_X 259 +#define TEGRA210_CLK_PLL_X_OUT0 260 +#define TEGRA210_CLK_PLL_RE_VCO 261 +#define TEGRA210_CLK_PLL_RE_OUT 262 +#define TEGRA210_CLK_PLL_E 263 +#define TEGRA210_CLK_SPDIF_IN_SYNC 264 +#define TEGRA210_CLK_I2S0_SYNC 265 +#define TEGRA210_CLK_I2S1_SYNC 266 +#define TEGRA210_CLK_I2S2_SYNC 267 +#define TEGRA210_CLK_I2S3_SYNC 268 +#define TEGRA210_CLK_I2S4_SYNC 269 +#define TEGRA210_CLK_VIMCLK_SYNC 270 +#define TEGRA210_CLK_AUDIO0 271 +#define TEGRA210_CLK_AUDIO1 272 +#define 
TEGRA210_CLK_AUDIO2 273 +#define TEGRA210_CLK_AUDIO3 274 +#define TEGRA210_CLK_AUDIO4 275 +#define TEGRA210_CLK_SPDIF 276 +#define TEGRA210_CLK_CLK_OUT_1 277 +#define TEGRA210_CLK_CLK_OUT_2 278 +#define TEGRA210_CLK_CLK_OUT_3 279 +#define TEGRA210_CLK_BLINK 280 +/* 281 */ +/* 282 */ +/* 283 */ +#define TEGRA210_CLK_XUSB_HOST_SRC 284 +#define TEGRA210_CLK_XUSB_FALCON_SRC 285 +#define TEGRA210_CLK_XUSB_FS_SRC 286 +#define TEGRA210_CLK_XUSB_SS_SRC 287 + +#define TEGRA210_CLK_XUSB_DEV_SRC 288 +#define TEGRA210_CLK_XUSB_DEV 289 +#define TEGRA210_CLK_XUSB_HS_SRC 290 +#define TEGRA210_CLK_SCLK 291 +#define TEGRA210_CLK_HCLK 292 +#define TEGRA210_CLK_PCLK 293 +#define TEGRA210_CLK_CCLK_G 294 +#define TEGRA210_CLK_CCLK_LP 295 +#define TEGRA210_CLK_DFLL_REF 296 +#define TEGRA210_CLK_DFLL_SOC 297 +#define TEGRA210_CLK_VI_SENSOR2 298 +#define TEGRA210_CLK_PLL_P_OUT5 299 +#define TEGRA210_CLK_CML0 300 +#define TEGRA210_CLK_CML1 301 +#define TEGRA210_CLK_PLL_C4 302 +#define TEGRA210_CLK_PLL_DP 303 +#define TEGRA210_CLK_PLL_E_MUX 304 +#define TEGRA210_CLK_PLL_MB 305 +#define TEGRA210_CLK_PLL_A1 306 +#define TEGRA210_CLK_PLL_D_DSI_OUT 307 +#define TEGRA210_CLK_PLL_C4_OUT0 308 +#define TEGRA210_CLK_PLL_C4_OUT1 309 +#define TEGRA210_CLK_PLL_C4_OUT2 310 +#define TEGRA210_CLK_PLL_C4_OUT3 311 +#define TEGRA210_CLK_PLL_U_OUT 312 +#define TEGRA210_CLK_PLL_U_OUT1 313 +#define TEGRA210_CLK_PLL_U_OUT2 314 +#define TEGRA210_CLK_USB2_HSIC_TRK 315 +#define TEGRA210_CLK_PLL_P_OUT_HSIO 316 +#define TEGRA210_CLK_PLL_P_OUT_XUSB 317 +#define TEGRA210_CLK_XUSB_SSP_SRC 318 +/* 319 */ +/* 320 */ +/* 321 */ +/* 322 */ +/* 323 */ +/* 324 */ +/* 325 */ +/* 326 */ +/* 327 */ +/* 328 */ +/* 329 */ +/* 330 */ +/* 331 */ +/* 332 */ +/* 333 */ +/* 334 */ +/* 335 */ +/* 336 */ +/* 337 */ +/* 338 */ +/* 339 */ +/* 340 */ +/* 341 */ +/* 342 */ +/* 343 */ +/* 344 */ +/* 345 */ +/* 346 */ +/* 347 */ +/* 348 */ +/* 349 */ + +#define TEGRA210_CLK_AUDIO0_MUX 350 +#define TEGRA210_CLK_AUDIO1_MUX 351 +#define TEGRA210_CLK_AUDIO2_MUX 352 +#define TEGRA210_CLK_AUDIO3_MUX 353 +#define TEGRA210_CLK_AUDIO4_MUX 354 +#define TEGRA210_CLK_SPDIF_MUX 355 +#define TEGRA210_CLK_CLK_OUT_1_MUX 356 +#define TEGRA210_CLK_CLK_OUT_2_MUX 357 +#define TEGRA210_CLK_CLK_OUT_3_MUX 358 +#define TEGRA210_CLK_DSIA_MUX 359 +#define TEGRA210_CLK_DSIB_MUX 360 +#define TEGRA210_CLK_SOR0_LVDS 361 +#define TEGRA210_CLK_XUSB_SS_DIV2 362 + +#define TEGRA210_CLK_PLL_M_UD 363 +#define TEGRA210_CLK_PLL_C_UD 364 +#define TEGRA210_CLK_SCLK_MUX 365 + +#define TEGRA210_CLK_CLK_MAX 366 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index 79fcef72ef57..7958bec7de8c 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -6,7 +6,7 @@ * Author: Jacek Anaszewski <j.anaszewski@samsung.com> */ -#ifndef __DT_BINDINGS_LEDS_H__ +#ifndef __DT_BINDINGS_LEDS_H #define __DT_BINDINGS_LEDS_H /* External trigger type */ diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h index 2c8ac4841385..cdb075aae4e1 100644 --- a/include/dt-bindings/mfd/palmas.h +++ b/include/dt-bindings/mfd/palmas.h @@ -7,7 +7,7 @@ * */ -#ifndef __DT_BINDINGS_PALMAS_H__ +#ifndef __DT_BINDINGS_PALMAS_H #define __DT_BINDINGS_PALMAS_H /* External control pins */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index b20cd885c1fd..39fd38cfa8c9 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -35,4 +35,28 @@ extern int 
system_verify_data(const void *data, unsigned long len, enum key_being_used_for usage); #endif +#ifdef CONFIG_IMA_MOK_KEYRING +extern struct key *ima_mok_keyring; +extern struct key *ima_blacklist_keyring; + +static inline struct key *get_ima_mok_keyring(void) +{ + return ima_mok_keyring; +} +static inline struct key *get_ima_blacklist_keyring(void) +{ + return ima_blacklist_keyring; +} +#else +static inline struct key *get_ima_mok_keyring(void) +{ + return NULL; +} +static inline struct key *get_ima_blacklist_keyring(void) +{ + return NULL; +} +#endif /* CONFIG_IMA_MOK_KEYRING */ + + #endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h index f91ecd9d1bb1..42cf2d991bf4 100644 --- a/include/keys/trusted-type.h +++ b/include/keys/trusted-type.h @@ -18,6 +18,7 @@ #define MAX_KEY_SIZE 128 #define MAX_BLOB_SIZE 512 #define MAX_PCRINFO_SIZE 64 +#define MAX_DIGEST_SIZE 64 struct trusted_key_payload { struct rcu_head rcu; @@ -36,6 +37,10 @@ struct trusted_key_options { uint32_t pcrinfo_len; unsigned char pcrinfo[MAX_PCRINFO_SIZE]; int pcrlock; + uint32_t hash; + uint32_t digest_len; + unsigned char policydigest[MAX_DIGEST_SIZE]; + uint32_t policyhandle; }; extern struct key_type key_type_trusted; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d2f41477f8ae..13a3d537811b 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -279,6 +279,12 @@ struct vgic_v2_cpu_if { u32 vgic_lr[VGIC_V2_MAX_LRS]; }; +/* + * LRs are stored in reverse order in memory. make sure we index them + * correctly. + */ +#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + struct vgic_v3_cpu_if { #ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1991aea2ec4c..06ed7e54033e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -37,6 +37,8 @@ #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/dynamic_debug.h> +#include <linux/module.h> +#include <linux/mutex.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -119,6 +121,75 @@ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, const unsigned long end); +/* Debugger support */ + +struct acpi_debugger_ops { + int (*create_thread)(acpi_osd_exec_callback function, void *context); + ssize_t (*write_log)(const char *msg); + ssize_t (*read_cmd)(char *buffer, size_t length); + int (*wait_command_ready)(bool single_step, char *buffer, size_t length); + int (*notify_command_complete)(void); +}; + +struct acpi_debugger { + const struct acpi_debugger_ops *ops; + struct module *owner; + struct mutex lock; +}; + +#ifdef CONFIG_ACPI_DEBUGGER +int __init acpi_debugger_init(void); +int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops); +void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); +int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); +ssize_t acpi_debugger_write_log(const char *msg); +ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); +int acpi_debugger_wait_command_ready(void); +int acpi_debugger_notify_command_complete(void); +#else +static inline int acpi_debugger_init(void) +{ + return -ENODEV; +} + +static inline int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops) +{ + return -ENODEV; +} + +static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) 
+{ +} + +static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, + void *context) +{ + return -ENODEV; +} + +static inline int acpi_debugger_write_log(const char *msg) +{ + return -ENODEV; +} + +static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) +{ + return -ENODEV; +} + +static inline int acpi_debugger_wait_command_ready(void) +{ + return -ENODEV; +} + +static inline int acpi_debugger_notify_command_complete(void) +{ + return -ENODEV; +} +#endif + #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE void acpi_initrd_override(void *data, size_t size); #else @@ -318,6 +389,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares, bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); +unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 0ddb5c02ad8b..d76a19ba2cff 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -65,6 +65,24 @@ #define ST_UART011_ABCR 0x100 /* Autobaud control register. */ #define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ +/* + * ZTE UART register offsets. This UART has a radically different address + * allocation from the ARM and ST variants, so we list all registers here. + * We assume unlisted registers do not exist. + */ +#define ZX_UART011_DR 0x04 +#define ZX_UART011_FR 0x14 +#define ZX_UART011_IBRD 0x24 +#define ZX_UART011_FBRD 0x28 +#define ZX_UART011_LCRH 0x30 +#define ZX_UART011_CR 0x34 +#define ZX_UART011_IFLS 0x38 +#define ZX_UART011_IMSC 0x40 +#define ZX_UART011_RIS 0x44 +#define ZX_UART011_MIS 0x48 +#define ZX_UART011_ICR 0x4c +#define ZX_UART011_DMACR 0x50 + #define UART011_DR_OE (1 << 11) #define UART011_DR_BE (1 << 10) #define UART011_DR_PE (1 << 9) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 000000000000..b5abfda80465 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include <linux/linkage.h> +#include <linux/types.h> + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. + */ +asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +/** + * arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. 
+ */ +asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/audit.h b/include/linux/audit.h index 20eba1eb0a3c..b40ed5df5542 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -113,6 +113,107 @@ struct filename; extern void audit_log_session_info(struct audit_buffer *ab); +#ifdef CONFIG_AUDIT +/* These are defined in audit.c */ + /* Public API */ +extern __printf(4, 5) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...); + +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); +extern __printf(2, 3) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); +extern void audit_log_end(struct audit_buffer *ab); +extern bool audit_string_contains_control(const char *string, + size_t len); +extern void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, + size_t len); +extern void audit_log_n_string(struct audit_buffer *ab, + const char *buf, + size_t n); +extern void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, + size_t n); +extern void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string); +extern void audit_log_d_path(struct audit_buffer *ab, + const char *prefix, + const struct path *path); +extern void audit_log_key(struct audit_buffer *ab, + char *key); +extern void audit_log_link_denied(const char *operation, + struct path *link); +extern void audit_log_lost(const char *message); +#ifdef CONFIG_SECURITY +extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); +#else +static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) +{ } +#endif + +extern int audit_log_task_context(struct audit_buffer *ab); +extern void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk); + +extern int audit_update_lsm_rules(void); + + /* Private API (for audit.c only) */ +extern int audit_filter_user(int type); +extern int audit_filter_type(int type); +extern int audit_rule_change(int type, __u32 portid, int seq, + void *data, size_t datasz); +extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); + +extern u32 audit_enabled; +#else /* CONFIG_AUDIT */ +static inline __printf(4, 5) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...) +{ } +static inline struct audit_buffer *audit_log_start(struct audit_context *ctx, + gfp_t gfp_mask, int type) +{ + return NULL; +} +static inline __printf(2, 3) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
+{ } +static inline void audit_log_end(struct audit_buffer *ab) +{ } +static inline void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, size_t len) +{ } +static inline void audit_log_n_string(struct audit_buffer *ab, + const char *buf, size_t n) +{ } +static inline void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, size_t n) +{ } +static inline void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string) +{ } +static inline void audit_log_d_path(struct audit_buffer *ab, + const char *prefix, + const struct path *path) +{ } +static inline void audit_log_key(struct audit_buffer *ab, char *key) +{ } +static inline void audit_log_link_denied(const char *string, + const struct path *link) +{ } +static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) +{ } +static inline int audit_log_task_context(struct audit_buffer *ab) +{ + return 0; +} +static inline void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk) +{ } +#define audit_enabled 0 +#endif /* CONFIG_AUDIT */ + #ifdef CONFIG_AUDIT_COMPAT_GENERIC #define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT)) #else @@ -137,7 +238,7 @@ extern void __audit_getname(struct filename *name); extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); -extern void __audit_inode_child(const struct inode *parent, +extern void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type); extern void __audit_seccomp(unsigned long syscall, long signr, int code); @@ -202,7 +303,7 @@ static inline void audit_inode_parent_hidden(struct filename *name, __audit_inode(name, dentry, AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); } -static inline void audit_inode_child(const struct inode *parent, +static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { if (unlikely(!audit_dummy_context())) @@ -212,6 +313,9 @@ void audit_core_dumps(long signr); static inline void audit_seccomp(unsigned long syscall, long signr, int code) { + if (!audit_enabled) + return; + /* Force a record to be reported if a signal was delivered. 
*/ if (signr || unlikely(!audit_dummy_context())) __audit_seccomp(syscall, signr, code); @@ -359,7 +463,7 @@ static inline void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) { } -static inline void __audit_inode_child(const struct inode *parent, +static inline void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { } @@ -373,7 +477,7 @@ static inline void audit_file(struct file *file) static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } -static inline void audit_inode_child(const struct inode *parent, +static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { } @@ -446,106 +550,6 @@ static inline bool audit_loginuid_set(struct task_struct *tsk) return uid_valid(audit_get_loginuid(tsk)); } -#ifdef CONFIG_AUDIT -/* These are defined in audit.c */ - /* Public API */ -extern __printf(4, 5) -void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, - const char *fmt, ...); - -extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); -extern __printf(2, 3) -void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); -extern void audit_log_end(struct audit_buffer *ab); -extern bool audit_string_contains_control(const char *string, - size_t len); -extern void audit_log_n_hex(struct audit_buffer *ab, - const unsigned char *buf, - size_t len); -extern void audit_log_n_string(struct audit_buffer *ab, - const char *buf, - size_t n); -extern void audit_log_n_untrustedstring(struct audit_buffer *ab, - const char *string, - size_t n); -extern void audit_log_untrustedstring(struct audit_buffer *ab, - const char *string); -extern void audit_log_d_path(struct audit_buffer *ab, - const char *prefix, - const struct path *path); -extern void audit_log_key(struct audit_buffer *ab, - char *key); -extern void audit_log_link_denied(const char *operation, - struct path *link); -extern void audit_log_lost(const char *message); -#ifdef CONFIG_SECURITY -extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); -#else -static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) -{ } -#endif - -extern int audit_log_task_context(struct audit_buffer *ab); -extern void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk); - -extern int audit_update_lsm_rules(void); - - /* Private API (for audit.c only) */ -extern int audit_filter_user(int type); -extern int audit_filter_type(int type); -extern int audit_rule_change(int type, __u32 portid, int seq, - void *data, size_t datasz); -extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); - -extern u32 audit_enabled; -#else /* CONFIG_AUDIT */ -static inline __printf(4, 5) -void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, - const char *fmt, ...) -{ } -static inline struct audit_buffer *audit_log_start(struct audit_context *ctx, - gfp_t gfp_mask, int type) -{ - return NULL; -} -static inline __printf(2, 3) -void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
-{ } -static inline void audit_log_end(struct audit_buffer *ab) -{ } -static inline void audit_log_n_hex(struct audit_buffer *ab, - const unsigned char *buf, size_t len) -{ } -static inline void audit_log_n_string(struct audit_buffer *ab, - const char *buf, size_t n) -{ } -static inline void audit_log_n_untrustedstring(struct audit_buffer *ab, - const char *string, size_t n) -{ } -static inline void audit_log_untrustedstring(struct audit_buffer *ab, - const char *string) -{ } -static inline void audit_log_d_path(struct audit_buffer *ab, - const char *prefix, - const struct path *path) -{ } -static inline void audit_log_key(struct audit_buffer *ab, char *key) -{ } -static inline void audit_log_link_denied(const char *string, - const struct path *link) -{ } -static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) -{ } -static inline int audit_log_task_context(struct audit_buffer *ab) -{ - return 0; -} -static inline void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk) -{ } -#define audit_enabled 0 -#endif /* CONFIG_AUDIT */ static inline void audit_log_string(struct audit_buffer *ab, const char *buf) { audit_log_n_string(ab, buf, strlen(buf)); diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h new file mode 100644 index 000000000000..c3bdf8c59480 --- /dev/null +++ b/include/linux/badblocks.h @@ -0,0 +1,65 @@ +#ifndef _LINUX_BADBLOCKS_H +#define _LINUX_BADBLOCKS_H + +#include <linux/seqlock.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/stddef.h> +#include <linux/types.h> + +#define BB_LEN_MASK (0x00000000000001FFULL) +#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL) +#define BB_ACK_MASK (0x8000000000000000ULL) +#define BB_MAX_LEN 512 +#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) +#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) +#define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) + +/* Bad block numbers are stored sorted in a single page. + * 64bits is used for each block or extent. + * 54 bits are sector number, 9 bits are extent size, + * 1 bit is an 'acknowledged' flag. + */ +#define MAX_BADBLOCKS (PAGE_SIZE/8) + +struct badblocks { + struct device *dev; /* set by devm_init_badblocks */ + int count; /* count of bad blocks */ + int unacked_exist; /* there probably are unacknowledged + * bad blocks. 
This is only cleared + * when a read discovers none + */ + int shift; /* shift from sectors to block size + * a -ve shift means badblocks are + * disabled.*/ + u64 *page; /* badblock list */ + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; /* in sectors */ +}; + +int badblocks_check(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors); +int badblocks_set(struct badblocks *bb, sector_t s, int sectors, + int acknowledged); +int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); +void ack_all_badblocks(struct badblocks *bb); +ssize_t badblocks_show(struct badblocks *bb, char *page, int unack); +ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, + int unack); +int badblocks_init(struct badblocks *bb, int enable); +void badblocks_exit(struct badblocks *bb); +struct device; +int devm_init_badblocks(struct device *dev, struct badblocks *bb); +static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) +{ + if (bb->dev != dev) { + dev_WARN_ONCE(dev, 1, "%s: badblocks instance not associated\n", + __func__); + return; + } + badblocks_exit(bb); +} +#endif diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h deleted file mode 100644 index ed3768f4ecc7..000000000000 --- a/include/linux/basic_mmio_gpio.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Basic memory-mapped GPIO controllers. - * - * Copyright 2008 MontaVista Software, Inc. - * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __BASIC_MMIO_GPIO_H -#define __BASIC_MMIO_GPIO_H - -#include <linux/gpio.h> -#include <linux/types.h> -#include <linux/compiler.h> -#include <linux/spinlock_types.h> - -struct bgpio_pdata { - const char *label; - int base; - int ngpio; -}; - -struct device; - -struct bgpio_chip { - struct gpio_chip gc; - - unsigned long (*read_reg)(void __iomem *reg); - void (*write_reg)(void __iomem *reg, unsigned long data); - - void __iomem *reg_dat; - void __iomem *reg_set; - void __iomem *reg_clr; - void __iomem *reg_dir; - - /* Number of bits (GPIOs): <register width> * 8. */ - int bits; - - /* - * Some GPIO controllers work with the big-endian bits notation, - * e.g. in a 8-bits register, GPIO7 is the least significant bit. - */ - unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin); - - /* - * Used to lock bgpio_chip->data. Also, this is needed to keep - * shadowed and real data registers writes together. - */ - spinlock_t lock; - - /* Shadowed data register to clear/set bits safely. */ - unsigned long data; - - /* Shadowed direction registers to clear/set direction safely. 
*/ - unsigned long dir; -}; - -static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) -{ - return container_of(gc, struct bgpio_chip, gc); -} - -int bgpio_remove(struct bgpio_chip *bgc); -int bgpio_init(struct bgpio_chip *bgc, struct device *dev, - unsigned long sz, void __iomem *dat, void __iomem *set, - void __iomem *clr, void __iomem *dirout, void __iomem *dirin, - unsigned long flags); - -#define BGPIOF_BIG_ENDIAN BIT(0) -#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */ -#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ -#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) -#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ -#define BGPIOF_NO_OUTPUT BIT(5) /* only input */ - -#endif /* __BASIC_MMIO_GPIO_H */ diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h index 5582c211f594..8d9d07ec22a5 100644 --- a/include/linux/bcm47xx_wdt.h +++ b/include/linux/bcm47xx_wdt.h @@ -1,7 +1,6 @@ #ifndef LINUX_BCM47XX_WDT_H_ #define LINUX_BCM47XX_WDT_H_ -#include <linux/notifier.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/watchdog.h> @@ -15,8 +14,6 @@ struct bcm47xx_wdt { void *driver_data; struct watchdog_device wdd; - struct notifier_block notifier; - struct notifier_block restart_handler; struct timer_list soft_timer; atomic_t soft_ticks; diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index cf038431a5cc..db51a6ffb7d6 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -579,6 +579,8 @@ struct bcma_pflash { }; #ifdef CONFIG_BCMA_SFLASH +struct mtd_info; + struct bcma_sflash { bool present; u32 window; @@ -592,13 +594,9 @@ struct bcma_sflash { #endif #ifdef CONFIG_BCMA_NFLASH -struct mtd_info; - struct bcma_nflash { bool present; bool boot; /* This is the flash the SoC boots from */ - - struct mtd_info *mtd; }; #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0169ba2e2e64..bfb64d672e19 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -15,6 +15,7 @@ #include <linux/backing-dev-defs.h> #include <linux/wait.h> #include <linux/mempool.h> +#include <linux/pfn.h> #include <linux/bio.h> #include <linux/stringify.h> #include <linux/gfp.h> @@ -797,6 +798,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); extern void blk_queue_exit(struct request_queue *q); extern void blk_start_queue(struct request_queue *q); +extern void blk_start_queue_async(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); @@ -1616,6 +1618,20 @@ static inline bool integrity_req_gap_front_merge(struct request *req, #endif /* CONFIG_BLK_DEV_INTEGRITY */ +/** + * struct blk_dax_ctl - control and output parameters for ->direct_access + * @sector: (input) offset relative to a block_device + * @addr: (output) kernel virtual address for @sector populated by driver + * @pfn: (output) page frame number for @addr populated by driver + * @size: (input) number of bytes requested + */ +struct blk_dax_ctl { + sector_t sector; + void __pmem *addr; + long size; + pfn_t pfn; +}; + struct block_device_operations { int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); @@ -1623,7 +1639,7 @@ struct block_device_operations { int 
(*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); long (*direct_access)(struct block_device *, sector_t, void __pmem **, - unsigned long *pfn); + pfn_t *); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); /* ->media_changed() is DEPRECATED, use ->check_events() instead */ @@ -1642,8 +1658,7 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); -extern long bdev_direct_access(struct block_device *, sector_t, - void __pmem **addr, unsigned long *pfn, long size); +extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *); #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index f589222bfa87..35b22f94d2d2 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -19,6 +19,10 @@ extern unsigned long min_low_pfn; * highest page */ extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; #ifndef CONFIG_NO_BOOTMEM /* diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 59f4a7304419..f0ba9c2ec639 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -26,6 +26,7 @@ #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 +#define PHY_ID_BCM7435 0x600d8750 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 diff --git a/include/linux/capability.h b/include/linux/capability.h index af9f0b9e80e6..f314275d4e3f 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -145,24 +145,24 @@ static inline kernel_cap_t cap_invert(const kernel_cap_t c) return dest; } -static inline int cap_isclear(const kernel_cap_t a) +static inline bool cap_isclear(const kernel_cap_t a) { unsigned __capi; CAP_FOR_EACH_U32(__capi) { if (a.cap[__capi] != 0) - return 0; + return false; } - return 1; + return true; } /* * Check if "a" is a subset of "set". - * return 1 if ALL of the capabilities in "a" are also in "set" - * cap_issubset(0101, 1111) will return 1 - * return 0 if ANY of the capabilities in "a" are not in "set" - * cap_issubset(1111, 0101) will return 0 + * return true if ALL of the capabilities in "a" are also in "set" + * cap_issubset(0101, 1111) will return true + * return false if ANY of the capabilities in "a" are not in "set" + * cap_issubset(1111, 0101) will return false */ -static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) +static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set) { kernel_cap_t dest; dest = cap_drop(a, set); @@ -171,12 +171,6 @@ static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) /* Used to decide between falling back on the old suser() or fsuser(). 
*/ -static inline int cap_is_fs_cap(int cap) -{ - const kernel_cap_t __cap_fs_set = CAP_FS_SET; - return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]); -} - static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) { const kernel_cap_t __cap_fs_set = CAP_FS_SET; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 06b77f9dd3f2..7f540f7f588d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -34,17 +34,12 @@ struct seq_file; /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, -#define SUBSYS_TAG(_t) CGROUP_ ## _t, \ - __unused_tag_ ## _t = CGROUP_ ## _t - 1, enum cgroup_subsys_id { #include <linux/cgroup_subsys.h> CGROUP_SUBSYS_COUNT, }; -#undef SUBSYS_TAG #undef SUBSYS -#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START) - /* bits in struct cgroup_subsys_state flags field */ enum { CSS_NO_REF = (1 << 0), /* no reference counting for this css */ @@ -66,7 +61,6 @@ enum { /* cgroup_root->flags */ enum { - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ }; @@ -231,6 +225,14 @@ struct cgroup { int id; /* + * The depth this cgroup is at. The root is at depth zero and each + * step down the hierarchy increments the level. This along with + * ancestor_ids[] can determine whether a given cgroup is a + * descendant of another without traversing the hierarchy. + */ + int level; + + /* * Each non-empty css_set associated with this cgroup contributes * one to populated_cnt. All children with non-zero popuplated_cnt * of their own contribute one. The count is zero iff there's no @@ -285,6 +287,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + + /* ids of the ancestors at each level including self */ + int ancestor_ids[]; }; /* @@ -304,6 +309,9 @@ struct cgroup_root { /* The root cgroup. Root is destroyed on its release. */ struct cgroup cgrp; + /* for cgrp->ancestor_ids[0] */ + int cgrp_ancestor_id_storage; + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; @@ -425,9 +433,9 @@ struct cgroup_subsys { int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); - int (*can_fork)(struct task_struct *task, void **priv_p); - void (*cancel_fork)(struct task_struct *task, void *priv); - void (*fork)(struct task_struct *task, void *priv); + int (*can_fork)(struct task_struct *task); + void (*cancel_fork)(struct task_struct *task); + void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); @@ -513,7 +521,6 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #else /* CONFIG_CGROUPS */ -#define CGROUP_CANFORK_COUNT 0 #define CGROUP_SUBSYS_COUNT 0 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} @@ -521,4 +528,116 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +/* + * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains + * per-socket cgroup information except for memcg association. 
+ * + * On legacy hierarchies, net_prio and net_cls controllers directly set + * attributes on each sock which can then be tested by the network layer. + * On the default hierarchy, each sock is associated with the cgroup it was + * created in and the networking layer can match the cgroup directly. + * + * To avoid carrying all three cgroup related fields separately in sock, + * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. + * On boot, sock_cgroup_data records the cgroup that the sock was created + * in so that cgroup2 matches can be made; however, once either net_prio or + * net_cls starts being used, the area is overridden to carry prioidx and/or + * classid. The two modes are distinguished by whether the lowest bit is + * set. A clear bit indicates a cgroup pointer, while a set bit indicates + * prioidx and classid. + * + * While userland may start using net_prio or net_cls at any time, once + * either is used, cgroup2 matching no longer works. There is no reason to + * mix the two and this is in line with how legacy and v2 compatibility is + * handled. On mode switch, cgroup references which are already being + * pointed to by socks may be leaked. While this can be remedied by adding + * synchronization around sock_cgroup_data, given that the number of leaked + * cgroups is bounded and highly unlikely to be high, this seems to be the + * better trade-off. + */ +struct sock_cgroup_data { + union { +#ifdef __LITTLE_ENDIAN + struct { + u8 is_data; + u8 padding; + u16 prioidx; + u32 classid; + } __packed; +#else + struct { + u32 classid; + u16 prioidx; + u8 padding; + u8 is_data; + } __packed; +#endif + u64 val; + }; +}; + +/* + * There's a theoretical window where the following accessors race with + * updaters and return part of the previous pointer as the prioidx or + * classid. Such races are short-lived and the result isn't critical. + */ +static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd) +{ + /* fallback to 1 which is always the ID of the root cgroup */ + return (skcd->is_data & 1) ? skcd->prioidx : 1; +} + +static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd) +{ + /* fallback to 0 which is the unconfigured default classid */ + return (skcd->is_data & 1) ? skcd->classid : 0; +} + +/* + * If invoked concurrently, the updaters may clobber each other. The + * caller is responsible for synchronization.
+ */ +static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, + u16 prioidx) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_prioidx(&skcd_buf) == prioidx) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.prioidx = prioidx; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, + u32 classid) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_classid(&skcd_buf) == classid) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.classid = classid; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +#else /* CONFIG_SOCK_CGROUP_DATA */ + +struct sock_cgroup_data { +}; + +#endif /* CONFIG_SOCK_CGROUP_DATA */ + #endif /* _LINUX_CGROUP_DEFS_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index cb91b44f5f78..2162dca88dc0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -81,7 +81,8 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); -bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); +struct cgroup *cgroup_get_from_path(const char *path); + int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); @@ -96,12 +97,9 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); void cgroup_fork(struct task_struct *p); -extern int cgroup_can_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]); -extern void cgroup_cancel_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]); -extern void cgroup_post_fork(struct task_struct *p, - void *old_ss_priv[CGROUP_CANFORK_COUNT]); +extern int cgroup_can_fork(struct task_struct *p); +extern void cgroup_cancel_fork(struct task_struct *p); +extern void cgroup_post_fork(struct task_struct *p); void cgroup_exit(struct task_struct *p); void cgroup_free(struct task_struct *p); @@ -364,6 +362,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) percpu_ref_put_many(&css->refcnt, n); } +static inline void cgroup_put(struct cgroup *cgrp) +{ + css_put(&cgrp->self); +} + /** * task_css_set_check - obtain a task's css_set with extra access conditions * @task: the task to obtain css_set for @@ -471,6 +474,23 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, return task_css(task, subsys_id)->cgroup; } +/** + * cgroup_is_descendant - test ancestry + * @cgrp: the cgroup to be tested + * @ancestor: possible ancestor of @cgrp + * + * Test whether @cgrp is a descendant of @ancestor. It also returns %true + * if @cgrp == @ancestor. This function is safe to call as long as @cgrp + * and @ancestor are accessible. 
+ */ +static inline bool cgroup_is_descendant(struct cgroup *cgrp, + struct cgroup *ancestor) +{ + if (cgrp->root != ancestor->root || cgrp->level < ancestor->level) + return false; + return cgrp->ancestor_ids[ancestor->level] == ancestor->id; +} + /* no synchronization, the result can only be used as a hint */ static inline bool cgroup_is_populated(struct cgroup *cgrp) { @@ -539,13 +559,9 @@ static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { return -EINVAL; } static inline void cgroup_fork(struct task_struct *p) {} -static inline int cgroup_can_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) -{ return 0; } -static inline void cgroup_cancel_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) {} -static inline void cgroup_post_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) {} +static inline int cgroup_can_fork(struct task_struct *p) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} static inline void cgroup_exit(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} @@ -554,4 +570,45 @@ static inline int cgroup_init(void) { return 0; } #endif /* !CONFIG_CGROUPS */ +/* + * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data + * definition in cgroup-defs.h. + */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) +extern spinlock_t cgroup_sk_update_lock; +#endif + +void cgroup_sk_alloc_disable(void); +void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_free(struct sock_cgroup_data *skcd); + +static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) +{ +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) + unsigned long v; + + /* + * @skcd->val is 64bit but the following is safe on 32bit too as we + * just need the lower ulong to be written and read atomically. + */ + v = READ_ONCE(skcd->val); + + if (v & 1) + return &cgrp_dfl_root.cgrp; + + return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; +#else + return (struct cgroup *)(unsigned long)skcd->val; +#endif +} + +#else /* CONFIG_SOCK_CGROUP_DATA */ + +static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} + +#endif /* CONFIG_SOCK_CGROUP_DATA */ + #endif /* _LINUX_CGROUP_H */
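For illustration only (not part of the patch): a minimal sketch of how the two helpers added above are meant to compose. The my_sk_under_cgroup() name is invented, and it assumes a struct sock that embeds sk_cgrp_data as described in cgroup-defs.h (e.g. with <net/sock.h> available).

/*
 * Test whether the cgroup a socket was created in sits below @ancestor.
 * sock_cgroup_ptr() yields the cgroup recorded at socket creation (or the
 * default root once net_prio/net_cls are in use), and
 * cgroup_is_descendant() resolves ancestry in O(1) via level and
 * ancestor_ids[].
 */
static bool my_sk_under_cgroup(struct sock *sk, struct cgroup *ancestor)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return cgroup_is_descendant(cgrp, ancestor);
}

diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1a96fdaa33d5..0df0336acee9 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -6,14 +6,8 @@ /* * This file *must* be included with SUBSYS() defined. - * SUBSYS_TAG() is a noop if undefined. */ -#ifndef SUBSYS_TAG -#define __TMP_SUBSYS_TAG -#define SUBSYS_TAG(_x) -#endif - #if IS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif @@ -58,17 +52,10 @@ SUBSYS(net_prio) SUBSYS(hugetlb) #endif -/* - * Subsystems that implement the can_fork() family of callbacks. - */ -SUBSYS_TAG(CANFORK_START) - #if IS_ENABLED(CONFIG_CGROUP_PIDS) SUBSYS(pids) #endif -SUBSYS_TAG(CANFORK_END) - /* * The following subsystems are not supported on the default hierarchy. */ @@ -76,11 +63,6 @@ SUBSYS_TAG(CANFORK_END) SUBSYS(debug) #endif -#ifdef __TMP_SUBSYS_TAG -#undef __TMP_SUBSYS_TAG -#undef SUBSYS_TAG -#endif - /* * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.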
*/ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index c56988ac63f7..1143e38555a4 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -31,6 +31,7 @@ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ #define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ +#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ struct clk; struct clk_hw; @@ -44,7 +45,7 @@ struct dentry; * @rate: Requested clock rate. This field will be adjusted by * clock drivers according to hardware capabilities. * @min_rate: Minimum rate imposed by clk users. - * @max_rate: Maximum rate a imposed by clk users. + * @max_rate: Maximum rate imposed by clk users. * @best_parent_rate: The best parent rate a parent can provide to fulfill the * requested constraints. * @best_parent_hw: The most appropriate parent clock that fulfills the @@ -715,8 +716,7 @@ static inline int of_clk_add_provider(struct device_node *np, { return 0; } -#define of_clk_del_provider(np) \ - { while (0); } +static inline void of_clk_del_provider(struct device_node *np) {} static inline struct clk *of_clk_src_simple_get( struct of_phandle_args *clkspec, void *data) { @@ -741,8 +741,7 @@ static inline const char *of_clk_get_parent_name(struct device_node *np, { return NULL; } -#define of_clk_init(matches) \ - { while (0); } +static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ /* diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 223be696df27..75205df29b9c 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -286,6 +286,7 @@ struct ti_clk_features { #define TI_CLK_DPLL_HAS_FREQSEL BIT(0) #define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) #define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) +#define TI_CLK_ERRATA_I810 BIT(3) void ti_clk_setup_features(struct ti_clk_features *features); const struct ti_clk_features *ti_clk_get_features(void); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 7784b597e959..6013021a3b39 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -62,12 +62,18 @@ struct module; * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary * @owner: module reference, must be set by clocksource in modules + * + * Note: This struct is not used in hot paths of the timekeeping code + * because the timekeeper caches the hot path fields in its own data + * structure, so no cache line alignment is required. + * + * The pointer to the clocksource itself is handed to the read + * callback. If you need extra information there you can wrap struct + * clocksource into your own struct. Depending on the amount of + * information you need you should consider cache-line-aligning that + * structure. */ struct clocksource { - /* - * Hotpath data, fits in a single cache line when the - * clocksource itself is cacheline aligned.
- */ cycle_t (*read)(struct clocksource *cs); cycle_t mask; u32 mult; @@ -95,7 +101,7 @@ struct clocksource { cycle_t wd_last; #endif struct module *owner; -} ____cacheline_aligned; +}; /* * Clock source flags bits:: diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 4dac1036594f..00b042c49ccd 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -299,6 +299,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) +/** + * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + * + * The control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. ACQUIRE. + */ +#define smp_cond_acquire(cond) do { \ + while (!(cond)) \ + cpu_relax(); \ + smp_rmb(); /* ctrl + rmb := acquire */ \ +} while (0) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/linux/component.h b/include/linux/component.h index c00dcc302611..a559eebc0e0f 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -1,39 +1,48 @@ #ifndef COMPONENT_H #define COMPONENT_H +#include <linux/stddef.h> + struct device; struct component_ops { - int (*bind)(struct device *, struct device *, void *); - void (*unbind)(struct device *, struct device *, void *); + int (*bind)(struct device *comp, struct device *master, + void *master_data); + void (*unbind)(struct device *comp, struct device *master, + void *master_data); }; int component_add(struct device *, const struct component_ops *); void component_del(struct device *, const struct component_ops *); -int component_bind_all(struct device *, void *); -void component_unbind_all(struct device *, void *); +int component_bind_all(struct device *master, void *master_data); +void component_unbind_all(struct device *master, void *master_data); struct master; struct component_master_ops { - int (*add_components)(struct device *, struct master *); - int (*bind)(struct device *); - void (*unbind)(struct device *); + int (*bind)(struct device *master); + void (*unbind)(struct device *master); }; -int component_master_add(struct device *, const struct component_master_ops *); void component_master_del(struct device *, const struct component_master_ops *); -int component_master_add_child(struct master *master, - int (*compare)(struct device *, void *), void *compare_data); - struct component_match; int component_master_add_with_match(struct device *, const struct component_master_ops *, struct component_match *); -void component_match_add(struct device *, struct component_match **, +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); +static inline void component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + component_match_add_release(master, matchptr, NULL, compare, + compare_data); +} + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 758a029011b1..f7300d023dbe 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -51,6 +51,7 @@ struct module; struct configfs_item_operations; struct 
configfs_group_operations; struct configfs_attribute; +struct configfs_bin_attribute; struct configfs_subsystem; struct config_item { @@ -84,6 +85,7 @@ struct config_item_type { struct configfs_item_operations *ct_item_ops; struct configfs_group_operations *ct_group_ops; struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; }; /** @@ -154,6 +156,54 @@ static struct configfs_attribute _pfx##attr_##_name = { \ .store = _pfx##_name##_store, \ } +struct file; +struct vm_area_struct; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; /* std. attribute */ + void *cb_private; /* for user */ + size_t cb_max_size; /* max core size */ + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +#define CONFIGFS_BIN_ATTR(_pfx, _name, _priv, _maxsz) \ +static struct configfs_bin_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO | S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ + .write = _pfx##_name##_write, \ +} + +#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ +} + +#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .write = _pfx##_name##_write, \ +} + /* * If allow_link() exists, the item can symlink(2) out to other * items. If the item is a group, it may support mkdir(2). 
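For illustration only (not part of the patch): a minimal sketch of how the new binary attributes above are meant to be wired up. The mydev_* names and the fixed-size blob are invented, the usual <linux/configfs.h>/<linux/module.h> context is assumed, and the NULL-buffer size probe in the read handler reflects common usage rather than anything shown in this hunk.

static char mydev_blob[64];

/* configfs may first call read with a NULL buffer to ask for the size. */
static ssize_t mydev_description_read(struct config_item *item, void *buf,
				      size_t count)
{
	if (!buf)
		return sizeof(mydev_blob);
	if (count > sizeof(mydev_blob))
		count = sizeof(mydev_blob);
	memcpy(buf, mydev_blob, count);
	return count;
}

static ssize_t mydev_description_write(struct config_item *item,
				       const void *buf, size_t count)
{
	if (count > sizeof(mydev_blob))
		return -EFBIG;
	memcpy(mydev_blob, buf, count);
	return count;
}

/* Expands to a struct configfs_bin_attribute named mydev_attr_description
 * that points at the two handlers above. */
CONFIGFS_BIN_ATTR(mydev_, description, NULL, sizeof(mydev_blob));

static struct configfs_bin_attribute *mydev_bin_attrs[] = {
	&mydev_attr_description,
	NULL,
};

static struct config_item_type mydev_item_type = {
	.ct_owner	= THIS_MODULE,
	.ct_bin_attrs	= mydev_bin_attrs,
};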
diff --git a/include/linux/console.h b/include/linux/console.h index bd194343c346..ea731af2451e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -150,6 +150,7 @@ extern int console_trylock(void); extern void console_unlock(void); extern void console_conditional_schedule(void); extern void console_unblank(void); +extern void console_flush_on_panic(void); extern struct tty_driver *console_device(int *); extern void console_stop(struct console *); extern void console_start(struct console *); diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 68b575afe5f5..d259274238db 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -86,7 +86,7 @@ static inline void context_tracking_init(void) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN static inline void guest_enter(void) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_guest_enter(current); else current->flags |= PF_VCPU; @@ -100,7 +100,7 @@ static inline void guest_exit(void) if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_GUEST); - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index ee956c528fab..1d34fe68f48a 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -22,12 +22,12 @@ struct context_tracking { }; #ifdef CONFIG_CONTEXT_TRACKING -extern struct static_key context_tracking_enabled; +extern struct static_key_false context_tracking_enabled; DECLARE_PER_CPU(struct context_tracking, context_tracking); static inline bool context_tracking_is_enabled(void) { - return static_key_false(&context_tracking_enabled); + return static_branch_unlikely(&context_tracking_enabled); } static inline bool context_tracking_cpu_is_enabled(void) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 177c7680c1a8..88a4215125bc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -278,7 +278,6 @@ struct cpufreq_driver { struct freq_attr **attr; /* platform specific boost support code */ - bool boost_supported; bool boost_enabled; int (*set_boost)(int state); }; @@ -574,7 +573,6 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); #ifdef CONFIG_CPU_FREQ int cpufreq_boost_trigger_state(int state); -int cpufreq_boost_supported(void); int cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); @@ -583,10 +581,6 @@ static inline int cpufreq_boost_trigger_state(int state) { return 0; } -static inline int cpufreq_boost_supported(void) -{ - return 0; -} static inline int cpufreq_boost_enabled(void) { return 0; diff --git a/include/linux/dca.h b/include/linux/dca.h index d27a7a05718d..ad956c2e07a8 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb); struct dca_provider { struct list_head node; - struct dca_ops *ops; + const struct dca_ops *ops; struct device *cd; int id; }; @@ -53,7 +53,8 @@ struct dca_ops { int (*dev_managed) (struct dca_provider *, struct device *); }; -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider 
*dca, struct device *dev); void unregister_dca_provider(struct dca_provider *dca, struct device *dev); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d67ae119cf4e..7781ce110503 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -27,10 +27,10 @@ struct vfsmount; /* The hash is always the low bits of hash_len */ #ifdef __LITTLE_ENDIAN - #define HASH_LEN_DECLARE u32 hash; u32 len; + #define HASH_LEN_DECLARE u32 hash; u32 len #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) #else - #define HASH_LEN_DECLARE u32 len; u32 hash; + #define HASH_LEN_DECLARE u32 len; u32 hash #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) #endif diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h new file mode 100644 index 000000000000..f7fa76ae1a9b --- /dev/null +++ b/include/linux/delayed_call.h @@ -0,0 +1,34 @@ +#ifndef _DELAYED_CALL_H +#define _DELAYED_CALL_H + +/* + * Poor man's closures; I wish we could've done them sanely polymorphic, + * but... + */ + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +#define DEFINE_DELAYED_CALL(name) struct delayed_call name = {NULL, NULL} + +/* I really wish we had closures with sane typechecking... */ +static inline void set_delayed_call(struct delayed_call *call, + void (*fn)(void *), void *arg) +{ + call->fn = fn; + call->arg = arg; +} + +static inline void do_delayed_call(struct delayed_call *call) +{ + if (call->fn) + call->fn(call->arg); +} + +static inline void clear_delayed_call(struct delayed_call *call) +{ + call->fn = NULL; +} +#endif diff --git a/include/linux/device.h b/include/linux/device.h index b8f411b57dcb..f627ba20a46c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -191,6 +191,7 @@ extern int bus_unregister_notifier(struct bus_type *bus, unbound */ #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound from the device */ +#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ extern struct kset *bus_get_kset(struct bus_type *bus); extern struct klist *bus_get_device_klist(struct bus_type *bus); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..16a1cad30c33 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -607,11 +607,38 @@ enum dmaengine_alignment { }; /** + * struct dma_slave_map - associates slave device and it's slave channel with + * parameter to be used by a filter function + * @devname: name of the device + * @slave: slave channel name + * @param: opaque parameter to pass to struct dma_filter.fn + */ +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +/** + * struct dma_filter - information for slave device/channel to filter_fn/param + * mapping + * @fn: filter function callback + * @mapcnt: number of slave device/channel in the map + * @map: array of channel to filter mapping data + */ +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +/** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list + * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability @@ 
-654,11 +681,14 @@ enum dmaengine_alignment { * paused. Returns 0 or an error code * @device_terminate_all: Aborts all transfers on a channel. Returns 0 * or an error code + * @device_synchronize: Synchronizes the termination of transfers to the + * current context. * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware + * @descriptor_reuse: a submitted transfer can be resubmitted after completion */ struct dma_device { @@ -666,6 +696,7 @@ struct dma_device { unsigned int privatecnt; struct list_head channels; struct list_head global_node; + struct dma_filter filter; dma_cap_mask_t cap_mask; unsigned short max_xor; unsigned short max_pq; @@ -681,6 +712,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); @@ -737,6 +769,7 @@ struct dma_device { int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); + void (*device_synchronize)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -828,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } +/** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * This function is DEPRECATED; use either dmaengine_terminate_sync() or + * dmaengine_terminate_async() instead. + */ static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) @@ -836,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) return -ENOSYS; } +/** + * dmaengine_terminate_async() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending descriptors + * that have previously been submitted to the channel. It is not guaranteed + * though that the transfer for the active descriptor has stopped when the + * function returns. Furthermore it is possible that the complete callback of a + * submitted transfer is still running when this function returns. + * + * dmaengine_synchronize() needs to be called before it is safe to free + * any memory that is accessed by previously submitted descriptors or before + * freeing any resources accessed from within the completion callback of any + * previously submitted descriptors. + * + * This function can be called from atomic context as well as from within a + * complete callback of a descriptor submitted on the same channel. + * + * If neither of the two conditions above applies, consider using + * dmaengine_terminate_sync() instead. + */ +static inline int dmaengine_terminate_async(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -EINVAL; +} + +/** + * dmaengine_synchronize() - Synchronize DMA channel termination + * @chan: The channel to synchronize + * + * Synchronizes the termination of the DMA channel to the current context.
When this + * function returns, it is guaranteed that all transfers for previously issued + * descriptors have stopped and it is safe to free the memory associated + * with them. Furthermore it is guaranteed that all complete callback functions + * for a previously submitted descriptor have finished running and it is safe to + * free resources accessed from within the complete callbacks. + * + * The behavior of this function is undefined if dma_async_issue_pending() has + * been called between dmaengine_terminate_async() and this function. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline void dmaengine_synchronize(struct dma_chan *chan) +{ + might_sleep(); + + if (chan->device->device_synchronize) + chan->device->device_synchronize(chan); +} + +/** + * dmaengine_terminate_sync() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending transfers + * that have previously been submitted to the channel. It is similar to + * dmaengine_terminate_async() but guarantees that the DMA transfer has actually + * stopped and that all complete callbacks have finished running when the + * function returns. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline int dmaengine_terminate_sync(struct dma_chan *chan) +{ + int ret; + + ret = dmaengine_terminate_async(chan); + if (ret) + return ret; + + dmaengine_synchronize(chan); + + return 0; +} + static inline int dmaengine_pause(struct dma_chan *chan) { if (chan->device->device_pause) @@ -1140,9 +1262,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); -struct dma_chan *dma_request_slave_channel_reason(struct device *dev, - const char *name); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); + +struct dma_chan *dma_request_chan(struct device *dev, const char *name); +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); + void dma_release_channel(struct dma_chan *chan); int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); #else @@ -1166,16 +1290,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, { return NULL; } -static inline struct dma_chan *dma_request_slave_channel_reason( - struct device *dev, const char *name) -{ - return ERR_PTR(-ENODEV); -} static inline struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) { return NULL; } +static inline struct dma_chan *dma_request_chan(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENODEV); +} +static inline struct dma_chan *dma_request_chan_by_mask( + const dma_cap_mask_t *mask) +{ + return ERR_PTR(-ENODEV); +} static inline void dma_release_channel(struct dma_chan *chan) { } @@ -1186,6 +1315,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, } #endif +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps;
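For illustration only (not part of the patch): a minimal sketch of the intended replacement pattern for the deprecated dmaengine_terminate_all(). The mydrv_* names and fields are invented, and the usual dmaengine/slab includes are assumed.

struct mydrv_state {
	struct dma_chan *chan;
	void *rx_buf;
};

/* May run in IRQ context or from a completion callback: only requests
 * termination, it does not wait for it. */
static void mydrv_abort(struct mydrv_state *st)
{
	dmaengine_terminate_async(st->chan);
}

/* Process-context teardown: dmaengine_terminate_sync() is
 * dmaengine_terminate_async() followed by dmaengine_synchronize(), so once
 * it returns no descriptor or completion callback can still touch rx_buf. */
static void mydrv_teardown(struct mydrv_state *st)
{
	dmaengine_terminate_sync(st->chan);
	kfree(st->rx_buf);
	dma_release_channel(st->chan);
}

diff --git a/include/linux/dmi.h b/include/linux/dmi.h index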
5055ac34142d..5e9c74cf8894 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -22,6 +22,7 @@ enum dmi_device_type { DMI_DEV_TYPE_IPMI = -1, DMI_DEV_TYPE_OEM_STRING = -2, DMI_DEV_TYPE_DEV_ONBOARD = -3, + DMI_DEV_TYPE_DEV_SLOT = -4, }; enum dmi_entry_type { diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h index 82a16527b367..ff8b55359648 100644 --- a/include/linux/dqblk_qtree.h +++ b/include/linux/dqblk_qtree.h @@ -34,7 +34,7 @@ struct qtree_mem_dqinfo { unsigned int dqi_entry_size; /* Size of quota entry in quota file */ unsigned int dqi_usable_bs; /* Space usable in block for quota data */ unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ - struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ + const struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ }; int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); diff --git a/include/linux/edac.h b/include/linux/edac.h index 4fe67b853de0..9e0d78966552 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -28,12 +28,10 @@ struct device; extern int edac_op_state; extern int edac_err_assert; extern atomic_t edac_handlers; -extern struct bus_type edac_subsys; extern int edac_handler_set(void); extern void edac_atomic_assert_error(void); extern struct bus_type *edac_get_sysfs_subsys(void); -extern void edac_put_sysfs_subsys(void); enum { EDAC_REPORTING_ENABLED, @@ -237,8 +235,10 @@ enum mem_type { #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) #define MEM_FLAG_XDR BIT(MEM_XDR) -#define MEM_FLAG_DDR3 BIT(MEM_DDR3) -#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_DDR3 BIT(MEM_DDR3) +#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_DDR4 BIT(MEM_DDR4) +#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) /** * enum edac-type - Error Detection and Correction capabilities and mode diff --git a/include/linux/err.h b/include/linux/err.h index a729120644d5..56762ab41713 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -37,7 +37,7 @@ static inline bool __must_check IS_ERR(__force const void *ptr) static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) { - return !ptr || IS_ERR_VALUE((unsigned long)ptr); + return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr); } /** diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index eb049c622208..37ff4a6faa9a 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,6 +29,9 @@ #include <asm/bitsperlong.h> #ifdef __KERNEL__ +struct device; +int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); +unsigned char *arch_get_platform_get_mac_address(void); u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/evm.h b/include/linux/evm.h index 1fcb88ca88de..35ed9a8a403a 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -14,6 +14,7 @@ struct integrity_iint_cache; #ifdef CONFIG_EVM +extern int evm_set_key(void *key, size_t keylen); extern enum integrity_status evm_verifyxattr(struct dentry *dentry, const char *xattr_name, void *xattr_value, @@ -42,6 +43,12 @@ static inline int posix_xattr_acl(const char *xattrname) } #endif #else + +static inline int evm_set_key(void *key, size_t keylen) +{ + return -EOPNOTSUPP; +} + #ifdef CONFIG_INTEGRITY static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, const char 
*xattr_name, diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 25c6324a0dd0..e59c3be92106 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -51,6 +51,7 @@ #define MAX_ACTIVE_DATA_LOGS 8 #define VERSION_LEN 256 +#define MAX_VOLUME_NAME 512 /* * For superblock @@ -84,7 +85,7 @@ struct f2fs_super_block { __le32 node_ino; /* node inode number */ __le32 meta_ino; /* meta inode number */ __u8 uuid[16]; /* 128-bit uuid for volume */ - __le16 volume_name[512]; /* volume name */ + __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ __le32 cp_payload; diff --git a/include/linux/fb.h b/include/linux/fb.h index 3d003805aac3..55433f86f0a3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -175,9 +175,27 @@ struct fb_blit_caps { u32 flags; }; +#ifdef CONFIG_FB_NOTIFY extern int fb_register_client(struct notifier_block *nb); extern int fb_unregister_client(struct notifier_block *nb); extern int fb_notifier_call_chain(unsigned long val, void *v); +#else +static inline int fb_register_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_unregister_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_notifier_call_chain(unsigned long val, void *v) +{ + return 0; +}; +#endif + /* * Pixmap structure definition * diff --git a/include/linux/filter.h b/include/linux/filter.h index 4165e9ac9e36..43aa1f8855c7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -350,25 +350,43 @@ struct sk_filter { #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN + +static inline u8 *bpf_skb_cb(struct sk_buff *skb) +{ + /* eBPF programs may read/write skb->cb[] area to transfer meta + * data between tail calls. Since this also needs to work with + * tc, that scratch memory is mapped to qdisc_skb_cb's data area. + * + * In some socket filter cases, the cb unfortunately needs to be + * saved/restored so that protocol specific skb->cb[] data won't + * be lost. In any case, due to unpriviledged eBPF programs + * attached to sockets, we need to clear the bpf_skb_cb() area + * to not leak previous contents to user space. 
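The two helpers built on bpf_skb_cb() in the next hunk wrap the save/zero/restore sequence described in that comment. A hedged sketch of how a socket-filter caller would use the preserving variant; the function name is a placeholder and struct sk_filter is assumed to hold its program in ->prog, as in this header:

#include <linux/filter.h>
#include <linux/skbuff.h>

static u32 foo_run_sk_filter(const struct sk_filter *filter, struct sk_buff *skb)
{
	/* Protocol code (e.g. TCP) keeps state in skb->cb[], and the eBPF
	 * program is allowed to scribble over the same bytes. The helper
	 * zeroes the BPF-visible area before the program runs and restores
	 * the original contents afterwards; bpf_prog_run_clear_cb() is the
	 * cheaper choice when nothing in cb[] needs to survive. */
	return bpf_prog_run_save_cb(filter->prog, skb);
}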
+ */ + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + FIELD_SIZEOF(struct qdisc_skb_cb, data)); + + return qdisc_skb_cb(skb)->data; +} + static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { - u8 *cb_data = qdisc_skb_cb(skb)->data; - u8 saved_cb[QDISC_CB_PRIV_LEN]; + u8 *cb_data = bpf_skb_cb(skb); + u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; - BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != - QDISC_CB_PRIV_LEN); - if (unlikely(prog->cb_access)) { - memcpy(saved_cb, cb_data, sizeof(saved_cb)); - memset(cb_data, 0, sizeof(saved_cb)); + memcpy(cb_saved, cb_data, sizeof(cb_saved)); + memset(cb_data, 0, sizeof(cb_saved)); } res = BPF_PROG_RUN(prog, skb); if (unlikely(prog->cb_access)) - memcpy(cb_data, saved_cb, sizeof(saved_cb)); + memcpy(cb_data, cb_saved, sizeof(cb_saved)); return res; } @@ -376,10 +394,11 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { - u8 *cb_data = qdisc_skb_cb(skb)->data; + u8 *cb_data = bpf_skb_cb(skb); if (unlikely(prog->cb_access)) - memset(cb_data, 0, QDISC_CB_PRIV_LEN); + memset(cb_data, 0, BPF_SKB_CB_LEN); + return BPF_PROG_RUN(prog, skb); } @@ -447,6 +466,8 @@ void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); +int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); @@ -493,6 +514,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp) #define BPF_ANC BIT(15) +static inline bool bpf_needs_clear_a(const struct sock_filter *first) +{ + switch (first->code) { + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + return false; + + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: + if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X) + return true; + return false; + + default: + return true; + } +} + static inline u16 bpf_anc_helper(const struct sock_filter *ftest) { BUG_ON(ftest->code & BPF_ANC); diff --git a/include/linux/fs.h b/include/linux/fs.h index 3aa514254161..eb73d74ed992 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -31,6 +31,7 @@ #include <linux/blk_types.h> #include <linux/workqueue.h> #include <linux/percpu-rwsem.h> +#include <linux/delayed_call.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -482,6 +483,9 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; +#ifdef CONFIG_FS_DAX + int bd_map_count; +#endif }; /* @@ -1042,7 +1046,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); extern int fcntl_getlease(struct file *filp); /* fs/locks.c */ -void locks_free_lock_context(struct file_lock_context *ctx); +void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); extern void locks_init_lock(struct file_lock *); extern struct file_lock * locks_alloc_lock(void); @@ -1103,7 +1107,7 @@ static inline int fcntl_getlease(struct file *filp) } static inline void -locks_free_lock_context(struct file_lock_context *ctx) +locks_free_lock_context(struct inode *inode) { } @@ -1629,16 +1633,21 @@ struct file_operations { #ifndef CONFIG_MMU unsigned 
(*mmap_capabilities)(struct file *); #endif + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, + loff_t, size_t, unsigned int); + int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, + u64); + ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *, + u64); }; struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); - const char * (*follow_link) (struct dentry *, void **); + const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); int (*permission) (struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); - void (*put_link) (struct inode *, void *); int (*create) (struct inode *,struct dentry *, umode_t, bool); int (*link) (struct dentry *,struct inode *,struct dentry *); @@ -1680,6 +1689,12 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *); extern ssize_t vfs_writev(struct file *, const struct iovec __user *, unsigned long, loff_t *); +extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, + loff_t, size_t, unsigned int); +extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, u64 len); +extern int vfs_dedupe_file_range(struct file *file, + struct file_dedupe_range *same); struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); @@ -2027,12 +2042,9 @@ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) -#define FLOCK_VERIFY_READ 1 -#define FLOCK_VERIFY_WRITE 2 - -#ifdef CONFIG_FILE_LOCKING +#ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); -extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); +extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); /* * Candidates for mandatory locking have the setgid bit set @@ -2062,19 +2074,59 @@ static inline int locks_verify_locked(struct file *file) } static inline int locks_verify_truncate(struct inode *inode, - struct file *filp, + struct file *f, loff_t size) { - if (inode->i_flctx && mandatory_lock(inode)) - return locks_mandatory_area( - FLOCK_VERIFY_WRITE, inode, filp, - size < inode->i_size ? size : inode->i_size, - (size < inode->i_size ? 
inode->i_size - size - : size - inode->i_size) - ); + if (!inode->i_flctx || !mandatory_lock(inode)) + return 0; + + if (size < inode->i_size) { + return locks_mandatory_area(inode, f, size, inode->i_size - 1, + F_WRLCK); + } else { + return locks_mandatory_area(inode, f, inode->i_size, size - 1, + F_WRLCK); + } +} + +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(struct inode *inode, struct file *filp, + loff_t start, loff_t end, unsigned char type) +{ return 0; } +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + +#ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { /* @@ -2136,39 +2188,6 @@ static inline int break_layout(struct inode *inode, bool wait) } #else /* !CONFIG_FILE_LOCKING */ -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(int rw, struct inode *inode, - struct file *filp, loff_t offset, - size_t count) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; @@ -2264,6 +2283,14 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); +#ifdef CONFIG_FS_DAX +extern bool blkdev_dax_capable(struct block_device *bdev); +#else +static inline bool blkdev_dax_capable(struct block_device *bdev) +{ + return false; +} +#endif extern struct super_block *blockdev_superblock; @@ -2291,9 +2318,9 @@ static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void { } -static inline int sb_is_blkdev_sb(struct super_block *sb) +static inline bool sb_is_blkdev_sb(struct super_block *sb) { - return 0; + return false; } #endif extern int sync_filesystem(struct super_block *); @@ -2371,7 +2398,7 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); -extern int is_bad_inode(struct inode *); +extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK /* @@ -2532,8 +2559,8 @@ extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ -extern int is_subdir(struct dentry *, struct dentry *); -extern int path_is_under(struct path *, struct path *); +extern bool is_subdir(struct dentry *, struct dentry *); +extern bool path_is_under(struct path *, struct path *); extern char *file_path(struct file *, char *, int); @@ -2660,6 +2687,8 @@ extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, int whence, loff_t maxsize, loff_t eof); extern 
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size); +extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); +extern loff_t no_seek_end_llseek(struct file *, loff_t, int); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); @@ -2736,14 +2765,14 @@ extern const struct file_operations generic_ro_fops; extern int readlink_copy(char __user *, int, const char *); extern int page_readlink(struct dentry *, char __user *, int); -extern const char *page_follow_link_light(struct dentry *, void **); -extern void page_put_link(struct inode *, void *); +extern const char *page_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern void page_put_link(void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; -extern void kfree_put_link(struct inode *, void *); -extern void free_page_put_link(struct inode *, void *); +extern void kfree_link(void *); extern int generic_readlink(struct dentry *, char __user *, int); extern void generic_fillattr(struct inode *, struct kstat *); int vfs_getattr_nosec(struct path *path, struct kstat *stat); @@ -2754,7 +2783,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes); loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); -const char *simple_follow_link(struct dentry *, void **); +const char *simple_get_link(struct dentry *, struct inode *, + struct delayed_call *); extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); @@ -2764,8 +2794,6 @@ extern int vfs_lstat(const char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); extern int vfs_fstatat(int , const char __user *, struct kstat *, int); -extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, - unsigned long arg); extern int __generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, @@ -2963,7 +2991,7 @@ int __init get_filesystem_list(char *buf); #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ (flag & __FMODE_NONOTIFY))) -static inline int is_sxid(umode_t mode) +static inline bool is_sxid(umode_t mode) { return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } @@ -3025,5 +3053,6 @@ static inline bool dir_relax(struct inode *inode) } extern bool path_noexec(const struct path *path); +extern void inode_nohighmem(struct inode *inode); #endif /* _LINUX_FS_H */ diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h new file mode 100644 index 000000000000..90d64d4ec1a9 --- /dev/null +++ b/include/linux/fsl/edac.h @@ -0,0 +1,8 @@ +#ifndef FSL_EDAC_H +#define FSL_EDAC_H + +struct mpc85xx_edac_pci_plat_data { + struct device_node *of_node; +}; + +#endif diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 533c4408529a..6b7e89f45aa4 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -220,7 +220,10 @@ struct fsnotify_mark { /* List of marks by group->i_fsnotify_marks. 
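With ->follow_link()/->put_link() folded into ->get_link(), cleanup is handed over through a struct delayed_call instead of a second callback. A sketch of what a simple filesystem's symlink operation might look like under the new signature; foo_symlink_target() and the other foo_* names are hypothetical, while kfree_link() and set_delayed_call() are the helpers added by this series:

#include <linux/fs.h>
#include <linux/delayed_call.h>
#include <linux/err.h>
#include <linux/slab.h>

static const char *foo_get_link(struct dentry *dentry, struct inode *inode,
				struct delayed_call *done)
{
	char *target;

	if (!dentry)			/* RCU-walk: we would need to sleep */
		return ERR_PTR(-ECHILD);

	target = kstrdup(foo_symlink_target(inode), GFP_KERNEL); /* hypothetical lookup */
	if (!target)
		return ERR_PTR(-ENOMEM);

	/* The VFS invokes kfree_link(target) once it is done with the body,
	 * taking over the role of the removed ->put_link(). */
	set_delayed_call(done, kfree_link, target);
	return target;
}

static const struct inode_operations foo_symlink_iops = {
	.readlink	= generic_readlink,
	.get_link	= foo_get_link,
};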
Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ - struct list_head g_list; + union { + struct list_head g_list; + struct rcu_head g_rcu; + }; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; /* List of marks for inode / vfsmount [obj_lock] */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index eae6548efbf0..0639dcc98195 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically * allocated ftrace_ops which need special care - * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops - * could be controled by following calls: + * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops + * could be controlled by following calls: * ftrace_function_local_enable * ftrace_function_local_disable * SAVE_REGS - The ftrace_ops wants regs saved at each function called @@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_DYNAMIC = 1 << 1, - FTRACE_OPS_FL_CONTROL = 1 << 2, + FTRACE_OPS_FL_PER_CPU = 1 << 2, FTRACE_OPS_FL_SAVE_REGS = 1 << 3, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, @@ -134,6 +134,7 @@ enum { FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, FTRACE_OPS_FL_IPMODIFY = 1 << 13, FTRACE_OPS_FL_PID = 1 << 14, + FTRACE_OPS_FL_RCU = 1 << 15, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -146,11 +147,11 @@ struct ftrace_ops_hash { #endif /* - * Note, ftrace_ops can be referenced outside of RCU protection. - * (Although, for perf, the control ops prevent that). If ftrace_ops is - * allocated and not part of kernel core data, the unregistering of it will - * perform a scheduling on all CPUs to make sure that there are no more users. - * Depending on the load of the system that may take a bit of time. + * Note, ftrace_ops can be referenced outside of RCU protection, unless + * the RCU flag is set. If ftrace_ops is allocated and not part of kernel + * core data, the unregistering of it will perform a scheduling on all CPUs + * to make sure that there are no more users. Depending on the load of the + * system that may take a bit of time. * * Any private data added must also take care not to be freed and if private * data is added to a ftrace_ops that is in core code, the user of the @@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops); void clear_ftrace_function(void); /** - * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu + * ftrace_function_local_enable - enable ftrace_ops on current cpu * * This function enables tracing on current cpu by decreasing * the per cpu control variable. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
*/ static inline void ftrace_function_local_enable(struct ftrace_ops *ops) { - if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) return; (*this_cpu_ptr(ops->disabled))--; } /** - * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu + * ftrace_function_local_disable - disable ftrace_ops on current cpu * - * This function enables tracing on current cpu by decreasing + * This function disables tracing on current cpu by increasing * the per cpu control variable. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. */ static inline void ftrace_function_local_disable(struct ftrace_ops *ops) { - if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) return; (*this_cpu_ptr(ops->disabled))++; @@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops) * * This function returns value of ftrace_ops::disabled on current cpu. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. */ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) { - WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); + WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)); return *this_cpu_ptr(ops->disabled); } @@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void); struct dyn_ftrace; +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN, + FTRACE_BUG_INIT, + FTRACE_BUG_NOP, + FTRACE_BUG_CALL, + FTRACE_BUG_UPDATE, +}; +extern enum ftrace_bug_type ftrace_bug_type; + +/* + * Archs can set this to point to a variable that holds the value that was + * expected at the call site before calling ftrace_bug(). + */ +extern const void *ftrace_expected; + void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; @@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr); * REGS - the record wants the function to save regs * REGS_EN - the function is set up to save regs. * IPMODIFY - the record allows for the IP address to be changed. + * DISABLED - the record is not ready to be touched yet * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flag REGS is set. 
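The rename from CONTROL to PER_CPU does not change the calling convention: the owner of the ops toggles it per CPU with preemption disabled. A small sketch of a client under the new flag name; the callback body and the foo_* names are placeholders:

#include <linux/ftrace.h>
#include <linux/preempt.h>

static void foo_trace_callback(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
{
	/* per call-site work */
}

static struct ftrace_ops foo_trace_ops = {
	.func	= foo_trace_callback,
	.flags	= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RECURSION_SAFE,
};

/* After register_ftrace_function(&foo_trace_ops), the callback can be
 * muted on one CPU without touching the others: */
static void foo_trace_pause_this_cpu(void)
{
	preempt_disable();
	ftrace_function_local_disable(&foo_trace_ops);
	/* ... section that must not be traced on this CPU ... */
	ftrace_function_local_enable(&foo_trace_ops);
	preempt_enable();
}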
When the function has been @@ -355,10 +372,11 @@ enum { FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), FTRACE_FL_IPMODIFY = (1UL << 26), + FTRACE_FL_DISABLED = (1UL << 25), }; -#define FTRACE_REF_MAX_SHIFT 26 -#define FTRACE_FL_BITS 6 +#define FTRACE_REF_MAX_SHIFT 25 +#define FTRACE_FL_BITS 7 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) @@ -586,6 +604,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); extern void ftrace_module_init(struct module *mod); +extern void ftrace_release_mod(struct module *mod); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 7ff168d06967..29d4385903d4 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -30,10 +30,12 @@ #ifndef __GENALLOC_H__ #define __GENALLOC_H__ +#include <linux/types.h> #include <linux/spinlock_types.h> struct device; struct device_node; +struct gen_pool; /** * Allocation callback function type definition @@ -47,7 +49,7 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data); + void *data, struct gen_pool *pool); /* * General purpose special memory pool descriptor. @@ -75,6 +77,20 @@ struct gen_pool_chunk { unsigned long bits[0]; /* bitmap for allocating memory chunk */ }; +/* + * gen_pool data descriptor for gen_pool_first_fit_align. + */ +struct genpool_data_align { + int align; /* alignment by bytes for starting address */ +}; + +/* + * gen_pool data descriptor for gen_pool_fixed_alloc. + */ +struct genpool_data_fixed { + unsigned long offset; /* The offset of the specific region */ +}; + extern struct gen_pool *gen_pool_create(int, int); extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, @@ -98,6 +114,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, } extern void gen_pool_destroy(struct gen_pool *); extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); +extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t, + genpool_algo_t algo, void *data); extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); @@ -110,14 +128,26 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data); extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, - unsigned long start, unsigned int nr, void *data); + unsigned long start, unsigned int nr, void *data, + struct gen_pool *pool); + +extern unsigned long gen_pool_fixed_alloc(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data, struct gen_pool *pool); + +extern unsigned long gen_pool_first_fit_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data, struct gen_pool *pool); + extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data); + void *data, struct gen_pool *pool); extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, - unsigned long start, unsigned int nr, void *data); + unsigned long start, 
unsigned int nr, void *data, + struct gen_pool *pool); + extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid, const char *name); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 847cc1d91634..5c706765404a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -162,6 +162,7 @@ struct disk_part_tbl { }; struct disk_events; +struct badblocks; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -213,6 +214,7 @@ struct gendisk { struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; + struct badblocks *bb; }; static inline struct gendisk *part_to_disk(struct hd_struct *part) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8942af0813e3..28ad5f6494b0 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -30,7 +30,7 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_ATOMIC 0x80000u -#define ___GFP_NOACCOUNT 0x100000u +#define ___GFP_ACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -73,11 +73,15 @@ struct vm_area_struct; * * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. + * + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant + * to kmem allocations). */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) /* * Watermark modifiers -- controls access to emergency reserves @@ -104,7 +108,6 @@ struct vm_area_struct; #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) -#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* * Reclaim modifiers @@ -197,6 +200,9 @@ struct vm_area_struct; * GFP_KERNEL is typical for kernel-internal allocations. The caller requires * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * + * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * * GFP_NOWAIT is for kernel allocations that should not stall for direct * reclaim, start physical IO or use any filesystem callback. 
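GFP_KERNEL_ACCOUNT, defined in the following hunk as GFP_KERNEL | __GFP_ACCOUNT, inverts the old opt-out __GFP_NOACCOUNT scheme: allocations are charged to kmemcg only when explicitly requested. A sketch of the intended use for an object whose count is driven from user space; struct foo_ctx is illustrative:

#include <linux/gfp.h>
#include <linux/slab.h>

struct foo_ctx {			/* e.g. one per open file descriptor */
	int state;
};

static struct foo_ctx *foo_ctx_alloc(void)
{
	/* Charged to the allocating task's memory cgroup; plain GFP_KERNEL
	 * allocations are no longer accounted by default. */
	return kzalloc(sizeof(struct foo_ctx), GFP_KERNEL_ACCOUNT);
}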
* @@ -236,6 +242,7 @@ struct vm_area_struct; */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) @@ -271,7 +278,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { - return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); + return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } #ifdef CONFIG_HIGHMEM @@ -377,10 +384,11 @@ static inline enum zone_type gfp_zone(gfp_t flags) static inline int gfp_zonelist(gfp_t flags) { - if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE)) - return 1; - - return 0; +#ifdef CONFIG_NUMA + if (unlikely(flags & __GFP_THISNODE)) + return ZONELIST_NOFALLBACK; +#endif + return ZONELIST_FALLBACK; } /* diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index d1baebf350d8..82fda487453f 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -8,6 +8,7 @@ #include <linux/irqdomain.h> #include <linux/lockdep.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/kconfig.h> struct device; struct gpio_desc; @@ -20,9 +21,10 @@ struct seq_file; /** * struct gpio_chip - abstract a GPIO controller * @label: for diagnostics - * @dev: optional device providing the GPIOs + * @parent: optional parent device providing the GPIOs * @cdev: class device used by sysfs interface (may be NULL) * @owner: helps prevent removal of modules exporting active GPIOs + * @data: per-instance data assigned by the driver * @list: links gpio_chips together for traversal * @request: optional hook for chip-specific activation, such as * enabling module power and clock; may sleep @@ -32,8 +34,7 @@ struct seq_file; * (same as GPIOF_DIR_XXX), or negative error * @direction_input: configures signal "offset" as input, or returns error * @direction_output: configures signal "offset" as output, or returns error - * @get: returns value for signal "offset"; for output signals this - * returns either the value actually sensed, or zero + * @get: returns value for signal "offset", 0=low, 1=high, or negative error * @set: assigns output value for signal "offset" * @set_multiple: assigns output values for multiple signals defined by "mask" * @set_debounce: optional hook for setting debounce time for specified gpio in @@ -65,6 +66,23 @@ struct seq_file; * registers. * @irq_not_threaded: flag must be set if @can_sleep is set but the * IRQs don't need to be threaded + * @read_reg: reader function for generic GPIO + * @write_reg: writer function for generic GPIO + * @pin2mask: some generic GPIO controllers work with the big-endian bits + * notation, e.g. in a 8-bits register, GPIO7 is the least significant + * bit. This callback assigns the right bit mask. + * @reg_dat: data (in) register for generic GPIO + * @reg_set: output set register (out=high) for generic GPIO + * @reg_clk: output clear register (out=low) for generic GPIO + * @reg_dir: direction setting register for generic GPIO + * @bgpio_bits: number of register bits used for a generic GPIO i.e. + * <register width> * 8 + * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep + * shadowed and real data registers writes together. + * @bgpio_data: shadowed data register for generic GPIO to clear/set bits + * safely. 
+ * @bgpio_dir: shadowed direction register for generic GPIO to clear/set + * direction safely. * @irqchip: GPIO IRQ chip impl, provided by GPIO driver * @irqdomain: Interrupt translation domain; responsible for mapping * between GPIO hwirq number and linux irq number @@ -89,9 +107,10 @@ struct seq_file; */ struct gpio_chip { const char *label; - struct device *dev; + struct device *parent; struct device *cdev; struct module *owner; + void *data; struct list_head list; int (*request)(struct gpio_chip *chip, @@ -127,6 +146,20 @@ struct gpio_chip { bool can_sleep; bool irq_not_threaded; +#if IS_ENABLED(CONFIG_GPIO_GENERIC) + unsigned long (*read_reg)(void __iomem *reg); + void (*write_reg)(void __iomem *reg, unsigned long data); + unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin); + void __iomem *reg_dat; + void __iomem *reg_set; + void __iomem *reg_clr; + void __iomem *reg_dir; + int bgpio_bits; + spinlock_t bgpio_lock; + unsigned long bgpio_data; + unsigned long bgpio_dir; +#endif + #ifdef CONFIG_GPIOLIB_IRQCHIP /* * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib @@ -166,7 +199,11 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset); /* add/remove chips */ -extern int gpiochip_add(struct gpio_chip *chip); +extern int gpiochip_add_data(struct gpio_chip *chip, void *data); +static inline int gpiochip_add(struct gpio_chip *chip) +{ + return gpiochip_add_data(chip, NULL); +} extern void gpiochip_remove(struct gpio_chip *chip); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); @@ -175,8 +212,36 @@ extern struct gpio_chip *gpiochip_find(void *data, int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +/* get driver data */ +static inline void *gpiochip_get_data(struct gpio_chip *chip) +{ + return chip->data; +} + struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); +struct bgpio_pdata { + const char *label; + int base; + int ngpio; +}; + +#if IS_ENABLED(CONFIG_GPIO_GENERIC) + +int bgpio_init(struct gpio_chip *gc, struct device *dev, + unsigned long sz, void __iomem *dat, void __iomem *set, + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, + unsigned long flags); + +#define BGPIOF_BIG_ENDIAN BIT(0) +#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */ +#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ +#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) +#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ +#define BGPIOF_NO_OUTPUT BIT(5) /* only input */ + +#endif + #ifdef CONFIG_GPIOLIB_IRQCHIP void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 519b6e2d769e..661e5c2a8e2a 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -16,6 +16,10 @@ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... 
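The @data pointer plus gpiochip_add_data()/gpiochip_get_data(), introduced in the hunks that follow, replace the usual container_of() dance and the old chip->dev field. A hedged sketch of a driver using the new registration; the register layout, the names and the single data register are made up:

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct foo_gpio {
	struct gpio_chip chip;
	void __iomem *base;
};

static int foo_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct foo_gpio *priv = gpiochip_get_data(chip);	/* was container_of() */

	return !!(readl(priv->base) & BIT(offset));
}

static int foo_gpio_register(struct foo_gpio *priv, struct device *dev)
{
	priv->chip.label = "foo-gpio";
	priv->chip.parent = dev;	/* renamed from chip->dev */
	priv->chip.get = foo_gpio_get;
	priv->chip.ngpio = 8;
	priv->chip.base = -1;

	/* The second argument lands in chip->data; gpiochip_add() is now a
	 * wrapper that passes NULL. */
	return gpiochip_add_data(&priv->chip, priv);
}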
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index 1acb1445e05f..e31bcd4c7859 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -101,7 +101,7 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size); /* May be used by hardware driver to gain control over HDLC device */ -void detach_hdlc_protocol(struct net_device *dev); +int detach_hdlc_protocol(struct net_device *dev); static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, struct net_device *dev) diff --git a/include/linux/hid.h b/include/linux/hid.h index 251a1d382e23..75b66eccc692 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -168,6 +168,8 @@ struct hid_item { #define HID_UP_MSVENDOR 0xff000000 #define HID_UP_CUSTOM 0x00ff0000 #define HID_UP_LOGIVENDOR 0xffbc0000 +#define HID_UP_LOGIVENDOR2 0xff090000 +#define HID_UP_LOGIVENDOR3 0xff430000 #define HID_UP_LNVENDOR 0xffa00000 #define HID_UP_SENSOR 0x00200000 @@ -563,6 +565,9 @@ struct hid_device { /* device report descriptor */ wait_queue_head_t debug_wait; }; +#define to_hid_device(pdev) \ + container_of(pdev, struct hid_device, dev) + static inline void *hid_get_drvdata(struct hid_device *hdev) { return dev_get_drvdata(&hdev->dev); @@ -712,6 +717,9 @@ struct hid_driver { struct device_driver driver; }; +#define to_hid_driver(pdrv) \ + container_of(pdrv, struct hid_driver, driver) + /** * hid_ll_driver - low level driver callbacks * @start: called on probe to start the device diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 5dd60c2e120f..2790591c77cf 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -135,9 +135,6 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info, * @device: Driver model representation of the device * @tx_cfg: HSI TX configuration * @rx_cfg: HSI RX configuration - * @e_handler: Callback for handling port events (RX Wake High/Low) - * @pclaimed: Keeps tracks if the clients claimed its associated HSI port - * @nb: Notifier block for port events */ struct hsi_client { struct device device; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ecb080d6ff42..cfe81e10bd54 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -19,13 +19,16 @@ extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); +extern int madvise_free_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr, unsigned long next); extern int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr); extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); -extern int move_huge_pmd(struct vm_area_struct *vma, +extern bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, @@ -34,8 +37,7 @@ extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned long pfn, bool write); - + pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 
@@ -48,21 +50,13 @@ enum transparent_hugepage_flag { #endif }; -enum page_check_address_pmd_flag { - PAGE_CHECK_ADDRESS_PMD_FLAG, - PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, - PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, -}; -extern pmd_t *page_check_address_pmd(struct page *page, - struct mm_struct *mm, - unsigned long address, - enum page_check_address_pmd_flag flag, - spinlock_t **ptl); - #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #ifdef CONFIG_TRANSPARENT_HUGEPAGE +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, int flags); + #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) @@ -95,30 +89,28 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); #endif /* CONFIG_DEBUG_VM */ extern unsigned long transparent_hugepage_flags; -extern int split_huge_page_to_list(struct page *page, struct list_head *list); + +extern void prep_transhuge_page(struct page *page); +extern void free_transhuge_page(struct page *page); + +int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) { return split_huge_page_to_list(page, NULL); } -extern void __split_huge_page_pmd(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd); -#define split_huge_page_pmd(__vma, __address, __pmd) \ +void deferred_split_huge_page(struct page *page); + +void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long address); + +#define split_huge_pmd(__vma, __pmd, __address) \ do { \ pmd_t *____pmd = (__pmd); \ - if (unlikely(pmd_trans_huge(*____pmd))) \ - __split_huge_page_pmd(__vma, __address, \ - ____pmd); \ + if (pmd_trans_huge(*____pmd) \ + || pmd_devmap(*____pmd)) \ + __split_huge_pmd(__vma, __pmd, __address); \ } while (0) -#define wait_split_huge_page(__anon_vma, __pmd) \ - do { \ - pmd_t *____pmd = (__pmd); \ - anon_vma_lock_write(__anon_vma); \ - anon_vma_unlock_write(__anon_vma); \ - BUG_ON(pmd_trans_splitting(*____pmd) || \ - pmd_trans_huge(*____pmd)); \ - } while (0) -extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, - pmd_t *pmd); + #if HPAGE_PMD_ORDER >= MAX_ORDER #error "hugepages can't be allocated by the buddy allocator" #endif @@ -128,17 +120,17 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next); -extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, +extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl); /* mmap_sem must be held on entry */ -static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, +static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); - if (pmd_trans_huge(*pmd)) + if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) return __pmd_trans_huge_lock(pmd, vma, ptl); else - return 0; + return false; } static inline int hpage_nr_pages(struct page *page) { @@ -183,11 +175,8 @@ static inline int split_huge_page(struct page *page) { return 0; } -#define split_huge_page_pmd(__vma, __address, __pmd) \ - do { } while (0) -#define wait_split_huge_page(__anon_vma, __pmd) \ - do { } while (0) -#define split_huge_page_pmd_mm(__mm, __address, __pmd) \ +static inline void deferred_split_huge_page(struct page *page) {} +#define split_huge_pmd(__vma, __pmd, __address) \ 
do { } while (0) static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) @@ -201,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next) { } -static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, +static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { - return 0; + return false; } static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, @@ -218,6 +207,12 @@ static inline bool is_huge_zero_page(struct page *page) return false; } + +static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd, int flags) +{ + return NULL; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 685c262e0be8..7d953c2542a8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -8,6 +8,7 @@ #include <linux/cgroup.h> #include <linux/list.h> #include <linux/kref.h> +#include <asm/pgtable.h> struct ctl_table; struct user_struct; @@ -96,9 +97,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct address_space *mapping, pgoff_t idx, unsigned long address); -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); -#endif extern int hugepages_treat_as_movable; extern int sysctl_hugetlb_shm_group; @@ -265,20 +264,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, struct user_struct **user, int creat_flags, int page_size_log); -static inline int is_file_hugepages(struct file *file) +static inline bool is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) - return 1; - if (is_file_shm_hugepages(file)) - return 1; + return true; - return 0; + return is_file_shm_hugepages(file); } #else /* !CONFIG_HUGETLBFS */ -#define is_file_hugepages(file) 0 +#define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, struct user_struct **user, int creat_flags, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 8fdc17b84739..753dbad0bf94 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -141,8 +141,6 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, { u32 read_loc, write_loc, dsize; - smp_read_barrier_depends(); - /* Capture the read/write indices before they changed */ read_loc = rbi->ring_buffer->read_index; write_loc = rbi->ring_buffer->write_index; @@ -630,6 +628,11 @@ struct hv_input_signal_event_buffer { struct hv_input_signal_event event; }; +enum hv_signal_policy { + HV_SIGNAL_POLICY_DEFAULT = 0, + HV_SIGNAL_POLICY_EXPLICIT, +}; + struct vmbus_channel { /* Unique channel id */ int id; @@ -757,8 +760,21 @@ struct vmbus_channel { * link up channels based on their CPU affinity. */ struct list_head percpu_list; + /* + * Host signaling policy: The default policy will be + * based on the ring buffer state. We will also support + * a policy where the client driver can have explicit + * signaling control. 
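Returning to the huge_mm.h hunks above: pmd_trans_huge_lock() now reports success as a bool and also accepts devmap (DAX) entries, and the wait_split_huge_page() machinery is gone. A typical caller pattern under the new signature might look like this; foo_pte_range() and the walker itself are placeholders:

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/spinlock.h>

static int foo_pmd_walk(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
		/* *pmd is a huge (or devmap) entry and ptl is held */
		/* ... handle the single huge entry ... */
		spin_unlock(ptl);
		return 0;
	}

	/* not huge: fall back to the pte level */
	return foo_pte_range(pmd, addr, end, vma);	/* hypothetical */
}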
+ */ + enum hv_signal_policy signal_policy; }; +static inline void set_channel_signal_state(struct vmbus_channel *c, + enum hv_signal_policy policy) +{ + c->signal_policy = policy; +} + static inline void set_channel_read_state(struct vmbus_channel *c, bool state) { c->batched_reading = state; @@ -983,16 +999,8 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t size, resource_size_t align, bool fb_overlap_ok); -/** - * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device - * - * This macro is used to create a struct hv_vmbus_device_id that matches a - * specific device. - */ -#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf) \ - .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf }, +int vmbus_cpu_number_to_vp_number(int cpu_number); +u64 hv_do_hypercall(u64 control, void *input, void *output); /* * GUID definitions of various offer types - services offered to the guest. @@ -1003,118 +1011,102 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ - .guid = { \ - 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ - 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ - } + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ - .guid = { \ - 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ - 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ - } + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ - .guid = { \ - 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ - 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ - } + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ - .guid = { \ - 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ - 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ - } + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ - .guid = { \ - 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ - 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ - } + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ - .guid = { \ - 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ - 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ - } + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ - .guid = { \ - 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ - 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ - } + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ - .guid = { \ - 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ - 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ - } + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * 
{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ - .guid = { \ - 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ - 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ - } + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + +/* + * Keyboard GUID + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} + */ +#define HV_KBD_GUID \ + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ - .guid = { \ - 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ - 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ - } + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ - .guid = { \ - 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ - 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ - } + .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ - .guid = { \ - 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ - 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ - } + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service @@ -1122,20 +1114,25 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, */ #define HV_FCOPY_GUID \ - .guid = { \ - 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ - 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ - } + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ - .guid = { \ - 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ - 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ - } + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + +/* + * PCI Express Pass Through + * {44C4F61D-4444-4400-9D52-802E27EDE19F} + */ + +#define HV_PCIE_GUID \ + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Common header for Hyper-V ICs diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 768063baafbf..200cf13b00f6 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -414,6 +414,22 @@ struct i2c_algorithm { }; /** + * struct i2c_timings - I2C timing information + * @bus_freq_hz: the bus frequency in Hz + * @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification + * @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification + * @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns + * @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification + */ +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; +}; + +/** * struct i2c_bus_recovery_info - I2C bus recovery information * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). 
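Stepping back to the hyperv.h conversion above: the per-device GUID macros now expand to a UUID_LE() initializer and already carry the ".guid =" designator, so a VMBus driver's ID table stays a one-liner per offer. A sketch using the newly added keyboard GUID; the table name is illustrative:

#include <linux/hyperv.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct hv_vmbus_device_id foo_vmbus_id_table[] = {
	{ HV_KBD_GUID, },	/* synthetic keyboard offer */
	{ },			/* terminator */
};
MODULE_DEVICE_TABLE(vmbus, foo_vmbus_id_table);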
@@ -493,6 +509,8 @@ struct i2c_adapter_quirks { /* convenience macro for typical write-then read case */ #define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR) +/* clock stretching is not supported */ +#define I2C_AQ_NO_CLK_STRETCH BIT(4) /* * i2c_adapter is the structure used to identify a physical i2c bus along @@ -602,6 +620,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap, extern struct i2c_adapter *i2c_get_adapter(int nr); extern void i2c_put_adapter(struct i2c_adapter *adap); +void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults); /* Return the functionality mask */ static inline u32 i2c_get_functionality(struct i2c_adapter *adap) @@ -615,6 +634,20 @@ static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func) return (func & i2c_get_functionality(adap)) == func; } +/** + * i2c_check_quirks() - Function for checking the quirk flags in an i2c adapter + * @adap: i2c adapter + * @quirks: quirk flags + * + * Return: true if the adapter has all the specified quirk flags, false if not + */ +static inline bool i2c_check_quirks(struct i2c_adapter *adap, u64 quirks) +{ + if (!adap->quirks) + return false; + return (adap->quirks->flags & quirks) == quirks; +} + /* Return the adapter number for a specific adapter */ static inline int i2c_adapter_id(struct i2c_adapter *adap) { @@ -622,7 +655,7 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) } /** - * module_i2c_driver() - Helper macro for registering a I2C driver + * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct * * Helper macro for I2C drivers which do not do anything special in module @@ -633,6 +666,17 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) module_driver(__i2c_driver, i2c_add_driver, \ i2c_del_driver) +/** + * builtin_i2c_driver() - Helper macro for registering a builtin I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in their + * init. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(). 
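The timing and quirk helpers above are meant to take boilerplate out of bus drivers and clients. Two small sketches, assuming an adapter probe that has the controller's struct device at hand and a client that depends on clock stretching; the foo_* names are placeholders:

#include <linux/i2c.h>
#include <linux/errno.h>

static u32 foo_i2c_bus_freq(struct device *dev)
{
	struct i2c_timings t;

	/* Fills the struct from standard firmware properties such as
	 * "clock-frequency", substituting I2C specification defaults for
	 * anything missing because use_defaults is true. */
	i2c_parse_fw_timings(dev, &t, true);
	return t.bus_freq_hz;
}

static int foo_client_check(struct i2c_client *client)
{
	/* A device that relies on clock stretching can now detect an
	 * unsuitable controller through the adapter quirk bit. */
	if (i2c_check_quirks(client->adapter, I2C_AQ_NO_CLK_STRETCH))
		return -EOPNOTSUPP;
	return 0;
}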
+ */ +#define builtin_i2c_driver(__i2c_driver) \ + builtin_driver(__i2c_driver, i2c_add_driver) + #endif /* I2C */ #if IS_ENABLED(CONFIG_OF) @@ -644,6 +688,7 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) /* must call i2c_put_adapter() when done with returned i2c_adapter device */ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); + #else static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index b49cf923becc..ba7a9b0c7c57 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -91,7 +91,6 @@ enum { PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */ PPPOX_BOUND = 2, /* bound to ppp device */ PPPOX_RELAY = 4, /* forwarding is enabled */ - PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */ PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ }; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index a6aa970758a2..b84e49c3a738 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -164,6 +164,7 @@ struct team_mode { size_t priv_size; size_t port_priv_size; const struct team_mode_ops *ops; + enum netdev_lag_tx_type lag_tx_type; }; #define TEAM_PORT_HASHBITS 4 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 67ce5bd3b56a..a5f6ce6b578c 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -static inline bool is_vlan_dev(struct net_device *dev) +static inline bool is_vlan_dev(const struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; } @@ -621,7 +621,7 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | - NETIF_F_GEN_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h new file mode 100644 index 000000000000..767467d886de --- /dev/null +++ b/include/linux/iio/buffer-dma.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Licensed under the GPL-2. 
+ */ + +#ifndef __INDUSTRIALIO_DMA_BUFFER_H__ +#define __INDUSTRIALIO_DMA_BUFFER_H__ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/iio/buffer.h> + +struct iio_dma_buffer_queue; +struct iio_dma_buffer_ops; +struct device; + +struct iio_buffer_block { + u32 size; + u32 bytes_used; +}; + +/** + * enum iio_block_state - State of a struct iio_dma_buffer_block + * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued + * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue + * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA + * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue + * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed + */ +enum iio_block_state { + IIO_BLOCK_STATE_DEQUEUED, + IIO_BLOCK_STATE_QUEUED, + IIO_BLOCK_STATE_ACTIVE, + IIO_BLOCK_STATE_DONE, + IIO_BLOCK_STATE_DEAD, +}; + +/** + * struct iio_dma_buffer_block - IIO buffer block + * @head: List head + * @size: Total size of the block in bytes + * @bytes_used: Number of bytes that contain valid data + * @vaddr: Virutal address of the blocks memory + * @phys_addr: Physical address of the blocks memory + * @queue: Parent DMA buffer queue + * @kref: kref used to manage the lifetime of block + * @state: Current state of the block + */ +struct iio_dma_buffer_block { + /* May only be accessed by the owner of the block */ + struct list_head head; + size_t bytes_used; + + /* + * Set during allocation, constant thereafter. May be accessed read-only + * by anybody holding a reference to the block. + */ + void *vaddr; + dma_addr_t phys_addr; + size_t size; + struct iio_dma_buffer_queue *queue; + + /* Must not be accessed outside the core. */ + struct kref kref; + /* + * Must not be accessed outside the core. Access needs to hold + * queue->list_lock if the block is not owned by the core. + */ + enum iio_block_state state; +}; + +/** + * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer + * @blocks: Buffer blocks used for fileio + * @active_block: Block being used in read() + * @pos: Read offset in the active block + * @block_size: Size of each block + */ +struct iio_dma_buffer_queue_fileio { + struct iio_dma_buffer_block *blocks[2]; + struct iio_dma_buffer_block *active_block; + size_t pos; + size_t block_size; +}; + +/** + * struct iio_dma_buffer_queue - DMA buffer base structure + * @buffer: IIO buffer base structure + * @dev: Parent device + * @ops: DMA buffer callbacks + * @lock: Protects the incoming list, active and the fields in the fileio + * substruct + * @list_lock: Protects lists that contain blocks which can be modified in + * atomic context as well as blocks on those lists. 
This is the outgoing queue + * list and typically also a list of active blocks in the part that handles + * the DMA controller + * @incoming: List of buffers on the incoming queue + * @outgoing: List of buffers on the outgoing queue + * @active: Whether the buffer is currently active + * @fileio: FileIO state + */ +struct iio_dma_buffer_queue { + struct iio_buffer buffer; + struct device *dev; + const struct iio_dma_buffer_ops *ops; + + struct mutex lock; + spinlock_t list_lock; + struct list_head incoming; + struct list_head outgoing; + + bool active; + + struct iio_dma_buffer_queue_fileio fileio; +}; + +/** + * struct iio_dma_buffer_ops - DMA buffer callback operations + * @submit: Called when a block is submitted to the DMA controller + * @abort: Should abort all pending transfers + */ +struct iio_dma_buffer_ops { + int (*submit)(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block); + void (*abort)(struct iio_dma_buffer_queue *queue); +}; + +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list); + +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer); +size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_request_update(struct iio_buffer *buffer); + +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dma_dev, const struct iio_dma_buffer_ops *ops); +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); + +#endif diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h new file mode 100644 index 000000000000..5dcddf427bb0 --- /dev/null +++ b/include/linux/iio/buffer-dmaengine.h @@ -0,0 +1,18 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_DMAENGINE_H__ +#define __IIO_DMAENGINE_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); + +#endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 1600c55828e0..2ec3ad58e8a0 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -18,6 +18,12 @@ struct iio_buffer; /** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + +/** * struct iio_buffer_access_funcs - access functions for buffers. * @store_to: actually store stuff to the buffer * @read_first_n: try to get a specified number of bytes (must exist) @@ -27,9 +33,15 @@ struct iio_buffer; * storage. * @set_bytes_per_datum:set number of bytes per datum * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. 
+ * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calls are balanced with @enable. * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* * * The purpose of this structure is to make the buffer element * modular as event for a given driver, different usecases may require @@ -51,9 +63,13 @@ struct iio_buffer_access_funcs { int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); int (*set_length)(struct iio_buffer *buffer, int length); + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + void (*release)(struct iio_buffer *buffer); unsigned int modes; + unsigned int flags; }; /** diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h new file mode 100644 index 000000000000..93befd67c15c --- /dev/null +++ b/include/linux/iio/configfs.h @@ -0,0 +1,15 @@ +/* + * Industrial I/O configfs support + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef __IIO_CONFIGFS +#define __IIO_CONFIGFS + +extern struct configfs_subsystem iio_configfs_subsys; + +#endif /* __IIO_CONFIGFS */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 19c94c9acc81..b5894118755f 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -636,6 +636,8 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) } #endif +ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); + int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, int *fract); diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h new file mode 100644 index 000000000000..5198f8ed08a4 --- /dev/null +++ b/include/linux/iio/sw_trigger.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software trigger interface + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation.
+ */ + +#ifndef __IIO_SW_TRIGGER +#define __IIO_SW_TRIGGER + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/configfs.h> + +#define module_iio_sw_trigger_driver(__iio_sw_trigger_type) \ + module_driver(__iio_sw_trigger_type, iio_register_sw_trigger_type, \ + iio_unregister_sw_trigger_type) + +struct iio_sw_trigger_ops; + +struct iio_sw_trigger_type { + const char *name; + struct module *owner; + const struct iio_sw_trigger_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_trigger { + struct iio_trigger *trigger; + struct iio_sw_trigger_type *trigger_type; + struct config_group group; +}; + +struct iio_sw_trigger_ops { + struct iio_sw_trigger* (*probe)(const char *); + int (*remove)(struct iio_sw_trigger *); +}; + +static inline +struct iio_sw_trigger *to_iio_sw_trigger(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_trigger, + group); +} + +int iio_register_sw_trigger_type(struct iio_sw_trigger_type *tt); +void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); + +struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); +void iio_sw_trigger_destroy(struct iio_sw_trigger *); + +int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); +void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); + +static inline +void iio_swt_group_init_type_name(struct iio_sw_trigger *t, + const char *name, + struct config_item_type *type) +{ +#ifdef CONFIG_CONFIGFS_FS + config_group_init_type_name(&t->group, name, type); +#endif +} + +#endif /* __IIO_SW_TRIGGER */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 0e707f0c1a3e..7c27fa1030e8 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -3,6 +3,7 @@ #include <uapi/linux/inet_diag.h> +struct net; struct sock; struct inet_hashinfo; struct nlattr; @@ -23,6 +24,10 @@ struct inet_diag_handler { void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); + + int (*destroy)(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req); + __u16 idiag_type; __u16 idiag_info_size; }; @@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); +struct sock *inet_diag_find_one_icsk(struct net *net, + struct inet_hashinfo *hashinfo, + const struct inet_diag_req_v2 *req); + int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); extern int inet_diag_register(const struct inet_diag_handler *handler); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1c1ff7e4faa4..f2cb8d45513d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -150,7 +150,7 @@ extern struct task_group root_task_group; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ - .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ + .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ .vtime_snap = 0, \ .vtime_snap_whence = VTIME_SYS, #else diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index ad16809c8596..cb30edbfe9fc 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -195,6 +195,7 @@ extern void disable_irq(unsigned int irq); extern void disable_percpu_irq(unsigned int irq); extern void enable_irq(unsigned int irq); extern void enable_percpu_irq(unsigned int irq, unsigned int type); +extern bool irq_percpu_is_enabled(unsigned int irq); 
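[Editor's note, not part of the patch: a minimal sketch of how a per-CPU IRQ user might consult the new irq_percpu_is_enabled() helper declared just above. The function names and the IRQ variable here are hypothetical; enable_percpu_irq() and disable_percpu_irq() are the existing helpers shown alongside it.]

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical per-CPU IRQ number obtained elsewhere by the driver. */
static unsigned int example_percpu_irq;

static void example_cpu_starting(void)
{
	/* Only enable the IRQ if it is not already live on this CPU. */
	if (!irq_percpu_is_enabled(example_percpu_irq))
		enable_percpu_irq(example_percpu_irq, IRQ_TYPE_NONE);
}

static void example_cpu_dying(void)
{
	/* Balanced disable, guarded by the same query. */
	if (irq_percpu_is_enabled(example_percpu_irq))
		disable_percpu_irq(example_percpu_irq);
}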
extern void irq_wake_thread(unsigned int irq, void *dev_id); /* The following three functions are for the core kernel use only. */ diff --git a/include/linux/io.h b/include/linux/io.h index de64c1e53612..fffd88d7f426 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -89,21 +89,6 @@ void devm_memunmap(struct device *dev, void *addr); void *__devm_memremap_pages(struct device *dev, struct resource *res); -#ifdef CONFIG_ZONE_DEVICE -void *devm_memremap_pages(struct device *dev, struct resource *res); -#else -static inline void *devm_memremap_pages(struct device *dev, struct resource *res) -{ - /* - * Fail attempts to call devm_memremap_pages() without - * ZONE_DEVICE support enabled, this requires callers to fall - * back to plain devm_memremap() based on config - */ - WARN_ON_ONCE(1); - return ERR_PTR(-ENXIO); -} -#endif - /* * Some systems do not have legacy ISA devices. * /dev/port is not a valid interface on these systems. diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index bae69e5d693c..9c940263ca23 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -103,10 +103,21 @@ struct device_node; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); +/* + * Subdrivers that need some preparatory work can initialize their + * chips and call this to register their GICs. + */ +int gic_of_init(struct device_node *node, struct device_node *parent); + +/* + * Legacy platforms not converted to DT yet must use this to init + * their GIC + */ void gic_init(unsigned int nr, int start, void __iomem *dist , void __iomem *cpu); -int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a587a33363c7..dcca77c4b9d2 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -1,6 +1,8 @@ #ifndef _LINUX_IRQDESC_H #define _LINUX_IRQDESC_H +#include <linux/rcupdate.h> + /* * Core internal functions to deal with irq descriptors */ @@ -40,6 +42,7 @@ struct pt_regs; * IRQF_NO_SUSPEND set * @force_resume_depth: number of irqactions on a irq descriptor with * IRQF_FORCE_RESUME set + * @rcu: rcu head for delayed free * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -82,6 +85,9 @@ struct irq_desc { #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif +#ifdef CONFIG_SPARSE_IRQ + struct rcu_head rcu; +#endif int parent_irq; struct module *owner; const char *name; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d5e5c5bef28c..f64622ad02c1 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -211,6 +211,11 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) return node ? 
&node->fwnode : NULL; } +static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_IRQCHIP; +} + static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { @@ -367,6 +372,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); } +extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, @@ -410,6 +418,11 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) static inline void irq_dispose_mapping(unsigned int virq) { } static inline void irq_domain_activate_irq(struct irq_data *data) { } static inline void irq_domain_deactivate_irq(struct irq_data *data) { } +static inline struct irq_domain *irq_find_matching_fwnode( + struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) +{ + return NULL; +} #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index 052c7b32cc91..8e9e288b08c1 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -35,11 +35,6 @@ static inline dev_t old_decode_dev(u16 val) return MKDEV((val >> 8) & 255, val & 255); } -static inline bool new_valid_dev(dev_t dev) -{ - return 1; -} - static inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 350dfb08aee3..f31638c6e873 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -202,26 +202,26 @@ extern int _cond_resched(void); /** * abs - return absolute value of an argument - * @x: the value. If it is unsigned type, it is converted to signed type first - * (s64, long or int depending on its size). + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. * - * Return: an absolute value of x. If x is 64-bit, macro's return type is s64, - * otherwise it is signed long. + * Return: an absolute value of x. */ -#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ - s64 __x = (x); \ - (__x < 0) ? -__x : __x; \ - }), ({ \ - long ret; \ - if (sizeof(x) == sizeof(long)) { \ - long __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } else { \ - int __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } \ - ret; \ - })) +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) @@ -255,6 +255,7 @@ extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) 
__noreturn __cold; +void nmi_panic_self_stop(struct pt_regs *); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); @@ -446,6 +447,33 @@ extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; /* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). + */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * A variant of panic() called from NMI context. We return if we've already + * panicked on this CPU. If another CPU already panicked, loop in + * nmi_panic_self_stop() which can provide architecture dependent code such + * as saving register state for crash dump. + */ +#define nmi_panic(regs, fmt, ...) \ +do { \ + int old_cpu, cpu; \ + \ + cpu = raw_smp_processor_id(); \ + old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \ + \ + if (old_cpu == PANIC_CPU_INVALID) \ + panic(fmt, ##__VA_ARGS__); \ + else if (old_cpu != cpu) \ + nmi_panic_self_stop(regs); \ +} while (0) + +/* * Only to be used by arch init code. If the user over-wrote the default * CONFIG_PANIC_TIMEOUT, honor it. */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5d4e9c4b821d..af51df35d749 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -274,6 +274,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn); struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn); struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns); +struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, + const char *path, const void *ns); void kernfs_get(struct kernfs_node *kn); void kernfs_put(struct kernfs_node *kn); @@ -350,6 +352,10 @@ static inline struct kernfs_node * kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns) { return NULL; } +static inline struct kernfs_node * +kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path, + const void *ns) +{ return NULL; } static inline void kernfs_get(struct kernfs_node *kn) { } static inline void kernfs_put(struct kernfs_node *kn) { } @@ -431,6 +437,12 @@ kernfs_find_and_get(struct kernfs_node *kn, const char *name) } static inline struct kernfs_node * +kernfs_walk_and_get(struct kernfs_node *kn, const char *path) +{ + return kernfs_walk_and_get_ns(kn, path, NULL); +} + +static inline struct kernfs_node * kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, void *priv) { diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d140b1e9faa7..7b68d2788a56 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -237,6 +237,7 @@ extern int kexec_purgatory_get_set_symbol(struct kimage *image, unsigned int size, bool get_value); extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); void crash_save_cpu(struct pt_regs *regs, int cpu); @@ -332,6 +333,7 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; +static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } #define 
kexec_in_progress false diff --git a/include/linux/key.h b/include/linux/key.h index 66f705243985..7321ab8ef949 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -177,6 +177,7 @@ struct key { #define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ #define KEY_FLAG_BUILTIN 10 /* set if key is builtin */ #define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 12 /* set if key should not be removed */ /* the key type and key description string * - the desc is used to match a key against search criteria diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c923350ca20a..861f690aa791 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -66,7 +66,7 @@ * error pfns indicate that the gfn is in slot but faild to * translate it to pfn on host. */ -static inline bool is_error_pfn(pfn_t pfn) +static inline bool is_error_pfn(kvm_pfn_t pfn) { return !!(pfn & KVM_PFN_ERR_MASK); } @@ -76,13 +76,13 @@ static inline bool is_error_pfn(pfn_t pfn) * translated to pfn - it is not in slot or failed to * translate it to pfn. */ -static inline bool is_error_noslot_pfn(pfn_t pfn) +static inline bool is_error_noslot_pfn(kvm_pfn_t pfn) { return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); } /* noslot pfn indicates that the gfn is not in slot. */ -static inline bool is_noslot_pfn(pfn_t pfn) +static inline bool is_noslot_pfn(kvm_pfn_t pfn) { return pfn == KVM_PFN_NOSLOT; } @@ -111,38 +111,13 @@ static inline bool is_error_page(struct page *page) } /* - * vcpu->requests bit members + * Architecture-independent vcpu->requests bit members + * Bits 4-7 are reserved for more arch-independent bits. */ #define KVM_REQ_TLB_FLUSH 0 -#define KVM_REQ_MIGRATE_TIMER 1 -#define KVM_REQ_REPORT_TPR_ACCESS 2 -#define KVM_REQ_MMU_RELOAD 3 -#define KVM_REQ_TRIPLE_FAULT 4 -#define KVM_REQ_PENDING_TIMER 5 -#define KVM_REQ_UNHALT 6 -#define KVM_REQ_MMU_SYNC 7 -#define KVM_REQ_CLOCK_UPDATE 8 -#define KVM_REQ_KICK 9 -#define KVM_REQ_DEACTIVATE_FPU 10 -#define KVM_REQ_EVENT 11 -#define KVM_REQ_APF_HALT 12 -#define KVM_REQ_STEAL_UPDATE 13 -#define KVM_REQ_NMI 14 -#define KVM_REQ_PMU 15 -#define KVM_REQ_PMI 16 -#define KVM_REQ_WATCHDOG 17 -#define KVM_REQ_MASTERCLOCK_UPDATE 18 -#define KVM_REQ_MCLOCK_INPROGRESS 19 -#define KVM_REQ_EPR_EXIT 20 -#define KVM_REQ_SCAN_IOAPIC 21 -#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 -#define KVM_REQ_ENABLE_IBS 23 -#define KVM_REQ_DISABLE_IBS 24 -#define KVM_REQ_APIC_PAGE_RELOAD 25 -#define KVM_REQ_SMI 26 -#define KVM_REQ_HV_CRASH 27 -#define KVM_REQ_IOAPIC_EOI_EXIT 28 -#define KVM_REQ_HV_RESET 29 +#define KVM_REQ_MMU_RELOAD 1 +#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNHALT 3 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -318,6 +293,11 @@ struct kvm_s390_adapter_int { u32 adapter_id; }; +struct kvm_hv_sint { + u32 vcpu; + u32 sint; +}; + struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; @@ -331,6 +311,7 @@ struct kvm_kernel_irq_routing_entry { } irqchip; struct msi_msg msi; struct kvm_s390_adapter_int adapter; + struct kvm_hv_sint hv_sint; }; struct hlist_node link; }; @@ -439,10 +420,13 @@ struct kvm { /* The guest did something we don't support. */ #define vcpu_unimpl(vcpu, fmt, ...) \ - kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) + kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ + (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) #define vcpu_debug(vcpu, fmt, ...) 
\ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +#define vcpu_err(vcpu, fmt, ...) \ + kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { @@ -465,6 +449,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) struct kvm_vcpu *vcpu; int i; + if (id < 0 || id >= KVM_MAX_VCPUS) + return NULL; + vcpu = kvm_get_vcpu(kvm, id); + if (vcpu && vcpu->vcpu_id == id) + return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->vcpu_id == id) return vcpu; @@ -484,12 +473,12 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); -void kvm_arch_irq_routing_update(struct kvm *kvm); +void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } -static inline void kvm_arch_irq_routing_update(struct kvm *kvm) +static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) { } #endif @@ -602,19 +591,20 @@ void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); -pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); -pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); -pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, +kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); +kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); -pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); -pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); -pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, - bool *async, bool write_fault, bool *writable); +kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, + bool atomic, bool *async, bool write_fault, + bool *writable); -void kvm_release_pfn_clean(pfn_t pfn); -void kvm_set_pfn_dirty(pfn_t pfn); -void kvm_set_pfn_accessed(pfn_t pfn); -void kvm_get_pfn(pfn_t pfn); +void kvm_release_pfn_clean(kvm_pfn_t pfn); +void kvm_set_pfn_dirty(kvm_pfn_t pfn); +void kvm_set_pfn_accessed(kvm_pfn_t pfn); +void kvm_get_pfn(kvm_pfn_t pfn); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); @@ -634,14 +624,14 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); -int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); -pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); -pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); +kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long 
kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); @@ -668,8 +658,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); -void kvm_make_mclock_inprogress_request(struct kvm *kvm); -void kvm_make_scan_ioapic_request(struct kvm *kvm); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); long kvm_arch_dev_ioctl(struct file *filp, @@ -824,7 +812,7 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); -bool kvm_is_reserved_pfn(pfn_t pfn); +bool kvm_is_reserved_pfn(kvm_pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; @@ -978,7 +966,7 @@ static inline gfn_t gpa_to_gfn(gpa_t gpa) return (gfn_t)(gpa >> PAGE_SHIFT); } -static inline hpa_t pfn_to_hpa(pfn_t pfn) +static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) { return (hpa_t)pfn << PAGE_SHIFT; } @@ -990,11 +978,6 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) return kvm_is_error_hva(hva); } -static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) -{ - set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); -} - enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, @@ -1004,7 +987,6 @@ struct kvm_stats_debugfs_item { const char *name; int offset; enum kvm_stat_kind kind; - struct dentry *dentry; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; @@ -1091,6 +1073,7 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) { } #endif +void kvm_arch_irq_routing_update(struct kvm *kvm); static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 00a97bb905db..35e568f04b1e 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -4,10 +4,8 @@ #include <uapi/linux/kvm_para.h> -static inline int kvm_para_has_feature(unsigned int feature) +static inline bool kvm_para_has_feature(unsigned int feature) { - if (kvm_arch_para_features() & (1UL << feature)) - return 1; - return 0; + return !!(kvm_arch_para_features() & (1UL << feature)); } #endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 1b47a185c2f0..8bf259dae9f6 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -53,7 +53,7 @@ typedef unsigned long hva_t; typedef u64 hpa_t; typedef u64 hfn_t; -typedef hfn_t pfn_t; +typedef hfn_t kvm_pfn_t; struct gfn_to_hva_cache { u64 generation; diff --git a/include/linux/leds.h b/include/linux/leds.h index fa359c79c825..bc1476fda96e 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -44,9 +44,9 @@ struct led_classdev { #define LED_BLINK_ONESHOT (1 << 17) #define LED_BLINK_ONESHOT_STOP (1 << 18) #define LED_BLINK_INVERT (1 << 19) -#define LED_SYSFS_DISABLE (1 << 20) -#define SET_BRIGHTNESS_ASYNC (1 << 21) -#define SET_BRIGHTNESS_SYNC (1 << 22) +#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) +#define LED_BLINK_DISABLE (1 << 21) +#define LED_SYSFS_DISABLE (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) /* Set LED brightness level */ @@ -57,8 +57,8 @@ struct led_classdev { * Set LED brightness level immediately - it can block the caller for * the time required for accessing a LED device register. 
*/ - int (*brightness_set_sync)(struct led_classdev *led_cdev, - enum led_brightness brightness); + int (*brightness_set_blocking)(struct led_classdev *led_cdev, + enum led_brightness brightness); /* Get LED brightness level */ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); @@ -156,10 +156,25 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev, * * Set an LED's brightness, and, if necessary, cancel the * software blink timer that implements blinking when the - * hardware doesn't. + * hardware doesn't. This function is guaranteed not to sleep. */ extern void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness); + +/** + * led_set_brightness_sync - set LED brightness synchronously + * @led_cdev: the LED to set + * @brightness: the brightness to set it to + * + * Set an LED's brightness immediately. This function will block + * the caller for the time required for accessing device registers, + * and it can sleep. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value); + /** * led_update_brightness - update LED brightness * @led_cdev: the LED to query @@ -231,6 +246,8 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, /* Registration functions for complex triggers */ extern int led_trigger_register(struct led_trigger *trigger); extern void led_trigger_unregister(struct led_trigger *trigger); +extern int devm_led_trigger_register(struct device *dev, + struct led_trigger *trigger); extern void led_trigger_register_simple(const char *name, struct led_trigger **trigger); diff --git a/include/linux/libata.h b/include/linux/libata.h index 600c1e0626a5..851821bfd553 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -205,6 +205,7 @@ enum { ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ + ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 3f021dc5da8c..bed40dff0e86 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -116,6 +116,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } +int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, struct nvdimm_bus_descriptor *nfit_desc, struct module *module); #define nvdimm_bus_register(parent, desc) \ diff --git a/include/linux/list.h b/include/linux/list.h index 993395a2e55c..30cf4200ab40 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -24,7 +24,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) { - list->next = list; + WRITE_ONCE(list->next, list); list->prev = list; } @@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new, next->prev = new; new->next = next; new->prev = prev; - prev->next = new; + WRITE_ONCE(prev->next, new); } #else extern void __list_add(struct list_head *new, @@ -113,6 +113,17 @@ extern void __list_del_entry(struct list_head *entry); extern void list_del(struct list_head *entry); #endif +#ifdef CONFIG_DEBUG_LIST +/* + * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one + * of the pages it allocates is ever 
passed to list_add() + */ +extern void list_force_poison(struct list_head *entry); +#else +/* fallback to the less strict LIST_POISON* definitions */ +#define list_force_poison list_del +#endif + /** * list_replace - replace old entry by new one * @old : the element to be replaced @@ -186,7 +197,7 @@ static inline int list_is_last(const struct list_head *list, */ static inline int list_empty(const struct list_head *head) { - return head->next == head; + return READ_ONCE(head->next) == head; } /** @@ -608,7 +619,7 @@ static inline int hlist_unhashed(const struct hlist_node *h) static inline int hlist_empty(const struct hlist_head *h) { - return !h->first; + return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) @@ -642,7 +653,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n->next = first; if (first) first->pprev = &n->next; - h->first = n; + WRITE_ONCE(h->first, n); n->pprev = &h->first; } @@ -653,14 +664,14 @@ static inline void hlist_add_before(struct hlist_node *n, n->pprev = next->pprev; n->next = next; next->pprev = &n->next; - *(n->pprev) = n; + WRITE_ONCE(*(n->pprev), n); } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; - prev->next = n; + WRITE_ONCE(prev->next, n); n->pprev = &prev->next; if (n->next) diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 8132214e8efd..ee7229a6c06a 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -70,7 +70,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h, static inline int hlist_bl_empty(const struct hlist_bl_head *h) { - return !((unsigned long)h->first & ~LIST_BL_LOCKMASK); + return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); } static inline void hlist_bl_add_head(struct hlist_bl_node *n, diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 444d2b1313bd..b01fe1009084 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -57,7 +57,7 @@ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) { - return is_a_nulls(h->first); + return is_a_nulls(READ_ONCE(h->first)); } static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 31db7a05dd36..a8828652f794 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -37,8 +37,9 @@ enum klp_state { * struct klp_func - function structure for live patching * @old_name: name of the function to be patched * @new_func: pointer to the patched function code - * @old_addr: a hint conveying at what address the old function - * can be found (optional, vmlinux patches only) + * @old_sympos: a hint indicating which symbol position the old function + * can be found (optional) + * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources * @state: tracks function-level patch application state * @stack_node: list node for klp_ops func_stack list @@ -48,16 +49,16 @@ struct klp_func { const char *old_name; void *new_func; /* - * The old_addr field is optional and can be used to resolve - * duplicate symbol names in the vmlinux object. If this - * information is not present, the symbol is located by name - * with kallsyms. If the name is not unique and old_addr is - * not provided, the patch application fails as there is no - * way to resolve the ambiguity. 
+ * The old_sympos field is optional and can be used to resolve + * duplicate symbol names in livepatch objects. If this field is zero, + * it is expected the symbol is unique, otherwise patching fails. If + * this value is greater than zero then that occurrence of the symbol + * in kallsyms for the given object is used. */ - unsigned long old_addr; + unsigned long old_sympos; /* internal */ + unsigned long old_addr; struct kobject kobj; enum klp_state state; struct list_head stack_node; @@ -66,8 +67,7 @@ struct klp_func { /** * struct klp_reloc - relocation structure for live patching * @loc: address where the relocation will be written - * @val: address of the referenced symbol (optional, - * vmlinux patches only) + * @sympos: position in kallsyms to disambiguate symbols (optional) * @type: ELF relocation type * @name: name of the referenced symbol (for lookup/verification) * @addend: offset from the referenced symbol @@ -75,7 +75,7 @@ struct klp_func { */ struct klp_reloc { unsigned long loc; - unsigned long val; + unsigned long sympos; unsigned long type; const char *name; int addend; diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 4d24d64578c4..140edab64446 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -29,7 +29,7 @@ struct nlmsvc_binding { void (*fclose)(struct file *); }; -extern struct nlmsvc_binding * nlmsvc_ops; +extern const struct nlmsvc_binding *nlmsvc_ops; /* * Similar to nfs_client_initdata, but without the NFS-specific diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index ec3a6bab29de..71969de4058c 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1261,6 +1261,10 @@ * audit_rule_init. * @rule contains the allocated rule * + * @inode_invalidate_secctx: + * Notify the security module that it must revalidate the security context + * of an inode. + * * @inode_notifysecctx: * Notify the security module of what the security context of an inode * should be. 
Initializes the incore security context managed by the @@ -1413,14 +1417,14 @@ union security_list_options { int (*inode_removexattr)(struct dentry *dentry, const char *name); int (*inode_need_killpriv)(struct dentry *dentry); int (*inode_killpriv)(struct dentry *dentry); - int (*inode_getsecurity)(const struct inode *inode, const char *name, + int (*inode_getsecurity)(struct inode *inode, const char *name, void **buffer, bool alloc); int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); - void (*inode_getsecid)(const struct inode *inode, u32 *secid); + void (*inode_getsecid)(struct inode *inode, u32 *secid); int (*file_permission)(struct file *file, int mask); int (*file_alloc_security)(struct file *file); @@ -1516,6 +1520,7 @@ union security_list_options { int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); void (*release_secctx)(char *secdata, u32 seclen); + void (*inode_invalidate_secctx)(struct inode *inode); int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); @@ -1757,6 +1762,7 @@ struct security_hook_heads { struct list_head secid_to_secctx; struct list_head secctx_to_secid; struct list_head release_secctx; + struct list_head inode_invalidate_secctx; struct list_head inode_notifysecctx; struct list_head inode_setsecctx; struct list_head inode_getsecctx; diff --git a/include/linux/mdio.h b/include/linux/mdio.h index b42963bc81dd..5bfd99d1a40a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -11,6 +11,55 @@ #include <uapi/linux/mdio.h> +struct mii_bus; + +struct mdio_device { + struct device dev; + + const struct dev_pm_ops *pm_ops; + struct mii_bus *bus; + + int (*bus_match)(struct device *dev, struct device_driver *drv); + void (*device_free)(struct mdio_device *mdiodev); + void (*device_remove)(struct mdio_device *mdiodev); + + /* Bus address of the MDIO device (0-31) */ + int addr; + int flags; +}; +#define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +/* struct mdio_driver_common: Common to all MDIO drivers */ +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; +#define MDIO_DEVICE_FLAG_PHY 1 +#define to_mdio_common_driver(d) \ + container_of(d, struct mdio_driver_common, driver) + +/* struct mdio_driver: Generic MDIO driver */ +struct mdio_driver { + struct mdio_driver_common mdiodrv; + + /* + * Called during discovery. 
Used to set + * up device-specific structures, if any + */ + int (*probe)(struct mdio_device *mdiodev); + + /* Clears up any memory if needed */ + void (*remove)(struct mdio_device *mdiodev); +}; +#define to_mdio_driver(d) \ + container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) + +void mdio_device_free(struct mdio_device *mdiodev); +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); +int mdio_device_register(struct mdio_device *mdiodev); +void mdio_device_remove(struct mdio_device *mdiodev); +int mdio_driver_register(struct mdio_driver *drv); +void mdio_driver_unregister(struct mdio_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { @@ -173,4 +222,33 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } +int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); + +int mdiobus_register_device(struct mdio_device *mdiodev); +int mdiobus_unregister_device(struct mdio_device *mdiodev); +bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); +struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); + +/** + * mdio_module_driver() - Helper macro for registering mdio drivers + * + * Helper macro for MDIO drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define mdio_module_driver(_mdio_driver) \ +static int __init mdio_module_init(void) \ +{ \ + return mdio_driver_register(&_mdio_driver); \ +} \ +module_init(mdio_module_init); \ +static void __exit mdio_module_exit(void) \ +{ \ + mdio_driver_unregister(&_mdio_driver); \ +} \ +module_exit(mdio_module_exit) + #endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 24daf8fc4d7c..3106ac1c895e 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -25,6 +25,7 @@ enum { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ + MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ }; struct memblock_region { @@ -60,6 +61,14 @@ extern int memblock_debug; extern bool movable_node_enabled; #endif /* CONFIG_MOVABLE_NODE */ +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK +#define __init_memblock __meminit +#define __initdata_memblock __meminitdata +#else +#define __init_memblock +#define __initdata_memblock +#endif + #define memblock_dbg(fmt, ...)
\ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) @@ -82,6 +91,7 @@ bool memblock_overlaps_region(struct memblock_type *type, int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); +int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); ulong choose_memblock_flags(void); /* Low level functions */ @@ -164,7 +174,7 @@ static inline bool memblock_is_hotpluggable(struct memblock_region *m) return m->flags & MEMBLOCK_HOTPLUG; } -static inline bool movable_node_is_enabled(void) +static inline bool __init_memblock movable_node_is_enabled(void) { return movable_node_enabled; } @@ -184,6 +194,11 @@ static inline bool memblock_is_mirror(struct memblock_region *m) return m->flags & MEMBLOCK_MIRROR; } +static inline bool memblock_is_nomap(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_NOMAP; +} + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); @@ -209,10 +224,10 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, * for_each_free_mem_range - iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL - * @flags: pick from blocks based on memory attributes * * Walks over free (memory && !reserved) areas of memblock. Available as * soon as memblock is initialized. @@ -225,10 +240,10 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, * for_each_free_mem_range_reverse - rev-iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL - * @flags: pick from blocks based on memory attributes * * Walks over free (memory && !reserved) areas of memblock in reverse * order. Available as soon as memblock is initialized. 
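[Editor's note, not part of the patch: the new MEMBLOCK_NOMAP flag and the memblock_mark_nomap()/memblock_is_nomap() helpers added above let early platform code keep a range accounted as memory while excluding it from the kernel's direct mapping. A small sketch, assuming a hypothetical firmware carve-out address and size:]

#include <linux/init.h>
#include <linux/memblock.h>

static void __init example_reserve_secure_carveout(void)
{
	/* Hypothetical firmware-owned range that must stay unmapped. */
	phys_addr_t base = 0x80000000;
	phys_addr_t size = 0x00200000;

	memblock_add(base, size);		/* still accounted as memory... */
	memblock_mark_nomap(base, size);	/* ...but kept out of the direct map */
}

static unsigned int __init example_count_nomap_regions(void)
{
	struct memblock_region *reg;
	unsigned int nr = 0;

	/* Walk all memory regions and count those flagged NOMAP. */
	for_each_memblock(memory, reg)
		if (memblock_is_nomap(reg))
			nr++;
	return nr;
}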
@@ -318,9 +333,10 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); -int memblock_is_memory(phys_addr_t addr); +bool memblock_is_memory(phys_addr_t addr); +int memblock_is_map_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); -int memblock_is_reserved(phys_addr_t addr); +bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); extern void __memblock_dump_all(void); @@ -391,14 +407,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) - -#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK -#define __init_memblock __meminit -#define __initdata_memblock __meminitdata -#else -#define __init_memblock -#define __initdata_memblock -#endif +#define for_each_memblock_type(memblock_type, rgn) \ + idx = 0; \ + rgn = &memblock_type->regions[idx]; \ + for (idx = 0; idx < memblock_type->cnt; \ + idx++,rgn = &memblock_type->regions[idx]) #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index cd0e2413c358..189f04d4d2ec 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -85,32 +85,10 @@ enum mem_cgroup_events_target { MEM_CGROUP_NTARGETS, }; -/* - * Bits in struct cg_proto.flags - */ -enum cg_proto_flags { - /* Currently active and new sockets should be assigned to cgroups */ - MEMCG_SOCK_ACTIVE, - /* It was ever activated; we must disarm static keys on destruction */ - MEMCG_SOCK_ACTIVATED, -}; - struct cg_proto { struct page_counter memory_allocated; /* Current allocated memory. */ - struct percpu_counter sockets_allocated; /* Current number of sockets. */ int memory_pressure; - long sysctl_mem[3]; - unsigned long flags; - /* - * memcg field is used to find which memcg we belong directly - * Each memcg struct can hold more than one cg_proto, so container_of - * won't really cut. - * - * The elegant solution would be having an inverse function to - * proto_cgroup in struct proto, but that means polluting the structure - * for everybody, instead of just for memcg users. 
- */ - struct mem_cgroup *memcg; + bool active; }; #ifdef CONFIG_MEMCG @@ -192,6 +170,9 @@ struct mem_cgroup { unsigned long low; unsigned long high; + /* Range enforcement for interrupt charges */ + struct work_struct high_work; + unsigned long soft_limit; /* vmpressure notifications */ @@ -268,6 +249,10 @@ struct mem_cgroup { struct wb_domain cgwb_domain; #endif +#ifdef CONFIG_INET + unsigned long socket_pressure; +#endif + /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; @@ -275,7 +260,8 @@ struct mem_cgroup { struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ }; -extern struct cgroup_subsys_state *mem_cgroup_root_css; + +extern struct mem_cgroup *root_mem_cgroup; /** * mem_cgroup_events - count memory events against a cgroup @@ -294,10 +280,12 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg, bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcgp); + gfp_t gfp_mask, struct mem_cgroup **memcgp, + bool compound); void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, - bool lrucare); -void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); + bool lrucare, bool compound); +void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, + bool compound); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); @@ -308,18 +296,34 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); -struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; } +#define mem_cgroup_from_counter(counter, member) \ + container_of(counter, struct mem_cgroup, member) + struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +/** + * parent_mem_cgroup - find the accounting parent of a memcg + * @memcg: memcg whose parent to find + * + * Returns the parent memcg, or NULL if this is the root or the memory + * controller is in legacy no-hierarchy mode. 
+ */ +static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) +{ + if (!memcg->memory.parent) + return NULL; + return mem_cgroup_from_counter(memcg->memory.parent, memory); +} + static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) { @@ -513,7 +517,8 @@ static inline bool mem_cgroup_low(struct mem_cgroup *root, static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, - struct mem_cgroup **memcgp) + struct mem_cgroup **memcgp, + bool compound) { *memcgp = NULL; return 0; @@ -521,12 +526,13 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, static inline void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, - bool lrucare) + bool lrucare, bool compound) { } static inline void mem_cgroup_cancel_charge(struct page *page, - struct mem_cgroup *memcg) + struct mem_cgroup *memcg, + bool compound) { } @@ -671,12 +677,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -enum { - UNDER_LIMIT, - SOFT_LIMIT, - OVER_LIMIT, -}; - #ifdef CONFIG_CGROUP_WRITEBACK struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); @@ -703,20 +703,35 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) void sock_update_memcg(struct sock *sk); void sock_release_memcg(struct sock *sk); -#else -static inline void sock_update_memcg(struct sock *sk) +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +#if defined(CONFIG_MEMCG) && defined(CONFIG_INET) +extern struct static_key_false memcg_sockets_enabled_key; +#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { +#ifdef CONFIG_MEMCG_KMEM + if (memcg->tcp_mem.memory_pressure) + return true; +#endif + do { + if (time_before(jiffies, memcg->socket_pressure)) + return true; + } while ((memcg = parent_mem_cgroup(memcg))); + return false; } -static inline void sock_release_memcg(struct sock *sk) +#else +#define mem_cgroup_sockets_enabled 0 +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { + return false; } -#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ +#endif #ifdef CONFIG_MEMCG_KMEM -extern struct static_key memcg_kmem_enabled_key; +extern struct static_key_false memcg_kmem_enabled_key; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); @@ -732,7 +747,7 @@ void memcg_put_cache_ids(void); static inline bool memcg_kmem_enabled(void) { - return static_key_false(&memcg_kmem_enabled_key); + return static_branch_unlikely(&memcg_kmem_enabled_key); } static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) @@ -766,15 +781,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? 
memcg->kmemcg_id : -1; } -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); +struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); void __memcg_kmem_put_cache(struct kmem_cache *cachep); -static inline bool __memcg_kmem_bypass(gfp_t gfp) +static inline bool __memcg_kmem_bypass(void) { if (!memcg_kmem_enabled()) return true; - if (gfp & __GFP_NOACCOUNT) - return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; return false; @@ -791,7 +804,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp) static __always_inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { - if (__memcg_kmem_bypass(gfp)) + if (__memcg_kmem_bypass()) + return 0; + if (!(gfp & __GFP_ACCOUNT)) return 0; return __memcg_kmem_charge(page, gfp, order); } @@ -810,16 +825,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order) /** * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation * @cachep: the original global kmem cache - * @gfp: allocation flags. * * All memory allocated from a per-memcg cache is charged to the owner memcg. */ static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { - if (__memcg_kmem_bypass(gfp)) + if (__memcg_kmem_bypass()) return cachep; - return __memcg_kmem_get_cache(cachep); + return __memcg_kmem_get_cache(cachep, gfp); } static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 2ea574ff9714..43405992d027 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -275,7 +275,8 @@ extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern void remove_memory(int nid, u64 start, u64 size); extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn); -extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); +extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, + unsigned long map_offset); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3d385c81c153..2696c1f05ed1 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -122,7 +122,7 @@ struct sp_node { struct shared_policy { struct rb_root root; - spinlock_t lock; + rwlock_t lock; }; int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); diff --git a/include/linux/memremap.h b/include/linux/memremap.h new file mode 100644 index 000000000000..bcaa634139a9 --- /dev/null +++ b/include/linux/memremap.h @@ -0,0 +1,114 @@ +#ifndef _LINUX_MEMREMAP_H_ +#define _LINUX_MEMREMAP_H_ +#include <linux/mm.h> +#include <linux/ioport.h> +#include <linux/percpu-refcount.h> + +struct resource; +struct device; + +/** + * struct vmem_altmap - pre-allocated storage for vmemmap_populate + * @base_pfn: base of the entire dev_pagemap mapping + * @reserve: pages mapped, but reserved for driver use (relative to @base) + * @free: free pages set aside in the mapping for memmap storage + * @align: pages reserved to meet allocation alignments + * @alloc: track pages consumed, private to vmemmap_populate() + */ +struct vmem_altmap { + const unsigned long base_pfn; + const unsigned long reserve; + unsigned long free; + unsigned long align; + unsigned long alloc; 
+}; + +unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); +void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); + +#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE) +struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); +#else +static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) +{ + return NULL; +} +#endif + +/** + * struct dev_pagemap - metadata for ZONE_DEVICE mappings + * @altmap: pre-allocated/reserved memory for vmemmap allocations + * @res: physical address range covered by @ref + * @ref: reference count that pins the devm_memremap_pages() mapping + * @dev: host device of the mapping for debug + */ +struct dev_pagemap { + struct vmem_altmap *altmap; + const struct resource *res; + struct percpu_ref *ref; + struct device *dev; +}; + +#ifdef CONFIG_ZONE_DEVICE +void *devm_memremap_pages(struct device *dev, struct resource *res, + struct percpu_ref *ref, struct vmem_altmap *altmap); +struct dev_pagemap *find_dev_pagemap(resource_size_t phys); +#else +static inline void *devm_memremap_pages(struct device *dev, + struct resource *res, struct percpu_ref *ref, + struct vmem_altmap *altmap) +{ + /* + * Fail attempts to call devm_memremap_pages() without + * ZONE_DEVICE support enabled, this requires callers to fall + * back to plain devm_memremap() based on config + */ + WARN_ON_ONCE(1); + return ERR_PTR(-ENXIO); +} + +static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) +{ + return NULL; +} +#endif + +/** + * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn + * @pfn: page frame number to lookup page_map + * @pgmap: optional known pgmap that already has a reference + * + * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the + * same mapping. + */ +static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) +{ + const struct resource *res = pgmap ? 
pgmap->res : NULL; + resource_size_t phys = PFN_PHYS(pfn); + + /* + * In the cached case we're already holding a live reference so + * we can simply do a blind increment + */ + if (res && phys >= res->start && phys <= res->end) { + percpu_ref_get(pgmap->ref); + return pgmap; + } + + /* fall back to slow path lookup */ + rcu_read_lock(); + pgmap = find_dev_pagemap(phys); + if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) + pgmap = NULL; + rcu_read_unlock(); + + return pgmap; +} + +static inline void put_dev_pagemap(struct dev_pagemap *pgmap) +{ + if (pgmap) + percpu_ref_put(pgmap->ref); +} +#endif /* _LINUX_MEMREMAP_H_ */ diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 79e607e2f081..d55a42297d49 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -27,6 +27,8 @@ enum arizona_type { WM8280 = 4, WM8998 = 5, WM1814 = 6, + WM1831 = 7, + CS47L24 = 8, }; #define ARIZONA_IRQ_GP1 0 @@ -166,6 +168,7 @@ static inline int wm5102_patch(struct arizona *arizona) #endif int wm5110_patch(struct arizona *arizona); +int cs47l24_patch(struct arizona *arizona); int wm8997_patch(struct arizona *arizona); int wm8998_patch(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 57b45caaea80..64faeeff698c 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -171,7 +171,7 @@ struct arizona_pdata { int inmode[ARIZONA_MAX_INPUT]; /** Mode for outputs */ - bool out_mono[ARIZONA_MAX_OUTPUT]; + int out_mono[ARIZONA_MAX_OUTPUT]; /** PDM speaker mute setting */ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK]; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 27dac3ff18b9..bc6f7e00fb3d 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -17,6 +17,7 @@ #include <linux/platform_device.h> struct irq_domain; +struct property_set; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -44,6 +45,10 @@ struct mfd_cell { /* platform data passed to the sub devices drivers */ void *platform_data; size_t pdata_size; + + /* device properties passed to the sub devices drivers */ + const struct property_set *pset; + /* * Device Tree compatible string * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 13e1d96935ed..c800dbc42079 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -134,21 +134,32 @@ struct palmas_pmic_driver_data { struct regulator_config config); }; +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + struct palmas_gpadc_platform_data { /* Channel 3 current source is only enabled during conversion */ - int ch3_current; + int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */ /* Channel 0 current source can be used for battery detection. * If used for battery detection this will cause a permanent current * consumption depending on current level set here. 
*/ - int ch0_current; + int ch0_current; /* 0: off; 1: 5uA; 2: 15uA; 3: 20 uA */ + bool extended_delay; /* use extended delay for conversion */ /* default BAT_REMOVAL_DAT setting on device probe */ int bat_removal; /* Sets the START_POLARITY bit in the RT_CTRL register */ int start_polarity; + + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; }; struct palmas_reg_init { @@ -405,28 +416,7 @@ struct palmas_gpadc_calibration { s32 offset_error; }; -struct palmas_gpadc { - struct device *dev; - struct palmas *palmas; - - int ch3_current; - int ch0_current; - - int gpadc_force; - - int bat_removal; - - struct mutex reading_lock; - struct completion irq_complete; - - int eoc_sw_irq; - - struct palmas_gpadc_calibration *palmas_cal_tbl; - - int conv0_channel; - int conv1_channel; - int rt_channel; -}; +#define PALMAS_DATASHEET_NAME(_name) "palmas-gpadc-chan-"#_name struct palmas_gpadc_result { s32 raw_code; @@ -520,6 +510,43 @@ enum palmas_irqs { PALMAS_NUM_IRQ, }; +/* Palmas GPADC Channels */ +enum { + PALMAS_ADC_CH_IN0, + PALMAS_ADC_CH_IN1, + PALMAS_ADC_CH_IN2, + PALMAS_ADC_CH_IN3, + PALMAS_ADC_CH_IN4, + PALMAS_ADC_CH_IN5, + PALMAS_ADC_CH_IN6, + PALMAS_ADC_CH_IN7, + PALMAS_ADC_CH_IN8, + PALMAS_ADC_CH_IN9, + PALMAS_ADC_CH_IN10, + PALMAS_ADC_CH_IN11, + PALMAS_ADC_CH_IN12, + PALMAS_ADC_CH_IN13, + PALMAS_ADC_CH_IN14, + PALMAS_ADC_CH_IN15, + PALMAS_ADC_CH_MAX, +}; + +/* Palmas GPADC Channel0 Current Source */ +enum { + PALMAS_ADC_CH0_CURRENT_SRC_0, + PALMAS_ADC_CH0_CURRENT_SRC_5, + PALMAS_ADC_CH0_CURRENT_SRC_15, + PALMAS_ADC_CH0_CURRENT_SRC_20, +}; + +/* Palmas GPADC Channel3 Current Source */ +enum { + PALMAS_ADC_CH3_CURRENT_SRC_0, + PALMAS_ADC_CH3_CURRENT_SRC_10, + PALMAS_ADC_CH3_CURRENT_SRC_400, + PALMAS_ADC_CH3_CURRENT_SRC_800, +}; + struct palmas_pmic { struct palmas *palmas; struct device *dev; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index a06098639399..6bc4bcd488ac 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -44,6 +44,7 @@ enum sec_device_type { S2MPS11X, S2MPS13X, S2MPS14X, + S2MPS15X, S2MPU02, }; diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 29c30ac36020..48c3c5be7eb1 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -105,8 +105,12 @@ enum s2mps_rtc_reg { #define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT) #define S2MPS_RTC_WUDR_SHIFT 4 #define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) +#define S2MPS15_RTC_AUDR_SHIFT 4 +#define S2MPS15_RTC_AUDR_MASK (1 << S2MPS15_RTC_AUDR_SHIFT) #define S2MPS13_RTC_AUDR_SHIFT 1 #define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT) +#define S2MPS15_RTC_WUDR_SHIFT 1 +#define S2MPS15_RTC_WUDR_MASK (1 << S2MPS15_RTC_WUDR_SHIFT) #define S2MPS_RTC_RUDR_SHIFT 0 #define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) #define RTC_TCON_SHIFT 1 diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h new file mode 100644 index 000000000000..36d35287c3c0 --- /dev/null +++ b/include/linux/mfd/samsung/s2mps15.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2015 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_S2MPS15_H +#define __LINUX_MFD_S2MPS15_H + +/* S2MPS15 registers */ +enum s2mps15_reg { + S2MPS15_REG_ID, + S2MPS15_REG_INT1, + S2MPS15_REG_INT2, + S2MPS15_REG_INT3, + S2MPS15_REG_INT1M, + S2MPS15_REG_INT2M, + S2MPS15_REG_INT3M, + S2MPS15_REG_ST1, + S2MPS15_REG_ST2, + S2MPS15_REG_PWRONSRC, + S2MPS15_REG_OFFSRC, + S2MPS15_REG_BU_CHG, + S2MPS15_REG_RTC_BUF, + S2MPS15_REG_CTRL1, + S2MPS15_REG_CTRL2, + S2MPS15_REG_RSVD1, + S2MPS15_REG_RSVD2, + S2MPS15_REG_RSVD3, + S2MPS15_REG_RSVD4, + S2MPS15_REG_RSVD5, + S2MPS15_REG_RSVD6, + S2MPS15_REG_CTRL3, + S2MPS15_REG_RSVD7, + S2MPS15_REG_RSVD8, + S2MPS15_REG_RSVD9, + S2MPS15_REG_B1CTRL1, + S2MPS15_REG_B1CTRL2, + S2MPS15_REG_B2CTRL1, + S2MPS15_REG_B2CTRL2, + S2MPS15_REG_B3CTRL1, + S2MPS15_REG_B3CTRL2, + S2MPS15_REG_B4CTRL1, + S2MPS15_REG_B4CTRL2, + S2MPS15_REG_B5CTRL1, + S2MPS15_REG_B5CTRL2, + S2MPS15_REG_B6CTRL1, + S2MPS15_REG_B6CTRL2, + S2MPS15_REG_B7CTRL1, + S2MPS15_REG_B7CTRL2, + S2MPS15_REG_B8CTRL1, + S2MPS15_REG_B8CTRL2, + S2MPS15_REG_B9CTRL1, + S2MPS15_REG_B9CTRL2, + S2MPS15_REG_B10CTRL1, + S2MPS15_REG_B10CTRL2, + S2MPS15_REG_BBCTRL1, + S2MPS15_REG_BBCTRL2, + S2MPS15_REG_BRAMP, + S2MPS15_REG_LDODVS1, + S2MPS15_REG_LDODVS2, + S2MPS15_REG_LDODVS3, + S2MPS15_REG_LDODVS4, + S2MPS15_REG_L1CTRL, + S2MPS15_REG_L2CTRL, + S2MPS15_REG_L3CTRL, + S2MPS15_REG_L4CTRL, + S2MPS15_REG_L5CTRL, + S2MPS15_REG_L6CTRL, + S2MPS15_REG_L7CTRL, + S2MPS15_REG_L8CTRL, + S2MPS15_REG_L9CTRL, + S2MPS15_REG_L10CTRL, + S2MPS15_REG_L11CTRL, + S2MPS15_REG_L12CTRL, + S2MPS15_REG_L13CTRL, + S2MPS15_REG_L14CTRL, + S2MPS15_REG_L15CTRL, + S2MPS15_REG_L16CTRL, + S2MPS15_REG_L17CTRL, + S2MPS15_REG_L18CTRL, + S2MPS15_REG_L19CTRL, + S2MPS15_REG_L20CTRL, + S2MPS15_REG_L21CTRL, + S2MPS15_REG_L22CTRL, + S2MPS15_REG_L23CTRL, + S2MPS15_REG_L24CTRL, + S2MPS15_REG_L25CTRL, + S2MPS15_REG_L26CTRL, + S2MPS15_REG_L27CTRL, + S2MPS15_REG_LDODSCH1, + S2MPS15_REG_LDODSCH2, + S2MPS15_REG_LDODSCH3, + S2MPS15_REG_LDODSCH4, +}; + +/* S2MPS15 regulator ids */ +enum s2mps15_regulators { + S2MPS15_LDO1, + S2MPS15_LDO2, + S2MPS15_LDO3, + S2MPS15_LDO4, + S2MPS15_LDO5, + S2MPS15_LDO6, + S2MPS15_LDO7, + S2MPS15_LDO8, + S2MPS15_LDO9, + S2MPS15_LDO10, + S2MPS15_LDO11, + S2MPS15_LDO12, + S2MPS15_LDO13, + S2MPS15_LDO14, + S2MPS15_LDO15, + S2MPS15_LDO16, + S2MPS15_LDO17, + S2MPS15_LDO18, + S2MPS15_LDO19, + S2MPS15_LDO20, + S2MPS15_LDO21, + S2MPS15_LDO22, + S2MPS15_LDO23, + S2MPS15_LDO24, + S2MPS15_LDO25, + S2MPS15_LDO26, + S2MPS15_LDO27, + S2MPS15_BUCK1, + S2MPS15_BUCK2, + S2MPS15_BUCK3, + S2MPS15_BUCK4, + S2MPS15_BUCK5, + S2MPS15_BUCK6, + S2MPS15_BUCK7, + S2MPS15_BUCK8, + S2MPS15_BUCK9, + S2MPS15_BUCK10, + S2MPS15_BUCK11, + S2MPS15_REGULATOR_MAX, +}; + +#define S2MPS15_LDO_VSEL_MASK (0x3F) +#define S2MPS15_BUCK_VSEL_MASK (0xFF) + +#define S2MPS15_ENABLE_SHIFT (0x06) +#define S2MPS15_ENABLE_MASK (0x03 << S2MPS15_ENABLE_SHIFT) + +#define S2MPS15_LDO_N_VOLTAGES (S2MPS15_LDO_VSEL_MASK + 1) +#define S2MPS15_BUCK_N_VOLTAGES (S2MPS15_BUCK_VSEL_MASK + 1) + +#endif /* __LINUX_MFD_S2MPS15_H */ diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index 2f9b593246ee..d58f3b5f585a 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -200,6 +200,8 @@ enum tps65218_regulator_id { TPS65218_DCDC_4, 
TPS65218_DCDC_5, TPS65218_DCDC_6, + /* LS's */ + TPS65218_LS_3, /* LDOs */ TPS65218_LDO_1, }; @@ -210,8 +212,11 @@ enum tps65218_regulator_id { #define TPS65218_NUM_DCDC 6 /* Number of LDO voltage regulators available */ #define TPS65218_NUM_LDO 1 +/* Number of total LS current regulators available */ +#define TPS65218_NUM_LS 1 /* Number of total regulators available */ -#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO) +#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ + + TPS65218_NUM_LS) /* Define the TPS65218 IRQ numbers */ enum tps65218_irqs { diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index 579b50ca2e02..7a09e7f1f984 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h @@ -715,7 +715,6 @@ struct wm8350_led_platform_data { struct wm8350_led { struct platform_device *pdev; - struct mutex mutex; struct work_struct work; spinlock_t value_lock; enum led_brightness value; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 5a06d969338e..2e8af001c5da 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev) return !!(dev->flags & MLX4_FLAG_BONDED); } +static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev) +{ + return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev)); +} + struct mlx4_port_map { u8 port1; u8 port2; diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index abc4767695e4..b2c9fada8eac 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -45,7 +45,7 @@ struct mlx5_core_cq { atomic_t refcount; struct completion free; unsigned vector; - int irqn; + unsigned int irqn; void (*comp) (struct mlx5_core_cq *); void (*event) (struct mlx5_core_cq *, enum mlx5_event); struct mlx5_uar *uar; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0b473cbfa7ef..7be845e30689 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -251,6 +251,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, + MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, }; enum { @@ -442,9 +443,12 @@ struct mlx5_init_seg { __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; - __be32 rsvd2[884]; + __be32 rsvd2[880]; + __be32 internal_timer_h; + __be32 internal_timer_l; + __be32 rsrv3[2]; __be32 health_counter; - __be32 rsvd3[1019]; + __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; @@ -520,6 +524,12 @@ struct mlx5_eqe_page_fault { __be32 flags_qpn; } __packed; +struct mlx5_eqe_vport_change { + u8 rsvd0[2]; + __be16 vport_num; + __be32 rsvd1[6]; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -532,6 +542,7 @@ union ev_data { struct mlx5_eqe_stall_vl stall_vl; struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_page_fault page_fault; + struct mlx5_eqe_vport_change vport_change; } __packed; struct mlx5_eqe { @@ -593,7 +604,8 @@ struct mlx5_cqe64 { __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; - __be64 timestamp; + __be32 timestamp_h; + __be32 timestamp_l; __be32 sop_drop_qpn; __be16 wqe_counter; u8 signature; @@ -615,6 +627,16 @@ static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) return !!(cqe->l4_hdr_type_etc & 0x1); } +static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) +{ + u32 hi, lo; + + hi = be32_to_cpu(cqe->timestamp_h); + lo = be32_to_cpu(cqe->timestamp_l); + + return (u64)lo | ((u64)hi << 32); +} + enum { 
CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, @@ -1067,6 +1089,12 @@ enum { }; enum { + MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, + MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, + MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +enum { MLX5_L3_PROT_TYPE_IPV4 = 0, MLX5_L3_PROT_TYPE_IPV6 = 1, }; @@ -1102,6 +1130,12 @@ enum { MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, }; +enum mlx5_list_type { + MLX5_NVPRT_LIST_TYPE_UC = 0x0, + MLX5_NVPRT_LIST_TYPE_MC = 0x1, + MLX5_NVPRT_LIST_TYPE_VLAN = 0x2, +}; + enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, @@ -1124,6 +1158,8 @@ enum mlx5_cap_type { MLX5_CAP_IPOIB_OFFLOADS, MLX5_CAP_EOIB_OFFLOADS, MLX5_CAP_FLOW_TABLE, + MLX5_CAP_ESWITCH_FLOW_TABLE, + MLX5_CAP_ESWITCH, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1161,6 +1197,28 @@ enum mlx5_cap_type { #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) + +#define MLX5_CAP_ESW_MAX(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) @@ -1200,4 +1258,6 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } +#define MLX5_BY_PASS_NUM_PRIOS 9 + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5c857f2a20d7..5162f3533042 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -303,7 +303,7 @@ struct mlx5_eq { u32 cons_index; struct mlx5_buf buf; int size; - u8 irqn; + unsigned int irqn; u8 eqn; int nent; u64 mask; @@ -426,11 +426,23 @@ struct mlx5_mr_table { struct radix_tree_root tree; }; +struct mlx5_vf_context { + int enabled; +}; + +struct mlx5_core_sriov { + struct mlx5_vf_context *vfs_ctx; + int num_vfs; + int enabled_vfs; +}; + struct mlx5_irq_info { cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; }; +struct mlx5_eswitch; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -447,6 +459,7 @@ struct mlx5_priv { int fw_pages; atomic_t reg_pages; struct list_head free_list; + int vfs_pages; struct mlx5_core_health health; @@ -485,6 +498,12 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; + + struct mlx5_eswitch *eswitch; + struct mlx5_core_sriov sriov; + unsigned long pci_dev_data; + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_root_namespace *fdb_root_ns; }; enum mlx5_device_state { @@ -739,6 +758,8 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +int mlx5_sriov_init(struct mlx5_core_dev *dev); +int mlx5_sriov_cleanup(struct mlx5_core_dev *dev); void 
mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); @@ -762,7 +783,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -884,6 +906,15 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +enum { + MLX5_PCI_DEV_IS_VF = 1 << 0, +}; + +static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) +{ + return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); +} + static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h deleted file mode 100644 index 5f922c6d4fc2..000000000000 --- a/include/linux/mlx5/flow_table.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef MLX5_FLOW_TABLE_H -#define MLX5_FLOW_TABLE_H - -#include <linux/mlx5/driver.h> - -struct mlx5_flow_table_group { - u8 log_sz; - u8 match_criteria_enable; - u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; -}; - -void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, - u16 num_groups, - struct mlx5_flow_table_group *group); -void mlx5_destroy_flow_table(void *flow_table); -int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, - void *match_criteria, void *flow_context, - u32 *flow_index); -void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index); -u32 mlx5_get_flow_table_id(void *flow_table); - -#endif /* MLX5_FLOW_TABLE_H */ diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h new file mode 100644 index 000000000000..8230caa3fb6e --- /dev/null +++ b/include/linux/mlx5/fs.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_ +#define _MLX5_FS_ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> + +#define MLX5_FS_DEFAULT_FLOW_TAG 0x0 + +#define LEFTOVERS_RULE_NUM 2 +static inline void build_leftovers_ft_param(int *priority, + int *n_ent, + int *n_grp) +{ + *priority = 0; /* Priority of leftovers_prio-0 */ + *n_ent = LEFTOVERS_RULE_NUM; + *n_grp = LEFTOVERS_RULE_NUM; +} + +enum mlx5_flow_namespace_type { + MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_KERNEL, + MLX5_FLOW_NAMESPACE_LEFTOVERS, + MLX5_FLOW_NAMESPACE_FDB, +}; + +struct mlx5_flow_table; +struct mlx5_flow_group; +struct mlx5_flow_rule; +struct mlx5_flow_namespace; + +struct mlx5_flow_destination { + enum mlx5_flow_destination_type type; + union { + u32 tir_num; + struct mlx5_flow_table *ft; + u32 vport_num; + }; +}; + +struct mlx5_flow_namespace * +mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type); + +struct mlx5_flow_table * +mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups); + +struct mlx5_flow_table * +mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries); +int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); + +/* inbox should be set with the following values: + * start_flow_index + * end_flow_index + * match_criteria_enable + * match_criteria + */ +struct mlx5_flow_group * +mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); +void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); + +/* Single destination per rule. + * Group ID is implied by the match criteria. + */ +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest); +void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); + +#endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1565324eb620..68d73f82e009 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -185,6 +185,7 @@ enum { MLX5_CMD_OP_MODIFY_RQT = 0x917, MLX5_CMD_OP_DESTROY_RQT = 0x918, MLX5_CMD_OP_QUERY_RQT = 0x919, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, @@ -193,7 +194,8 @@ enum { MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, - MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 + MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c }; struct mlx5_ifc_flow_table_fields_supported_bits { @@ -256,25 +258,30 @@ struct mlx5_ifc_flow_table_fields_supported_bits { struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_0[0x1f]; + u8 reserved_0[0x2]; + u8 flow_modify_en[0x1]; + u8 modify_root[0x1]; + u8 identified_miss_table_mode[0x1]; + u8 flow_table_modify[0x1]; + u8 reserved_1[0x19]; - u8 reserved_1[0x2]; + u8 reserved_2[0x2]; u8 log_max_ft_size[0x6]; - u8 reserved_2[0x10]; + u8 reserved_3[0x10]; u8 max_ft_level[0x8]; - u8 reserved_3[0x20]; + u8 reserved_4[0x20]; - u8 reserved_4[0x18]; + u8 reserved_5[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_5[0x18]; + u8 reserved_6[0x18]; u8 log_max_destination[0x8]; - u8 reserved_6[0x18]; + u8 reserved_7[0x18]; u8 log_max_flow[0x8]; - u8 reserved_7[0x40]; + u8 reserved_8[0x40]; struct 
mlx5_ifc_flow_table_fields_supported_bits ft_field_support; @@ -291,6 +298,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 reserved_1[0x1a]; }; +struct mlx5_ifc_ipv4_layout_bits { + u8 reserved_0[0x60]; + + u8 ipv4[0x20]; +}; + +struct mlx5_ifc_ipv6_layout_bits { + u8 ipv6[16][0x8]; +}; + +union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; + u8 reserved_0[0x80]; +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -321,9 +344,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 udp_sport[0x10]; u8 udp_dport[0x10]; - u8 src_ip[4][0x20]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; - u8 dst_ip[4][0x20]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; struct mlx5_ifc_fte_match_set_misc_bits { @@ -447,6 +470,29 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 reserved_3[0x7200]; }; +struct mlx5_ifc_flow_table_eswitch_cap_bits { + u8 reserved_0[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; + + u8 reserved_1[0x7800]; +}; + +struct mlx5_ifc_e_switch_cap_bits { + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert_if_not_exist[0x1]; + u8 vport_cvlan_insert_overwrite[0x1]; + u8 reserved_0[0x1b]; + + u8 reserved_1[0x7e0]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ -665,7 +711,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_17[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 reserved_18[0x4]; + u8 eswitch_flow_table[0x1]; + u8 early_vf_enable; + u8 reserved_18[0x2]; u8 local_ca_ack_delay[0x5]; u8 reserved_19[0x6]; u8 port_type[0x2]; @@ -787,27 +835,36 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_60[0x1b]; u8 log_max_wq_sz[0x5]; - u8 reserved_61[0xa0]; - + u8 nic_vport_change_event[0x1]; + u8 reserved_61[0xa]; + u8 log_max_vlan_list[0x5]; u8 reserved_62[0x3]; + u8 log_max_current_mc_list[0x5]; + u8 reserved_63[0x3]; + u8 log_max_current_uc_list[0x5]; + + u8 reserved_64[0x80]; + + u8 reserved_65[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_63[0x8]; + u8 reserved_66[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_64[0x100]; - - u8 reserved_65[0x1f]; + u8 reserved_67[0x40]; + u8 device_frequency_khz[0x20]; + u8 reserved_68[0x5f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_66[0x220]; + u8 reserved_69[0x220]; }; -enum { - MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, - MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, +enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, + MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, }; struct mlx5_ifc_dest_format_struct_bits { @@ -900,6 +957,13 @@ struct mlx5_ifc_mac_address_layout_bits { u8 mac_addr_31_0[0x20]; }; +struct mlx5_ifc_vlan_layout_bits { + u8 reserved_0[0x14]; + u8 vlan[0x0c]; + + u8 reserved_1[0x20]; +}; + struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_0[0xa0]; @@ -1829,6 +1893,8 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_roce_cap_bits roce_cap; struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 
+ struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; + struct mlx5_ifc_e_switch_cap_bits e_switch_cap; u8 reserved_0[0x8000]; }; @@ -2133,24 +2199,35 @@ struct mlx5_ifc_rmpc_bits { struct mlx5_ifc_wq_bits wq; }; -enum { - MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, -}; - struct mlx5_ifc_nic_vport_context_bits { u8 reserved_0[0x1f]; u8 roce_en[0x1]; - u8 reserved_1[0x760]; + u8 arm_change_event[0x1]; + u8 reserved_1[0x1a]; + u8 event_on_mtu[0x1]; + u8 event_on_promisc_change[0x1]; + u8 event_on_vlan_change[0x1]; + u8 event_on_mc_address_change[0x1]; + u8 event_on_uc_address_change[0x1]; - u8 reserved_2[0x5]; + u8 reserved_2[0xf0]; + + u8 mtu[0x10]; + + u8 reserved_3[0x640]; + + u8 promisc_uc[0x1]; + u8 promisc_mc[0x1]; + u8 promisc_all[0x1]; + u8 reserved_4[0x2]; u8 allowed_list_type[0x3]; - u8 reserved_3[0xc]; + u8 reserved_5[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; - u8 reserved_4[0x20]; + u8 reserved_6[0x20]; u8 current_uc_mac_address[0][0x40]; }; @@ -2263,6 +2340,26 @@ struct mlx5_ifc_hca_vport_context_bits { u8 reserved_6[0xca0]; }; +struct mlx5_ifc_esw_vport_context_bits { + u8 reserved_0[0x3]; + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert[0x2]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x20]; + + u8 svlan_cfi[0x1]; + u8 svlan_pcp[0x3]; + u8 svlan_id[0xc]; + u8 cvlan_cfi[0x1]; + u8 cvlan_pcp[0x3]; + u8 cvlan_id[0xc]; + + u8 reserved_3[0x7a0]; +}; + enum { MLX5_EQC_STATUS_OK = 0x0, MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, @@ -2769,6 +2866,13 @@ struct mlx5_ifc_set_hca_cap_in_bits { union mlx5_ifc_hca_cap_union_bits capability; }; +enum { + MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 +}; + struct mlx5_ifc_set_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; @@ -2793,11 +2897,14 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_4[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x40]; + u8 reserved_5[0x18]; + u8 modify_enable_mask[0x8]; + + u8 reserved_6[0x20]; u8 flow_index[0x20]; - u8 reserved_6[0xe0]; + u8 reserved_7[0xe0]; struct mlx5_ifc_flow_context_bits flow_context; }; @@ -2940,6 +3047,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, }; struct mlx5_ifc_query_vport_state_in_bits { @@ -3700,6 +3808,64 @@ struct mlx5_ifc_query_flow_group_in_bits { u8 reserved_5[0x120]; }; +struct mlx5_ifc_query_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + +struct mlx5_ifc_query_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_modify_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_esw_vport_context_fields_select_bits { + u8 reserved[0x1c]; + u8 vport_cvlan_insert[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_strip[0x1]; +}; + +struct mlx5_ifc_modify_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 
reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + struct mlx5_ifc_query_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; @@ -4228,7 +4394,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_0[0x1c]; + u8 reserved_0[0x19]; + u8 mtu[0x1]; + u8 change_event[0x1]; + u8 promisc[0x1]; u8 permanent_address[0x1]; u8 addresses_list[0x1]; u8 roce_en[0x1]; @@ -5519,12 +5688,16 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_4[0x20]; - u8 reserved_5[0x8]; + u8 reserved_5[0x4]; + u8 table_miss_mode[0x4]; u8 level[0x8]; u8 reserved_6[0x8]; u8 log_size[0x8]; - u8 reserved_7[0x120]; + u8 reserved_7[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_8[0x100]; }; struct mlx5_ifc_create_flow_group_out_bits { @@ -6798,4 +6971,72 @@ union mlx5_ifc_uplink_pci_interface_document_bits { u8 reserved_0[0x20060]; }; +struct mlx5_ifc_set_flow_table_root_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_flow_table_root_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +enum { + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1, +}; + +struct mlx5_ifc_modify_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 modify_field_select[0x10]; + + u8 table_type[0x8]; + u8 reserved_4[0x18]; + + u8 reserved_5[0x8]; + u8 table_id[0x18]; + + u8 reserved_6[0x4]; + u8 table_miss_mode[0x4]; + u8 reserved_7[0x18]; + + u8 reserved_8[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_9[0x100]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 967e0fd06e89..638f2ca7a527 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -34,9 +34,17 @@ #define __MLX5_VPORT_H__ #include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> -u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); -void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); +u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport); +int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u8 state); +int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr); +int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, + u16 vport, u8 *addr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, union ib_gid *gid); @@ -51,5 +59,30 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, u64 *sys_image_guid); int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid); +int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, + u32 vport, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int 
mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, + u32 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all); +int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, + int promisc_uc, + int promisc_mc, + int promisc_all); +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size); +int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, + u16 vlans[], + int list_size); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 00bad7793788..f1cd22f2df1a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -16,6 +16,7 @@ #include <linux/mm_types.h> #include <linux/range.h> #include <linux/pfn.h> +#include <linux/percpu-refcount.h> #include <linux/bit_spinlock.h> #include <linux/shrinker.h> #include <linux/resource.h> @@ -51,6 +52,17 @@ extern int sysctl_legacy_va_layout; #define sysctl_legacy_va_layout 0 #endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS +extern const int mmap_rnd_bits_min; +extern const int mmap_rnd_bits_max; +extern int mmap_rnd_bits __read_mostly; +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS +extern const int mmap_rnd_compat_bits_min; +extern const int mmap_rnd_compat_bits_max; +extern int mmap_rnd_compat_bits __read_mostly; +#endif + #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -225,10 +237,14 @@ extern pgprot_t protection_map[16]; * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * + * MM layer fills up gfp_mask for page allocations but fault handler might + * alter it if its implementation requires a different allocation context. + * * pgoff should be used in favour of virtual_address, if possible. 
*/ struct vm_fault { unsigned int flags; /* FAULT_FLAG_xxx flags */ + gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ void __user *virtual_address; /* Faulting virtual address */ @@ -314,6 +330,13 @@ struct inode; #define page_private(page) ((page)->private) #define set_page_private(page, v) ((page)->private = (v)) +#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +#endif + /* * FIXME: take this include out, include page-flags.h in * files which need it (119 of them) @@ -395,39 +418,17 @@ static inline int is_vmalloc_or_module_addr(const void *x) extern void kvfree(const void *addr); -static inline void compound_lock(struct page *page) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON_PAGE(PageSlab(page), page); - bit_spin_lock(PG_compound_lock, &page->flags); -#endif -} - -static inline void compound_unlock(struct page *page) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON_PAGE(PageSlab(page), page); - bit_spin_unlock(PG_compound_lock, &page->flags); -#endif -} - -static inline unsigned long compound_lock_irqsave(struct page *page) +static inline atomic_t *compound_mapcount_ptr(struct page *page) { - unsigned long uninitialized_var(flags); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - local_irq_save(flags); - compound_lock(page); -#endif - return flags; + return &page[1].compound_mapcount; } -static inline void compound_unlock_irqrestore(struct page *page, - unsigned long flags) +static inline int compound_mapcount(struct page *page) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - compound_unlock(page); - local_irq_restore(flags); -#endif + if (!PageCompound(page)) + return 0; + page = compound_head(page); + return atomic_read(compound_mapcount_ptr(page)) + 1; } /* @@ -440,61 +441,29 @@ static inline void page_mapcount_reset(struct page *page) atomic_set(&(page)->_mapcount, -1); } +int __page_mapcount(struct page *page); + static inline int page_mapcount(struct page *page) { VM_BUG_ON_PAGE(PageSlab(page), page); - return atomic_read(&page->_mapcount) + 1; -} - -static inline int page_count(struct page *page) -{ - return atomic_read(&compound_head(page)->_count); -} - -static inline bool __compound_tail_refcounted(struct page *page) -{ - return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page); -} -/* - * This takes a head page as parameter and tells if the - * tail page reference counting can be skipped. - * - * For this to be safe, PageSlab and PageHeadHuge must remain true on - * any given page where they return true here, until all tail pins - * have been released. - */ -static inline bool compound_tail_refcounted(struct page *page) -{ - VM_BUG_ON_PAGE(!PageHead(page), page); - return __compound_tail_refcounted(page); + if (unlikely(PageCompound(page))) + return __page_mapcount(page); + return atomic_read(&page->_mapcount) + 1; } -static inline void get_huge_page_tail(struct page *page) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int total_mapcount(struct page *page); +#else +static inline int total_mapcount(struct page *page) { - /* - * __split_huge_page_refcount() cannot run from under us. 
- */ - VM_BUG_ON_PAGE(!PageTail(page), page); - VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); - VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); - if (compound_tail_refcounted(compound_head(page))) - atomic_inc(&page->_mapcount); + return page_mapcount(page); } +#endif -extern bool __get_page_tail(struct page *page); - -static inline void get_page(struct page *page) +static inline int page_count(struct page *page) { - if (unlikely(PageTail(page))) - if (likely(__get_page_tail(page))) - return; - /* - * Getting a normal page or the head of a compound page - * requires to already have an elevated page->_count. - */ - VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); - atomic_inc(&page->_count); + return atomic_read(&compound_head(page)->_count); } static inline struct page *virt_to_head_page(const void *x) @@ -513,7 +482,8 @@ static inline void init_page_count(struct page *page) atomic_set(&page->_count, 1); } -void put_page(struct page *page); +void __put_page(struct page *page); + void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); @@ -533,6 +503,9 @@ enum compound_dtor_id { #ifdef CONFIG_HUGETLB_PAGE HUGETLB_PAGE_DTOR, #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + TRANSHUGE_PAGE_DTOR, +#endif NR_COMPOUND_DTORS, }; extern compound_page_dtor * const compound_page_dtors[]; @@ -562,6 +535,8 @@ static inline void set_compound_order(struct page *page, unsigned int order) page[1].compound_order = order; } +void free_compound_page(struct page *page); + #ifdef CONFIG_MMU /* * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when @@ -689,6 +664,51 @@ static inline enum zone_type page_zonenum(const struct page *page) return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } +#ifdef CONFIG_ZONE_DEVICE +void get_zone_device_page(struct page *page); +void put_zone_device_page(struct page *page); +static inline bool is_zone_device_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_DEVICE; +} +#else +static inline void get_zone_device_page(struct page *page) +{ +} +static inline void put_zone_device_page(struct page *page) +{ +} +static inline bool is_zone_device_page(const struct page *page) +{ + return false; +} +#endif + +static inline void get_page(struct page *page) +{ + page = compound_head(page); + /* + * Getting a normal page or the head of a compound page + * requires to already have an elevated page->_count. + */ + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); + atomic_inc(&page->_count); + + if (unlikely(is_zone_device_page(page))) + get_zone_device_page(page); +} + +static inline void put_page(struct page *page) +{ + page = compound_head(page); + + if (put_page_testzero(page)) + __put_page(page); + + if (unlikely(is_zone_device_page(page))) + put_zone_device_page(page); +} + #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTION_IN_PAGE_FLAGS #endif @@ -978,10 +998,21 @@ static inline pgoff_t page_file_index(struct page *page) /* * Return true if this page is mapped into pagetables. + * For compound page it returns true if any subpage of compound page is mapped. 
*/ -static inline int page_mapped(struct page *page) -{ - return atomic_read(&(page)->_mapcount) >= 0; +static inline bool page_mapped(struct page *page) +{ + int i; + if (likely(!PageCompound(page))) + return atomic_read(&page->_mapcount) >= 0; + page = compound_head(page); + if (atomic_read(compound_mapcount_ptr(page)) >= 0) + return true; + for (i = 0; i < hpage_nr_pages(page); i++) { + if (atomic_read(&page[i]._mapcount) >= 0) + return true; + } + return false; } /* @@ -1069,7 +1100,7 @@ static inline bool shmem_mapping(struct address_space *mapping) } #endif -extern int can_do_mlock(void); +extern bool can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); @@ -1163,7 +1194,8 @@ int invalidate_inode_page(struct page *page); extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, - unsigned long address, unsigned int fault_flags); + unsigned long address, unsigned int fault_flags, + bool *unlocked); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, @@ -1175,7 +1207,7 @@ static inline int handle_mm_fault(struct mm_struct *mm, } static inline int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, - unsigned int fault_flags) + unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ BUG(); @@ -1361,10 +1393,26 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member) atomic_long_dec(&mm->rss_stat.count[member]); } +/* Optimized variant when page is already known not to be PageAnon */ +static inline int mm_counter_file(struct page *page) +{ + if (PageSwapBacked(page)) + return MM_SHMEMPAGES; + return MM_FILEPAGES; +} + +static inline int mm_counter(struct page *page) +{ + if (PageAnon(page)) + return MM_ANONPAGES; + return mm_counter_file(page); +} + static inline unsigned long get_mm_rss(struct mm_struct *mm) { return get_mm_counter(mm, MM_FILEPAGES) + - get_mm_counter(mm, MM_ANONPAGES); + get_mm_counter(mm, MM_ANONPAGES) + + get_mm_counter(mm, MM_SHMEMPAGES); } static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) @@ -1413,6 +1461,13 @@ static inline void sync_mm_rss(struct mm_struct *mm) } #endif +#ifndef __HAVE_ARCH_PTE_DEVMAP +static inline int pte_devmap(pte_t pte) +{ + return 0; +} +#endif + int vma_wants_writenotify(struct vm_area_struct *vma); extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, @@ -1898,7 +1953,9 @@ extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); -extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); +extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); +extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); + extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, @@ -2081,7 +2138,7 @@ int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn); + pfn_t pfn); int vm_iomap_memory(struct vm_area_struct 
*vma, phys_addr_t start, unsigned long len); @@ -2116,15 +2173,6 @@ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); -#ifdef CONFIG_PROC_FS -void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); -#else -static inline void vm_stat_account(struct mm_struct *mm, - unsigned long flags, struct file *file, long pages) -{ - mm->total_vm += pages; -} -#endif /* CONFIG_PROC_FS */ #ifdef CONFIG_DEBUG_PAGEALLOC extern bool _debug_pagealloc_enabled; @@ -2200,7 +2248,14 @@ pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); -void *vmemmap_alloc_block_buf(unsigned long size, int node); +struct vmem_altmap; +void *__vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap); +static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) +{ + return __vmemmap_alloc_block_buf(size, node, NULL); +} + void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node); @@ -2222,7 +2277,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags); extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); -extern void put_hwpoison_page(struct page *page); +#define put_hwpoison_page(page) put_page(page) extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index cf55945c83fb..712e8c37a200 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -100,4 +100,6 @@ static __always_inline enum lru_list page_lru(struct page *page) return lru; } +#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) + #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index f8d1492a114f..d3ebb9d21a53 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -54,6 +54,8 @@ struct page { * see PAGE_MAPPING_ANON below. */ void *s_mem; /* slab first object */ + atomic_t compound_mapcount; /* first tail page */ + /* page_deferred_list().next -- second tail page */ }; /* Second double word */ @@ -61,6 +63,7 @@ struct page { union { pgoff_t index; /* Our offset within mapping. */ void *freelist; /* sl[aou]b first free object */ + /* page_deferred_list().prev -- second tail page */ }; union { @@ -81,20 +84,9 @@ struct page { union { /* - * Count of ptes mapped in - * mms, to show when page is - * mapped & limit reverse map - * searches. - * - * Used also for tail pages - * refcounting instead of - * _count. Tail pages cannot - * be mapped and keeping the - * tail page _count zero at - * all times guarantees - * get_page_unless_zero() will - * never succeed on tail - * pages. + * Count of ptes mapped in mms, to show + * when page is mapped & limit reverse + * map searches. */ atomic_t _mapcount; @@ -124,6 +116,11 @@ struct page { * Can be used as a generic list * by the page owner. 
*/ + struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an + * lru or handled by a slab + * allocator, this points to the + * hosting device page map. + */ struct { /* slub per cpu partial pages */ struct page *next; /* Next partial slab */ #ifdef CONFIG_64BIT @@ -369,9 +366,10 @@ struct core_state { }; enum { - MM_FILEPAGES, - MM_ANONPAGES, - MM_SWAPENTS, + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ NR_MM_COUNTERS }; @@ -426,7 +424,7 @@ struct mm_struct { unsigned long total_vm; /* Total pages mapped */ unsigned long locked_vm; /* Pages that have PG_mlocked set */ unsigned long pinned_vm; /* Refcount permanently increased */ - unsigned long shared_vm; /* Shared pages (files) */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ unsigned long stack_vm; /* VM_GROWSUP/DOWN */ unsigned long def_flags; diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index f67b2ec18e6d..89df7abedd67 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -172,7 +172,7 @@ struct dw_mci { /* For edmac */ struct dw_mci_dma_slave *dms; /* Registers's physical base address */ - void *phy_regs; + resource_size_t phy_regs; u32 cmd_status; u32 data_status; @@ -235,16 +235,10 @@ struct dw_mci_dma_ops { }; /* IP Quirks/flags. */ -/* DTO fix for command transmission with IDMAC configured */ -#define DW_MCI_QUIRK_IDMAC_DTO BIT(0) -/* delay needed between retries on some 2.11a implementations */ -#define DW_MCI_QUIRK_RETRY_DELAY BIT(1) -/* High Speed Capable - Supports HS cards (up to 50MHz) */ -#define DW_MCI_QUIRK_HIGHSPEED BIT(2) /* Unreliable card detection */ -#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) +#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0) /* Timer for broken data transfer over scheme */ -#define DW_MCI_QUIRK_BROKEN_DTO BIT(4) +#define DW_MCI_QUIRK_BROKEN_DTO BIT(1) struct dma_pdata; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8673ffe3d86e..8dd4d290ab0d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -212,7 +212,9 @@ struct mmc_host { u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ +#ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; +#endif u32 max_current_330; u32 max_current_300; u32 max_current_180; @@ -259,7 +261,6 @@ struct mmc_host { #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ -#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. 
*/ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ @@ -289,6 +290,7 @@ struct mmc_host { #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ +#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -434,8 +436,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, int mmc_regulator_get_supply(struct mmc_host *mmc); -int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); - static inline int mmc_card_is_removable(struct mmc_host *host) { return !(host->caps & MMC_CAP_NONREMOVABLE); diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 772362adf471..053824b0a412 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -56,4 +56,10 @@ void dump_mm(const struct mm_struct *mm); #define VIRTUAL_BUG_ON(cond) do { } while (0) #endif +#ifdef CONFIG_DEBUG_VM_PGFLAGS +#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) +#else +#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) +#endif + #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e23a9e704536..33bb1b19273e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -195,11 +195,6 @@ static inline int is_active_lru(enum lru_list lru) return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } -static inline int is_unevictable_lru(enum lru_list lru) -{ - return (lru == LRU_UNEVICTABLE); -} - struct zone_reclaim_stat { /* * The pageout code in vmscan.c keeps track of how many of the @@ -361,10 +356,10 @@ struct zone { struct per_cpu_pageset __percpu *pageset; /* - * This is a per-zone reserve of pages that should not be - * considered dirtyable memory. + * This is a per-zone reserve of pages that are not available + * to userspace allocations. */ - unsigned long dirty_balance_reserve; + unsigned long totalreserve_pages; #ifndef CONFIG_SPARSEMEM /* @@ -576,19 +571,17 @@ static inline bool zone_is_empty(struct zone *zone) /* Maximum number of zones on a zonelist */ #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) +enum { + ZONELIST_FALLBACK, /* zonelist with fallback */ #ifdef CONFIG_NUMA - -/* - * The NUMA zonelists are doubled because we need zonelists that restrict the - * allocations to a single node for __GFP_THISNODE. - * - * [0] : Zonelist with fallback - * [1] : No fallback (__GFP_THISNODE) - */ -#define MAX_ZONELISTS 2 -#else -#define MAX_ZONELISTS 1 + /* + * The NUMA zonelists are doubled because we need zonelists that + * restrict the allocations to a single node for __GFP_THISNODE. + */ + ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ #endif + MAX_ZONELISTS +}; /* * This struct contains information about a zone in a zonelist. It is stored @@ -1207,13 +1200,13 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); * the zone and PFN linkages are still valid. This is expensive, but walkers * of the full memmap are extremely rare. 
*/ -int memmap_valid_within(unsigned long pfn, +bool memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone); #else -static inline int memmap_valid_within(unsigned long pfn, +static inline bool memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone) { - return 1; + return true; } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 64f36e09a790..6e4c645e1c0d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -404,7 +404,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. */ struct hv_vmbus_device_id { - __u8 guid[16]; + uuid_le guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; diff --git a/include/linux/module.h b/include/linux/module.h index 3a19c79918e0..4560d8f1545d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -302,6 +302,28 @@ struct mod_tree_node { struct latch_tree_node node; }; +struct module_layout { + /* The actual code + data. */ + void *base; + /* Total size. */ + unsigned int size; + /* The size of the executable code. */ + unsigned int text_size; + /* Size of RO section of the module (text+rodata) */ + unsigned int ro_size; + +#ifdef CONFIG_MODULES_TREE_LOOKUP + struct mod_tree_node mtn; +#endif +}; + +#ifdef CONFIG_MODULES_TREE_LOOKUP +/* Only touch one cacheline for common rbtree-for-core-layout case. */ +#define __module_layout_align ____cacheline_aligned +#else +#define __module_layout_align +#endif + struct module { enum module_state state; @@ -366,37 +388,9 @@ struct module { /* Startup function. */ int (*init)(void); - /* - * If this is non-NULL, vfree() after init() returns. - * - * Cacheline align here, such that: - * module_init, module_core, init_size, core_size, - * init_text_size, core_text_size and mtn_core::{mod,node[0]} - * are on the same cacheline. - */ - void *module_init ____cacheline_aligned; - - /* Here is the actual code + data, vfree'd on unload. */ - void *module_core; - - /* Here are the sizes of the init and core sections */ - unsigned int init_size, core_size; - - /* The size of the executable code in each section. */ - unsigned int init_text_size, core_text_size; - -#ifdef CONFIG_MODULES_TREE_LOOKUP - /* - * We want mtn_core::{mod,node[0]} to be in the same cacheline as the - * above entries such that a regular lookup will only touch one - * cacheline. - */ - struct mod_tree_node mtn_core; - struct mod_tree_node mtn_init; -#endif - - /* Size of RO sections of the module (text+rodata) */ - unsigned int init_ro_size, core_ro_size; + /* Core layout: rbtree is accessed frequently, so keep together. 
*/ + struct module_layout core_layout __module_layout_align; + struct module_layout init_layout; /* Arch-specific module values */ struct mod_arch_specific arch; @@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr); static inline bool within_module_core(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->module_core <= addr && - addr < (unsigned long)mod->module_core + mod->core_size; + return (unsigned long)mod->core_layout.base <= addr && + addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; } static inline bool within_module_init(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->module_init <= addr && - addr < (unsigned long)mod->module_init + mod->init_size; + return (unsigned long)mod->init_layout.base <= addr && + addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; } static inline bool within_module(unsigned long addr, const struct module *mod) @@ -768,9 +762,13 @@ extern int module_sysfs_initialized; #ifdef CONFIG_DEBUG_SET_MODULE_RONX extern void set_all_modules_text_rw(void); extern void set_all_modules_text_ro(void); +extern void module_enable_ro(const struct module *mod); +extern void module_disable_ro(const struct module *mod); #else static inline void set_all_modules_text_rw(void) { } static inline void set_all_modules_text_ro(void) { } +static inline void module_enable_ro(const struct module *mod) { } +static inline void module_disable_ro(const struct module *mod) { } #endif #ifdef CONFIG_GENERIC_BUG diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 79aaa9fc1a15..bf9b322cb0b0 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -9,38 +9,28 @@ #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) { - return (opt >= MRT_BASE) && (opt <= MRT_MAX); + return opt >= MRT_BASE && opt <= MRT_MAX; } -#else -static inline int ip_mroute_opt(int opt) -{ - return 0; -} -#endif -#ifdef CONFIG_IP_MROUTE -extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); -extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); -extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); -extern int ip_mr_init(void); +int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); +int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +int ip_mr_init(void); #else -static inline -int ip_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, unsigned int optlen) +static inline int ip_mroute_setsockopt(struct sock *sock, int optname, + char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } -static inline -int ip_mroute_getsockopt(struct sock *sock, - int optname, char __user *optval, int __user *optlen) +static inline int ip_mroute_getsockopt(struct sock *sock, int optname, + char __user *optval, int __user *optlen) { return -ENOPROTOOPT; } -static inline -int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) { return -ENOIOCTLCMD; } @@ -49,6 +39,11 @@ static inline int ip_mr_init(void) { return 0; } + +static inline int ip_mroute_opt(int opt) +{ + return 0; +} #endif struct vif_device { @@ -64,6 +59,32 @@ struct vif_device { #define VIFF_STATIC 
0x8000 +#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) +#define MFC_LINES 64 + +struct mr_table { + struct list_head list; + possible_net_t net; + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct list_head mfc_cache_array[MFC_LINES]; + struct vif_device vif_table[MAXVIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + int mroute_reg_vif_num; +}; + +/* mfc_flags: + * MFC_STATIC - the entry was added statically (not by a routing daemon) + */ +enum { + MFC_STATIC = BIT(0), +}; + struct mfc_cache { struct list_head list; __be32 mfc_mcastgrp; /* Group the entry belongs to */ @@ -89,19 +110,14 @@ struct mfc_cache { struct rcu_head rcu; }; -#define MFC_STATIC 1 -#define MFC_NOTIFY 2 - -#define MFC_LINES 64 - #ifdef __BIG_ENDIAN #define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) #else #define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) -#endif +#endif struct rtmsg; -extern int ipmr_get_route(struct net *net, struct sk_buff *skb, - __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); +int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, + struct rtmsg *rtm, int nowait); #endif diff --git a/include/linux/msi.h b/include/linux/msi.h index f71a25e5fd25..1c6342ab8c0e 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -174,6 +174,7 @@ struct msi_controller { #include <asm/msi.h> struct irq_domain; +struct irq_domain_ops; struct irq_chip; struct device_node; struct fwnode_handle; @@ -279,6 +280,23 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg); void platform_msi_domain_free_irqs(struct device *dev); + +/* When an MSI domain is used as an intermediate domain */ +int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *args); +int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, + int virq, int nvec, msi_alloc_info_t *args); +struct irq_domain * +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data); +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec); +void *platform_msi_get_host_data(struct irq_domain *domain); #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 366cf77953b5..58f3ba709ade 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -142,7 +142,9 @@ #endif #ifndef map_bankwidth +#ifdef CONFIG_MTD #warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. 
No NOR chip support can work" +#endif static inline int map_bankwidth(void *map) { BUG(); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index f17fa75809aa..cc84923011c0 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -254,6 +254,17 @@ struct mtd_info { int usecount; }; +static inline void mtd_set_of_node(struct mtd_info *mtd, + struct device_node *np) +{ + mtd->dev.of_node = np; +} + +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return mtd->dev.of_node; +} + int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 5a9d1d4c2487..bdd68e22b5a5 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -129,6 +129,14 @@ typedef enum { /* Enable Hardware ECC before syndrome is read back from flash */ #define NAND_ECC_READSYN 2 +/* + * Enable generic NAND 'page erased' check. This check is only done when + * ecc.correct() returns -EBADMSG. + * Set this flag if your implementation does not fix bitflips in erased + * pages and you want to rely on the default implementation. + */ +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) + /* Bit mask for flags passed to do_nand_read_ecc */ #define NAND_GET_DEVICE 0x80 @@ -276,15 +284,15 @@ struct nand_onfi_params { __le16 t_r; __le16 t_ccs; __le16 src_sync_timing_mode; - __le16 src_ssync_features; + u8 src_ssync_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; u8 input_pin_capacitance_max; u8 driver_strength_support; __le16 t_int_r; - __le16 t_ald; - u8 reserved4[7]; + __le16 t_adl; + u8 reserved4[8]; /* vendor */ __le16 vendor_revision; @@ -407,7 +415,7 @@ struct nand_jedec_params { __le16 input_pin_capacitance_typ; __le16 clk_pin_capacitance_typ; u8 driver_strength_support; - __le16 t_ald; + __le16 t_adl; u8 reserved4[36]; /* ECC and endurance block */ @@ -451,12 +459,19 @@ struct nand_hw_control { * @total: total number of ECC bytes per page * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators + * @options: ECC specific options (see NAND_ECC_XXX flags defined above) * @layout: ECC layout control struct pointer * @priv: pointer to private ECC control data * @hwctl: function to control hardware ECC generator. Must only * be provided if an hardware ECC is available * @calculate: function for ECC calculation or readback from ECC hardware - * @correct: function for ECC correction, matching to ECC generator (sw/hw) + * @correct: function for ECC correction, matching to ECC generator (sw/hw). + * Should return a positive number representing the number of + * corrected bitflips, -EBADMSG if the number of bitflips exceed + * ECC strength, or any other error code if the error is not + * directly related to correction. + * If -EBADMSG is returned the input buffers should be left + * untouched. * @read_page_raw: function to read a raw page without ECC. 
This function * should hide the specific layout used by the ECC * controller and always return contiguous in-band and @@ -494,6 +509,7 @@ struct nand_ecc_ctrl { int strength; int prepad; int postpad; + unsigned int options; struct nand_ecclayout *layout; void *priv; void (*hwctl)(struct mtd_info *mtd, int mode); @@ -540,11 +556,11 @@ struct nand_buffers { /** * struct nand_chip - NAND Private Flash Chip Data + * @mtd: MTD device registered to the MTD framework * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the * flash device * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the * flash device. - * @flash_node: [BOARDSPECIFIC] device node describing this instance * @read_byte: [REPLACEABLE] read one byte from the chip * @read_word: [REPLACEABLE] read one word from the chip * @write_byte: [REPLACEABLE] write a single byte to the chip on the @@ -640,11 +656,10 @@ struct nand_buffers { */ struct nand_chip { + struct mtd_info mtd; void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; - struct device_node *flash_node; - uint8_t (*read_byte)(struct mtd_info *mtd); u16 (*read_word)(struct mtd_info *mtd); void (*write_byte)(struct mtd_info *mtd, uint8_t byte); @@ -719,6 +734,37 @@ struct nand_chip { void *priv; }; +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + mtd_set_of_node(&chip->mtd, np); +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return mtd_get_of_node(&chip->mtd); +} + +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_chip, mtd); +} + +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) +{ + return &chip->mtd; +} + +static inline void *nand_get_controller_data(struct nand_chip *chip) +{ + return chip->priv; +} + +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) +{ + chip->priv = priv; +} + /* * NAND Flash Manufacturer ID Codes */ @@ -907,15 +953,6 @@ struct platform_nand_data { struct platform_nand_ctrl ctrl; }; -/* Some helpers to access the data structures */ -static inline -struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) -{ - struct nand_chip *chip = mtd->priv; - - return chip->priv; -} - /* return the supported features. */ static inline int onfi_feature(struct nand_chip *chip) { diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h index 74acf5367556..fb0bc3420a10 100644 --- a/include/linux/mtd/nand_bch.h +++ b/include/linux/mtd/nand_bch.h @@ -55,7 +55,7 @@ static inline int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { - return -1; + return -ENOTSUPP; } static inline struct nand_bch_control * diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 6a35e6de5da1..70736e1e6c8f 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -41,7 +41,6 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ }; #define MTDPART_OFS_RETAIN (-3) @@ -56,11 +55,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
* @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information */ struct mtd_part_parser_data { unsigned long origin; - struct device_node *of_node; }; @@ -72,13 +69,33 @@ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; - int (*parse_fn)(struct mtd_info *, struct mtd_partition **, + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); }; -extern void register_mtd_parser(struct mtd_part_parser *parser); +/* Container for passing around a set of parsed partitions */ +struct mtd_partitions { + const struct mtd_partition *parts; + int nr_parts; + const struct mtd_part_parser *parser; +}; + +extern int __register_mtd_parser(struct mtd_part_parser *parser, + struct module *owner); +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE) + extern void deregister_mtd_parser(struct mtd_part_parser *parser); +/* + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't + * do anything special in module init/exit. Each driver may only use this macro + * once, and calling it replaces module_init() and module_exit(). + */ +#define module_mtd_part_parser(__mtd_part_parser) \ + module_driver(__mtd_part_parser, register_mtd_parser, \ + deregister_mtd_parser) + int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 1c28f8879b1c..2251add65fa7 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -143,11 +143,11 @@ enum flctl_ecc_res_t { struct dma_chan; struct sh_flctl { - struct mtd_info mtd; struct nand_chip chip; struct platform_device *pdev; struct dev_pm_qos_request pm_qos; void __iomem *reg; + resource_size_t fifo; uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ int read_bytes; @@ -186,7 +186,7 @@ struct sh_flctl_platform_data { static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) { - return container_of(mtdinfo, struct sh_flctl, mtd); + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip); } #endif /* __SH_FLCTL_H__ */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index c8723b62c4cd..62356d50815b 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -12,6 +12,7 @@ #include <linux/bitops.h> #include <linux/mtd/cfi.h> +#include <linux/mtd/mtd.h> /* * Manufacturer IDs @@ -25,7 +26,7 @@ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX #define SNOR_MFR_SPANSION CFI_MFR_AMD #define SNOR_MFR_SST CFI_MFR_SST -#define SNOR_MFR_WINBOND 0xef +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ /* * Note on opcode nomenclature: some opcodes have a format like @@ -117,14 +118,11 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), }; -struct mtd_info; - /** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. - * @flash_node: point to a device node describing this flash instance. 
* @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -144,7 +142,8 @@ struct mtd_info; * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR - * at the offset @offs + * at the offset @offs; if not provided by the driver, + * spi-nor will send the erase opcode via write_reg() * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is @@ -155,7 +154,6 @@ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; - struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -185,6 +183,17 @@ struct spi_nor { void *priv; }; +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + struct device_node *np) +{ + mtd_set_of_node(&nor->mtd, np); +} + +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return mtd_get_of_node(&nor->mtd); +} + /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure diff --git a/include/linux/namei.h b/include/linux/namei.h index d8c6334cd150..d0f25d81b46a 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -77,6 +77,7 @@ extern struct dentry *kern_path_locked(const char *, struct path *); extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index f0d87347df19..d9654f0eecb3 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -52,7 +52,7 @@ enum { NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ - NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ + NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ NETIF_F_RXHASH_BIT, /* Receive hashing offload */ @@ -103,7 +103,7 @@ enum { #define NETIF_F_NTUPLE __NETIF_F(NTUPLE) #define NETIF_F_RXCSUM __NETIF_F(RXCSUM) #define NETIF_F_RXHASH __NETIF_F(RXHASH) -#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) +#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC) #define NETIF_F_SG __NETIF_F(SG) #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) @@ -146,10 +146,12 @@ enum { #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ NETIF_F_TSO6 | NETIF_F_UFO) -#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM -#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) -#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) -#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) +/* List of IP checksum features. 
Note that NETIF_F_ HW_CSUM should not be + * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- + * this would be contradictory + */ +#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_HW_CSUM) #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3143c847bddb..5ac140dcb789 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -132,7 +132,9 @@ static inline bool dev_xmit_complete(int rc) * used. */ -#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_HYPERV_NET) +# define LL_MAX_HEADER 128 +#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else @@ -326,7 +328,8 @@ enum { NAPI_STATE_SCHED, /* Poll is scheduled */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ - NAPI_STATE_HASHED, /* In NAPI hash */ + NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ }; enum gro_result { @@ -461,19 +464,13 @@ static inline void napi_complete(struct napi_struct *n) } /** - * napi_by_id - lookup a NAPI by napi_id - * @napi_id: hashed napi_id - * - * lookup @napi_id in napi_hash table - * must be called under rcu_read_lock() - */ -struct napi_struct *napi_by_id(unsigned int napi_id); - -/** * napi_hash_add - add a NAPI to global hashtable * @napi: napi context * * generate a new napi_id and store a @napi under it in napi_hash + * Used for busy polling (CONFIG_NET_RX_BUSY_POLL) + * Note: This is normally automatically done from netif_napi_add(), + * so might disappear in a future linux version. */ void napi_hash_add(struct napi_struct *napi); @@ -482,9 +479,14 @@ void napi_hash_add(struct napi_struct *napi); * @napi: napi context * * Warning: caller must observe rcu grace period - * before freeing memory containing @napi + * before freeing memory containing @napi, if + * this function returns true. + * Note: core networking stack automatically calls it + * from netif_napi_del() + * Drivers might want to call this helper to combine all + * the needed rcu grace periods into a single one. */ -void napi_hash_del(struct napi_struct *napi); +bool napi_hash_del(struct napi_struct *napi); /** * napi_disable - prevent NAPI from scheduling @@ -810,6 +812,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. + * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, * void *accel_priv, select_queue_fallback_t fallback); * Called to decide which queue to when device supports multiple @@ -957,12 +965,6 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * Called to release previously enslaved netdev. * * Feature/offload setting functions. - * netdev_features_t (*ndo_fix_features)(struct net_device *dev, - * netdev_features_t features); - * Adjusts the requested feature flags according to device-specific - * constraints, and returns the resulting flags. Must not modify - * the device state. 
- * * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features()). @@ -1011,6 +1013,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * a new port starts listening. The operation is protected by the * vxlan_net->sock_lock. * + * void (*ndo_add_geneve_port)(struct net_device *dev, + * sa_family_t sa_family, __be16 port); + * Called by geneve to notify a driver about the UDP port and socket + * address family that geneve is listnening to. It is called only when + * a new port starts listening. The operation is protected by the + * geneve_net->sock_lock. + * + * void (*ndo_del_geneve_port)(struct net_device *dev, + * sa_family_t sa_family, __be16 port); + * Called by geneve to notify the driver about a UDP port and socket + * address family that geneve is not listening to anymore. The operation + * is protected by the geneve_net->sock_lock. + * * void (*ndo_del_vxlan_port)(struct net_device *dev, * sa_family_t sa_family, __be16 port); * Called by vxlan to notify the driver about a UDP port and socket @@ -1066,8 +1081,11 @@ struct net_device_ops { void (*ndo_uninit)(struct net_device *dev); int (*ndo_open)(struct net_device *dev); int (*ndo_stop)(struct net_device *dev); - netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, - struct net_device *dev); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + struct net_device *dev); + netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, void *accel_priv, @@ -1215,7 +1233,12 @@ struct net_device_ops { void (*ndo_del_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); - + void (*ndo_add_geneve_port)(struct net_device *dev, + sa_family_t sa_family, + __be16 port); + void (*ndo_del_geneve_port)(struct net_device *dev, + sa_family_t sa_family, + __be16 port); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@ -1225,9 +1248,6 @@ struct net_device_ops { struct net_device *dev, void *priv); int (*ndo_get_lock_subclass)(struct net_device *dev); - netdev_features_t (*ndo_features_check) (struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features); int (*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index, u32 maxrate); @@ -1271,6 +1291,7 @@ struct net_device_ops { * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is a Open vSwitch master * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device + * @IFF_TEAM: device is a team device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1297,6 +1318,7 @@ enum netdev_priv_flags { IFF_NO_QUEUE = 1<<21, IFF_OPENVSWITCH = 1<<22, IFF_L3MDEV_SLAVE = 1<<23, + IFF_TEAM = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1323,6 +1345,7 @@ enum netdev_priv_flags { #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE +#define IFF_TEAM IFF_TEAM /** * struct net_device - The DEVICE structure. 
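As a rough illustration of the ndo_fix_features() contract documented above ("adjust the requested flags, do not touch device state") together with the new NETIF_F_CSUM_MASK note, a hypothetical driver callback could look like the sketch below. The function name foo_fix_features and the specific constraints are invented for illustration and are not part of this patch.

static netdev_features_t foo_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Hypothetical constraint: the device can only checksum plain
	 * IPv4/IPv6 packets, so do not leave NETIF_F_HW_CSUM set next to
	 * the protocol-specific bits (see the NETIF_F_CSUM_MASK comment). */
	if (features & NETIF_F_HW_CSUM) {
		features &= ~NETIF_F_HW_CSUM;
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	}

	/* TSO makes no sense without some checksum offload left enabled. */
	if (!(features & NETIF_F_CSUM_MASK))
		features &= ~NETIF_F_ALL_TSO;

	return features;	/* device state is left untouched */
}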
@@ -1716,7 +1739,9 @@ struct net_device { #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif - +#ifdef CONFIG_NET_CLS_ACT + struct tcf_proto __rcu *egress_cl_list; +#endif #ifdef CONFIG_NET_SWITCHDEV u32 offload_fwd_mark; #endif @@ -1949,6 +1974,26 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight); /** + * netif_tx_napi_add - initialize a napi context + * @dev: network device + * @napi: napi context + * @poll: polling function + * @weight: default weight + * + * This variant of netif_napi_add() should be used from drivers using NAPI + * to exclusively poll a TX queue. + * This will avoid we add it into napi_hash[], thus polluting this hash table. + */ +static inline void netif_tx_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); + netif_napi_add(dev, napi, poll, weight); +} + +/** * netif_napi_del - remove a napi context * @napi: napi context * @@ -2086,6 +2131,24 @@ struct pcpu_sw_netstats { #define netdev_alloc_pcpu_stats(type) \ __netdev_alloc_pcpu_stats(type, GFP_KERNEL) +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN, + NETDEV_LAG_TX_TYPE_RANDOM, + NETDEV_LAG_TX_TYPE_BROADCAST, + NETDEV_LAG_TX_TYPE_ROUNDROBIN, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, + NETDEV_LAG_TX_TYPE_HASH, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; +}; + +struct netdev_lag_lower_state_info { + u8 link_up : 1, + tx_enabled : 1; +}; + #include <linux/notifier.h> /* netdevice notifier chain. Please remember to update the rtnetlink @@ -2121,6 +2184,7 @@ struct pcpu_sw_netstats { #define NETDEV_CHANGEINFODATA 0x0018 #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A +#define NETDEV_CHANGELOWERSTATE 0x001B int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2139,6 +2203,12 @@ struct netdev_notifier_changeupper_info { struct net_device *upper_dev; /* new upper dev */ bool master; /* is upper dev master */ bool linking; /* is the nofication for link or unlink */ + void *upper_info; /* upper dev info */ +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; /* must be first */ + void *lower_state_info; /* is lower dev state */ }; static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, @@ -2472,6 +2542,71 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +struct skb_csum_offl_spec { + __u16 ipv4_okay:1, + ipv6_okay:1, + encap_okay:1, + ip_options_okay:1, + ext_hdrs_okay:1, + tcp_okay:1, + udp_okay:1, + sctp_okay:1, + vlan_okay:1, + no_encapped_ipv6:1, + no_not_encapped:1; +}; + +bool __skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help); + +static inline bool skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); +} + +static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec) +{ + bool csum_encapped; + + return skb_csum_offload_chk(skb, spec, &csum_encapped, true); +} + +static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) +{ + static const 
struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .ipv6_okay = 1, + .vlan_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); +} + +static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) +{ + static const struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + .vlan_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); +} + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3595,15 +3730,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); int netdev_master_upper_dev_link(struct net_device *dev, - struct net_device *upper_dev); -int netdev_master_upper_dev_link_private(struct net_device *dev, - struct net_device *upper_dev, - void *private); + struct net_device *upper_dev, + void *upper_priv, void *upper_info); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); +void netdev_lower_state_changed(struct net_device *lower_dev, + void *lower_state_info); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 @@ -3611,7 +3746,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev, - bool (*type_check)(struct net_device *dev)); + bool (*type_check)(const struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); @@ -3641,13 +3776,37 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth); static inline bool can_checksum_protocol(netdev_features_t features, __be16 protocol) { - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); + if (protocol == htons(ETH_P_FCOE)) + return !!(features & NETIF_F_FCOE_CRC); + + /* Assume this is an IP checksum (not SCTP CRC) */ + + if (features & NETIF_F_HW_CSUM) { + /* Can checksum everything */ + return true; + } + + switch (protocol) { + case htons(ETH_P_IP): + return !!(features & NETIF_F_IP_CSUM); + case htons(ETH_P_IPV6): + return !!(features & NETIF_F_IPV6_CSUM); + default: + return false; + } +} + +/* Map an ethertype into IP protocol if possible */ +static inline int eproto_to_ipproto(int eproto) +{ + switch (eproto) { + case htons(ETH_P_IP): + return IPPROTO_IP; + case htons(ETH_P_IPV6): + return IPPROTO_IPV6; + default: + return -1; + } } #ifdef CONFIG_BUG @@ -3712,15 +3871,14 @@ void linkwatch_run_queue(void); static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, netdev_features_t f2) { - if (f1 & NETIF_F_GEN_CSUM) - f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); - if (f2 & NETIF_F_GEN_CSUM) - f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); - f1 &= f2; - if (f1 & NETIF_F_GEN_CSUM) - f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); + if ((f1 ^ f2) & 
NETIF_F_HW_CSUM) { + if (f1 & NETIF_F_HW_CSUM) + f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + else + f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + } - return f1; + return f1 & f2; } static inline netdev_features_t netdev_get_wanted_features( @@ -3808,32 +3966,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, skb->mac_len = mac_len; } -static inline bool netif_is_macvlan(struct net_device *dev) +static inline bool netif_is_macvlan(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; } -static inline bool netif_is_macvlan_port(struct net_device *dev) +static inline bool netif_is_macvlan_port(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN_PORT; } -static inline bool netif_is_ipvlan(struct net_device *dev) +static inline bool netif_is_ipvlan(const struct net_device *dev) { return dev->priv_flags & IFF_IPVLAN_SLAVE; } -static inline bool netif_is_ipvlan_port(struct net_device *dev) +static inline bool netif_is_ipvlan_port(const struct net_device *dev) { return dev->priv_flags & IFF_IPVLAN_MASTER; } -static inline bool netif_is_bond_master(struct net_device *dev) +static inline bool netif_is_bond_master(const struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; } -static inline bool netif_is_bond_slave(struct net_device *dev) +static inline bool netif_is_bond_slave(const struct net_device *dev) { return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; } @@ -3868,6 +4026,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } +static inline bool netif_is_team_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM; +} + +static inline bool netif_is_team_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM_PORT; +} + +static inline bool netif_is_lag_master(const struct net_device *dev) +{ + return netif_is_bond_master(dev) || netif_is_team_master(dev); +} + +static inline bool netif_is_lag_port(const struct net_device *dev) +{ + return netif_is_bond_slave(dev) || netif_is_team_port(dev); +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h new file mode 100644 index 000000000000..22a16a23cd8a --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -0,0 +1,13 @@ +#ifndef _NF_CONNTRACK_SCTP_H +#define _NF_CONNTRACK_SCTP_H +/* SCTP tracking. 
*/ + +#include <uapi/linux/netfilter/nf_conntrack_sctp.h> + +struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; +}; + +#endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 5646b24bfc64..ba0d9789eb6e 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -8,12 +8,12 @@ #include <uapi/linux/netfilter/nfnetlink.h> struct nfnl_callback { - int (*call)(struct sock *nl, struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const cda[]); - int (*call_rcu)(struct sock *nl, struct sk_buff *skb, + int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); + int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); @@ -26,8 +26,8 @@ struct nfnetlink_subsystem { __u8 subsys_id; /* nfnetlink subsystem ID */ __u8 cb_count; /* number of callbacks */ const struct nfnl_callback *cb; /* callback for individual types */ - int (*commit)(struct sk_buff *skb); - int (*abort)(struct sk_buff *skb); + int (*commit)(struct net *net, struct sk_buff *skb); + int (*abort)(struct net *net, struct sk_buff *skb); }; int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 639e9b8b0e4d..0b41959aab9f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); @@ -153,6 +154,7 @@ struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); struct netlink_dump_control { + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *skb, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e7e78537aea2..d6f9b4e6006d 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -139,10 +139,10 @@ enum nfs_opnum4 { Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS -#define LAST_NFS4_OP OP_WRITE_SAME #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_WRITE_SAME +#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { NFS4_OK = 0, @@ -592,4 +592,18 @@ enum data_content4 { NFS4_CONTENT_HOLE = 1, }; +enum pnfs_update_layout_reason { + PNFS_UPDATE_LAYOUT_UNKNOWN = 0, + PNFS_UPDATE_LAYOUT_NO_PNFS, + PNFS_UPDATE_LAYOUT_RD_ZEROLEN, + PNFS_UPDATE_LAYOUT_MDSTHRESH, + PNFS_UPDATE_LAYOUT_NOMEM, + PNFS_UPDATE_LAYOUT_BULK_RECALL, + PNFS_UPDATE_LAYOUT_IO_TEST_FAIL, + PNFS_UPDATE_LAYOUT_FOUND_CACHED, + PNFS_UPDATE_LAYOUT_RETURN, + PNFS_UPDATE_LAYOUT_BLOCKED, + PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, +}; + #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c0e961474a52..48e0320cd643 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -60,18 +60,12 @@ struct nfs_lockowner { pid_t l_pid; }; -#define NFS_IO_INPROGRESS 0 
-struct nfs_io_counter { - unsigned long flags; - atomic_t io_count; -}; - struct nfs_lock_context { atomic_t count; struct list_head list; struct nfs_open_context *open_context; struct nfs_lockowner lockowner; - struct nfs_io_counter io_count; + atomic_t io_count; }; struct nfs4_state; @@ -216,7 +210,6 @@ struct nfs_inode { #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ -#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ @@ -359,6 +352,7 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); @@ -517,13 +511,25 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned */ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); -extern int nfs_wb_page(struct inode *inode, struct page* page); +extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); static inline int +nfs_wb_launder_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, true); +} + +static inline int +nfs_wb_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, false); +} + +static inline int nfs_have_writebacks(struct inode *inode) { return NFS_I(inode)->nrequests != 0; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 2469ab0bb3a1..7fcc13c8cf1f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -102,6 +102,7 @@ struct nfs_client { #define NFS_SP4_MACH_CRED_STATEID 4 /* TEST_STATEID and FREE_STATEID */ #define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */ #define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ +#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */ #endif /* CONFIG_NFS_V4 */ /* Our own IP address, as a null-terminated string. 
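The nfs_fs.h hunk above folds page writeback into a single nfs_wb_single_page() helper with a launder flag, wrapped by nfs_wb_page() and nfs_wb_launder_page(). A minimal sketch of how the launder variant might be consumed from an address_space ->launder_page hook follows; the hook name and wiring are illustrative only (assuming <linux/nfs_fs.h> and <linux/pagemap.h>), not taken from this patch.

static int example_nfs_launder_page(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	/* Write the page back and propagate errors, as ->launder_page
	 * callers such as invalidate_inode_pages2() expect. */
	return nfs_wb_launder_page(inode, page);
}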
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 11bbae44f4cb..791098a08a87 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1375,6 +1375,7 @@ enum { NFS_IOHDR_ERROR = 0, NFS_IOHDR_EOF, NFS_IOHDR_REDO, + NFS_IOHDR_STAT, }; struct nfs_pgio_header { @@ -1420,11 +1421,12 @@ struct nfs_mds_commit_info { struct list_head list; }; +struct nfs_commit_info; struct nfs_commit_data; struct nfs_inode; struct nfs_commit_completion_ops { - void (*error_cleanup) (struct nfs_inode *nfsi); void (*completion) (struct nfs_commit_data *data); + void (*resched_write) (struct nfs_commit_info *, struct nfs_page *); }; struct nfs_commit_info { @@ -1454,12 +1456,14 @@ struct nfs_commit_data { const struct rpc_call_ops *mds_ops; const struct nfs_commit_completion_ops *completion_ops; int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); + unsigned long flags; }; struct nfs_pgio_completion_ops { void (*error_cleanup)(struct list_head *head); void (*init_hdr)(struct nfs_pgio_header *hdr); void (*completion)(struct nfs_pgio_header *hdr); + void (*reschedule_io)(struct nfs_pgio_header *hdr); }; struct nfs_unlinkdata { diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h deleted file mode 100644 index 9acb21572eaf..000000000000 --- a/include/linux/nwpserial.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Serial Port driver for a NWP uart device - * - * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ -#ifndef _NWPSERIAL_H -#define _NWPSERIAL_H - -int nwpserial_register_port(struct uart_port *port); -void nwpserial_unregister_port(int line); - -#endif /* _NWPSERIAL_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 507daad0bc8d..01c0a556448b 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -3,6 +3,7 @@ #include <linux/ioport.h> #include <linux/errno.h> #include <linux/of.h> +#include <linux/io.h> struct of_pci_range_parser { struct device_node *node; @@ -36,6 +37,8 @@ extern struct device_node *of_find_matching_node_by_address( const struct of_device_id *matches, u64 base_address); extern void __iomem *of_iomap(struct device_node *device, int index); +void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name); /* Extract an address from a device, returns the region size and * the address space flags too. 
The PCI version uses a BAR number @@ -57,6 +60,11 @@ extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size); extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ +static inline void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name) +{ + return IOMEM_ERR_PTR(-EINVAL); +} static inline u64 of_translate_address(struct device_node *np, const __be32 *addr) @@ -112,12 +120,7 @@ static inline bool of_dma_is_coherent(struct device_node *np) extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); -void __iomem *of_io_request_and_map(struct device_node *device, - int index, const char *name); #else - -#include <linux/io.h> - static inline int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { @@ -128,12 +131,6 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) { return NULL; } - -static inline void __iomem *of_io_request_and_map(struct device_node *device, - int index, const char *name) -{ - return IOMEM_ERR_PTR(-EINVAL); -} #endif #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 87d6d1632dd4..092186c62ff4 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -51,8 +51,14 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) extern int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); -extern int of_mm_gpiochip_add(struct device_node *np, - struct of_mm_gpio_chip *mm_gc); +extern int of_mm_gpiochip_add_data(struct device_node *np, + struct of_mm_gpio_chip *mm_gc, + void *data); +static inline int of_mm_gpiochip_add(struct device_node *np, + struct of_mm_gpio_chip *mm_gc) +{ + return of_mm_gpiochip_add_data(np, mm_gc, NULL); +} extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); extern int of_gpiochip_add(struct gpio_chip *gc); @@ -67,6 +73,9 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc, static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { + if (flags) + *flags = 0; + return -ENOSYS; } diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 88fa8af2b937..1d99b61adc65 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -267,6 +267,9 @@ struct omap_dma_reg { u8 type; }; +#define SDMA_FILTER_PARAM(hw_req) ((int[]) { (hw_req) }) +struct dma_slave_map; + /* System DMA platform data structure */ struct omap_system_dma_plat_info { const struct omap_dma_reg *reg_map; @@ -278,6 +281,9 @@ struct omap_system_dma_plat_info { void (*clear_dma)(int lch); void (*dma_write)(u32 val, int reg, int lch); u32 (*dma_read)(int reg, int lch); + + const struct dma_slave_map *slave_map; + int slavecnt; }; #ifdef CONFIG_ARCH_OMAP2PLUS diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index bb53c7b86315..19724e6ebd26 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -101,9 +101,6 @@ enum pageflags { #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. 
Don't touch */ #endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - PG_compound_lock, -#endif #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) PG_young, PG_idle, @@ -129,53 +126,104 @@ enum pageflags { /* SLOB */ PG_slob_free = PG_private, + + /* Compound pages. Stored in first tail page's flags */ + PG_double_map = PG_private_2, }; #ifndef __GENERATING_BOUNDS_H +struct page; /* forward declaration */ + +static inline struct page *compound_head(struct page *page) +{ + unsigned long head = READ_ONCE(page->compound_head); + + if (unlikely(head & 1)) + return (struct page *) (head - 1); + return page; +} + +static inline int PageTail(struct page *page) +{ + return READ_ONCE(page->compound_head) & 1; +} + +static inline int PageCompound(struct page *page) +{ + return test_bit(PG_head, &page->flags) || PageTail(page); +} + +/* + * Page flags policies wrt compound pages + * + * PF_ANY: + * the page flag is relevant for small, head and tail pages. + * + * PF_HEAD: + * for compound page all operations related to the page flag applied to + * head page. + * + * PF_NO_TAIL: + * modifications of the page flag must be done on small or head pages, + * checks can be done on tail pages too. + * + * PF_NO_COMPOUND: + * the page flag is not relevant for compound pages. + */ +#define PF_ANY(page, enforce) page +#define PF_HEAD(page, enforce) compound_head(page) +#define PF_NO_TAIL(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ + compound_head(page);}) +#define PF_NO_COMPOUND(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ + page;}) + /* * Macros to create function definitions for page flags */ -#define TESTPAGEFLAG(uname, lname) \ -static inline int Page##uname(const struct page *page) \ - { return test_bit(PG_##lname, &page->flags); } +#define TESTPAGEFLAG(uname, lname, policy) \ +static inline int Page##uname(struct page *page) \ + { return test_bit(PG_##lname, &policy(page, 0)->flags); } -#define SETPAGEFLAG(uname, lname) \ +#define SETPAGEFLAG(uname, lname, policy) \ static inline void SetPage##uname(struct page *page) \ - { set_bit(PG_##lname, &page->flags); } + { set_bit(PG_##lname, &policy(page, 1)->flags); } -#define CLEARPAGEFLAG(uname, lname) \ +#define CLEARPAGEFLAG(uname, lname, policy) \ static inline void ClearPage##uname(struct page *page) \ - { clear_bit(PG_##lname, &page->flags); } + { clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define __SETPAGEFLAG(uname, lname) \ +#define __SETPAGEFLAG(uname, lname, policy) \ static inline void __SetPage##uname(struct page *page) \ - { __set_bit(PG_##lname, &page->flags); } + { __set_bit(PG_##lname, &policy(page, 1)->flags); } -#define __CLEARPAGEFLAG(uname, lname) \ +#define __CLEARPAGEFLAG(uname, lname, policy) \ static inline void __ClearPage##uname(struct page *page) \ - { __clear_bit(PG_##lname, &page->flags); } + { __clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define TESTSETFLAG(uname, lname) \ +#define TESTSETFLAG(uname, lname, policy) \ static inline int TestSetPage##uname(struct page *page) \ - { return test_and_set_bit(PG_##lname, &page->flags); } + { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } -#define TESTCLEARFLAG(uname, lname) \ +#define TESTCLEARFLAG(uname, lname, policy) \ static inline int TestClearPage##uname(struct page *page) \ - { return test_and_clear_bit(PG_##lname, &page->flags); } - -#define __TESTCLEARFLAG(uname, lname) \ -static inline int __TestClearPage##uname(struct page *page) \ - { return __test_and_clear_bit(PG_##lname, 
&page->flags); } + { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } -#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ - SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) +#define PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, policy) \ + SETPAGEFLAG(uname, lname, policy) \ + CLEARPAGEFLAG(uname, lname, policy) -#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ - __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) +#define __PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, policy) \ + __SETPAGEFLAG(uname, lname, policy) \ + __CLEARPAGEFLAG(uname, lname, policy) -#define TESTSCFLAG(uname, lname) \ - TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) +#define TESTSCFLAG(uname, lname, policy) \ + TESTSETFLAG(uname, lname, policy) \ + TESTCLEARFLAG(uname, lname, policy) #define TESTPAGEFLAG_FALSE(uname) \ static inline int Page##uname(const struct page *page) { return 0; } @@ -195,56 +243,62 @@ static inline int TestSetPage##uname(struct page *page) { return 0; } #define TESTCLEARFLAG_FALSE(uname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } -#define __TESTCLEARFLAG_FALSE(uname) \ -static inline int __TestClearPage##uname(struct page *page) { return 0; } - #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \ SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname) #define TESTSCFLAG_FALSE(uname) \ TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) -struct page; /* forward declaration */ - -TESTPAGEFLAG(Locked, locked) -PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) -PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) - __SETPAGEFLAG(Referenced, referenced) -PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) -PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) -PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) - TESTCLEARFLAG(Active, active) -__PAGEFLAG(Slab, slab) -PAGEFLAG(Checked, checked) /* Used by some filesystems */ -PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ -PAGEFLAG(SavePinned, savepinned); /* Xen */ -PAGEFLAG(Foreign, foreign); /* Xen */ -PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) -PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) - __SETPAGEFLAG(SwapBacked, swapbacked) - -__PAGEFLAG(SlobFree, slob_free) +__PAGEFLAG(Locked, locked, PF_NO_TAIL) +PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) +PAGEFLAG(Referenced, referenced, PF_HEAD) + TESTCLEARFLAG(Referenced, referenced, PF_HEAD) + __SETPAGEFLAG(Referenced, referenced, PF_HEAD) +PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) + __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) +PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) +PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) + TESTCLEARFLAG(Active, active, PF_HEAD) +__PAGEFLAG(Slab, slab, PF_NO_TAIL) +__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) +PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ + +/* Xen */ +PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) + TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) +PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); +PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); + +PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) + __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) +PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) + __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) + __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) /* * Private page 
markings that may be used by the filesystem that owns the page * for its own purposes. * - PG_private and PG_private_2 cause releasepage() and co to be invoked */ -PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) - __CLEARPAGEFLAG(Private, private) -PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) -PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) +PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) + __CLEARPAGEFLAG(Private, private, PF_ANY) +PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) +PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) + TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ -TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) -PAGEFLAG(MappedToDisk, mappedtodisk) +TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) + TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ -PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) -PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) +PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND) + TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND) +PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) + TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) #ifdef CONFIG_HIGHMEM /* @@ -257,31 +311,33 @@ PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP -PAGEFLAG(SwapCache, swapcache) +PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) #else PAGEFLAG_FALSE(SwapCache) #endif -PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) - TESTCLEARFLAG(Unevictable, unevictable) +PAGEFLAG(Unevictable, unevictable, PF_HEAD) + __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD) + TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD) #ifdef CONFIG_MMU -PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) - TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) +PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) + __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) + TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL) #else PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked) - TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) + TESTSCFLAG_FALSE(Mlocked) #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED -PAGEFLAG(Uncached, uncached) +PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND) #else PAGEFLAG_FALSE(Uncached) #endif #ifdef CONFIG_MEMORY_FAILURE -PAGEFLAG(HWPoison, hwpoison) -TESTSCFLAG(HWPoison, hwpoison) +PAGEFLAG(HWPoison, hwpoison, PF_ANY) +TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) #else PAGEFLAG_FALSE(HWPoison) @@ -289,10 +345,10 @@ PAGEFLAG_FALSE(HWPoison) #endif #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) -TESTPAGEFLAG(Young, young) -SETPAGEFLAG(Young, young) -TESTCLEARFLAG(Young, young) -PAGEFLAG(Idle, idle) +TESTPAGEFLAG(Young, young, PF_ANY) +SETPAGEFLAG(Young, young, PF_ANY) +TESTCLEARFLAG(Young, young, PF_ANY) +PAGEFLAG(Idle, idle, PF_ANY) #endif /* @@ -317,6 +373,7 @@ PAGEFLAG(Idle, idle) static inline int PageAnon(struct page *page) { + page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; } @@ -329,6 +386,7 @@ static inline int PageAnon(struct page *page) */ static inline int PageKsm(struct page *page) { + page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == 
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); } @@ -340,8 +398,9 @@ u64 stable_page_flags(struct page *page); static inline int PageUptodate(struct page *page) { - int ret = test_bit(PG_uptodate, &(page)->flags); - + int ret; + page = compound_head(page); + ret = test_bit(PG_uptodate, &(page)->flags); /* * Must ensure that the data we read out of the page is loaded * _after_ we've loaded page->flags to check for PageUptodate. @@ -358,22 +417,24 @@ static inline int PageUptodate(struct page *page) static inline void __SetPageUptodate(struct page *page) { + VM_BUG_ON_PAGE(PageTail(page), page); smp_wmb(); - __set_bit(PG_uptodate, &(page)->flags); + __set_bit(PG_uptodate, &page->flags); } static inline void SetPageUptodate(struct page *page) { + VM_BUG_ON_PAGE(PageTail(page), page); /* * Memory barrier must be issued before setting the PG_uptodate bit, * so that all previous stores issued in order to bring the page * uptodate are actually visible before PageUptodate becomes true. */ smp_wmb(); - set_bit(PG_uptodate, &(page)->flags); + set_bit(PG_uptodate, &page->flags); } -CLEARPAGEFLAG(Uptodate, uptodate) +CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) int test_clear_page_writeback(struct page *page); int __test_set_page_writeback(struct page *page, bool keep_write); @@ -393,12 +454,7 @@ static inline void set_page_writeback_keepwrite(struct page *page) test_set_page_writeback_keepwrite(page); } -__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) - -static inline int PageTail(struct page *page) -{ - return READ_ONCE(page->compound_head) & 1; -} +__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) static inline void set_compound_head(struct page *page, struct page *head) { @@ -410,20 +466,6 @@ static inline void clear_compound_head(struct page *page) WRITE_ONCE(page->compound_head, 0); } -static inline struct page *compound_head(struct page *page) -{ - unsigned long head = READ_ONCE(page->compound_head); - - if (unlikely(head & 1)) - return (struct page *) (head - 1); - return page; -} - -static inline int PageCompound(struct page *page) -{ - return PageHead(page) || PageTail(page); - -} #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { @@ -484,22 +526,43 @@ static inline int PageTransTail(struct page *page) return PageTail(page); } -#else - -static inline int PageTransHuge(struct page *page) +/* + * PageDoubleMap indicates that the compound page is mapped with PTEs as well + * as PMDs. + * + * This is required for optimization of rmap operations for THP: we can postpone + * per small page mapcount accounting (and its overhead from atomic operations) + * until the first PMD split. + * + * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up + * by one. This reference will go away with last compound_mapcount. + * + * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap(). 
+ */ +static inline int PageDoubleMap(struct page *page) { - return 0; + return PageHead(page) && test_bit(PG_double_map, &page[1].flags); } -static inline int PageTransCompound(struct page *page) +static inline int TestSetPageDoubleMap(struct page *page) { - return 0; + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_set_bit(PG_double_map, &page[1].flags); } -static inline int PageTransTail(struct page *page) +static inline int TestClearPageDoubleMap(struct page *page) { - return 0; + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_clear_bit(PG_double_map, &page[1].flags); } + +#else +TESTPAGEFLAG_FALSE(TransHuge) +TESTPAGEFLAG_FALSE(TransCompound) +TESTPAGEFLAG_FALSE(TransTail) +TESTPAGEFLAG_FALSE(DoubleMap) + TESTSETFLAG_FALSE(DoubleMap) + TESTCLEARFLAG_FALSE(DoubleMap) #endif /* @@ -583,12 +646,6 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) #define __PG_MLOCKED 0 #endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define __PG_COMPOUND_LOCK (1 << PG_compound_lock) -#else -#define __PG_COMPOUND_LOCK 0 -#endif - /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. @@ -598,8 +655,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | \ - __PG_COMPOUND_LOCK) + 1 << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. @@ -626,6 +682,10 @@ static inline int page_has_private(struct page *page) return !!(page->flags & PAGE_FLAGS_PRIVATE); } +#undef PF_ANY +#undef PF_HEAD +#undef PF_NO_TAIL +#undef PF_NO_COMPOUND #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 26eabf5ec718..4d08b6c33557 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -394,10 +394,21 @@ static inline struct page *read_mapping_page(struct address_space *mapping, */ static inline pgoff_t page_to_pgoff(struct page *page) { + pgoff_t pgoff; + if (unlikely(PageHeadHuge(page))) return page->index << compound_order(page); - else + + if (likely(!PageTransTail(page))) return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + + /* + * We don't initialize ->index for tail pages: calculate based on + * head page + */ + pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff += page - compound_head(page); + return pgoff; } /* @@ -433,18 +444,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); -static inline void __set_page_locked(struct page *page) -{ - __set_bit(PG_locked, &page->flags); -} - -static inline void __clear_page_locked(struct page *page) -{ - __clear_bit(PG_locked, &page->flags); -} - static inline int trylock_page(struct page *page) { + page = compound_head(page); return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); } @@ -497,9 +499,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page, static inline int wait_on_page_locked_killable(struct page *page) { - if (PageLocked(page)) - return wait_on_page_bit_killable(page, PG_locked); - return 0; + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); } extern wait_queue_head_t *page_waitqueue(struct page *page); @@ -518,7 +520,7 @@ static inline void 
wake_up_page(struct page *page, int bit) static inline void wait_on_page_locked(struct page *page) { if (PageLocked(page)) - wait_on_page_bit(page, PG_locked); + wait_on_page_bit(compound_head(page), PG_locked); } /* @@ -664,17 +666,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run __set_page_locked() against it. + * the page is new, so we can just run __SetPageLocked() against it. */ static inline int add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error; - __set_page_locked(page); + __SetPageLocked(page); error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); if (unlikely(error)) - __clear_page_locked(page); + __ClearPageLocked(page); return error; } diff --git a/include/linux/pci.h b/include/linux/pci.h index 6ae25aae88fd..d86378c226fb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1946,6 +1946,16 @@ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } #endif /* CONFIG_OF */ +#ifdef CONFIG_ACPI +struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); + +void +pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); +#else +static inline struct irq_domain * +pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d9ba49cedc5d..1acbefc4bbda 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2495,6 +2495,8 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff +#define PCI_VENDOR_ID_NETRONOME 0x19ee + #define PCI_VENDOR_ID_QMI 0x1a32 #define PCI_VENDOR_ID_AZWAVE 0x1a3b diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 12c9b485beb7..84f542df7ff5 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -116,7 +116,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); */ static inline void percpu_ref_kill(struct percpu_ref *ref) { - return percpu_ref_kill_and_confirm(ref, NULL); + percpu_ref_kill_and_confirm(ref, NULL); } /* diff --git a/include/linux/percpu.h b/include/linux/percpu.h index caebf2a758dc..4bc6dafb703e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -18,12 +18,6 @@ #define PERCPU_MODULE_RESERVE 0 #endif -#ifndef PERCPU_ENOUGH_ROOM -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) -#endif - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bfa673bb822d..83b5e34c6580 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -111,8 +111,6 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int armpmu_register(struct arm_pmu *armpmu, int type); - u64 armpmu_event_update(struct perf_event *event); int armpmu_event_set_period(struct perf_event *event); diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 7646637221f3..2d8e49711b63 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -3,11 +3,21 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> + +/* + * pfn_t: 
encapsulates a page-frame number that is optionally backed + * by memmap (struct page). Whether a pfn_t has a 'struct page' + * backing is indicated by flags in the high bits of the value. + */ +typedef struct { + unsigned long val; +} pfn_t; #endif #define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) +#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) #endif diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h new file mode 100644 index 000000000000..0703b5360d31 --- /dev/null +++ b/include/linux/pfn_t.h @@ -0,0 +1,102 @@ +#ifndef _LINUX_PFN_T_H_ +#define _LINUX_PFN_T_H_ +#include <linux/mm.h> + +/* + * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags + * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry + * PFN_SG_LAST - pfn references a page and is the last scatterlist entry + * PFN_DEV - pfn is not covered by system memmap by default + * PFN_MAP - pfn has a dynamic page mapping established by a device driver + */ +#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \ + << (BITS_PER_LONG - PAGE_SHIFT)) +#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1)) +#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2)) +#define PFN_DEV (1UL << (BITS_PER_LONG - 3)) +#define PFN_MAP (1UL << (BITS_PER_LONG - 4)) + +static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags) +{ + pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; + + return pfn_t; +} + +/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */ +static inline pfn_t pfn_to_pfn_t(unsigned long pfn) +{ + return __pfn_to_pfn_t(pfn, 0); +} + +extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags); + +static inline bool pfn_t_has_page(pfn_t pfn) +{ + return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0; +} + +static inline unsigned long pfn_t_to_pfn(pfn_t pfn) +{ + return pfn.val & ~PFN_FLAGS_MASK; +} + +static inline struct page *pfn_t_to_page(pfn_t pfn) +{ + if (pfn_t_has_page(pfn)) + return pfn_to_page(pfn_t_to_pfn(pfn)); + return NULL; +} + +static inline dma_addr_t pfn_t_to_phys(pfn_t pfn) +{ + return PFN_PHYS(pfn_t_to_pfn(pfn)); +} + +static inline void *pfn_t_to_virt(pfn_t pfn) +{ + if (pfn_t_has_page(pfn)) + return __va(pfn_t_to_phys(pfn)); + return NULL; +} + +static inline pfn_t page_to_pfn_t(struct page *page) +{ + return pfn_to_pfn_t(page_to_pfn(page)); +} + +static inline int pfn_t_valid(pfn_t pfn) +{ + return pfn_valid(pfn_t_to_pfn(pfn)); +} + +#ifdef CONFIG_MMU +static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pte(pfn_t_to_pfn(pfn), pgprot); +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); +} +#endif + +#ifdef __HAVE_ARCH_PTE_DEVMAP +static inline bool pfn_t_devmap(pfn_t pfn) +{ + const unsigned long flags = PFN_DEV|PFN_MAP; + + return (pfn.val & flags) == flags; +} +#else +static inline bool pfn_t_devmap(pfn_t pfn) +{ + return false; +} +pte_t pte_mkdevmap(pte_t pte); +pmd_t pmd_mkdevmap(pmd_t pmd); +#endif +#endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 05fde31b6dc6..d6f3641e7933 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -16,8 +16,10 @@ #ifndef __PHY_H #define __PHY_H +#include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/ethtool.h> +#include <linux/mdio.h> 
#include <linux/mii.h> #include <linux/module.h> #include <linux/timer.h> @@ -58,6 +60,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_HAS_MAGICANEG 0x00000002 #define PHY_IS_INTERNAL 0x00000004 +#define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ typedef enum { @@ -158,8 +161,8 @@ struct mii_bus { const char *name; char id[MII_BUS_ID_SIZE]; void *priv; - int (*read)(struct mii_bus *bus, int phy_id, int regnum); - int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); + int (*read)(struct mii_bus *bus, int addr, int regnum); + int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); int (*reset)(struct mii_bus *bus); /* @@ -178,7 +181,7 @@ struct mii_bus { struct device dev; /* list of all PHYs on bus */ - struct phy_device *phy_map[PHY_MAX_ADDR]; + struct mdio_device *mdio_map[PHY_MAX_ADDR]; /* PHY addresses to be ignored when probing */ u32 phy_mask; @@ -187,10 +190,10 @@ struct mii_bus { u32 phy_ignore_ta_mask; /* - * Pointer to an array of interrupts, each PHY's - * interrupt at the index matching its address + * An array of interrupts, each PHY's interrupt at the index + * matching its address */ - int *irq; + int irq[PHY_MAX_ADDR]; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) @@ -212,11 +215,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); -int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); -int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); -int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); -int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); - #define PHY_INTERRUPT_DISABLED 0x0 #define PHY_INTERRUPT_ENABLED 0x80000000 @@ -361,14 +359,12 @@ struct phy_c45_device_ids { * handling, as well as handling shifts in PHY hardware state */ struct phy_device { + struct mdio_device mdio; + /* Information about the PHY type */ /* And management functions */ struct phy_driver *drv; - struct mii_bus *bus; - - struct device dev; - u32 phy_id; struct phy_c45_device_ids c45_ids; @@ -384,9 +380,6 @@ struct phy_device { phy_interface_t interface; - /* Bus address of the PHY (0-31) */ - int addr; - /* * forced speed & duplex (no autoneg) * partner speed & duplex & pause (autoneg) @@ -435,10 +428,12 @@ struct phy_device { void (*adjust_link)(struct net_device *dev); }; -#define to_phy_device(d) container_of(d, struct phy_device, dev) +#define to_phy_device(d) container_of(to_mdio_device(d), \ + struct phy_device, mdio) /* struct phy_driver: Driver structure for a particular PHY type * + * driver_data: static driver data * phy_id: The result of reading the UID registers of this PHY * type, and ANDing them with the phy_id_mask. This driver * only works for PHYs with IDs which match this field @@ -448,7 +443,6 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) - * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. Note that none of these @@ -459,6 +453,7 @@ struct phy_device { * supported in the driver). 
*/ struct phy_driver { + struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; @@ -589,9 +584,14 @@ struct phy_driver { int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); - struct device_driver driver; + /* Get statistics from the phy using ethtool */ + int (*get_sset_count)(struct phy_device *dev); + void (*get_strings)(struct phy_device *dev, u8 *data); + void (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); }; -#define to_phy_driver(d) container_of(d, struct phy_driver, driver) +#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ + struct phy_driver, mdiodrv) #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff @@ -619,7 +619,7 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) if (!phydev->is_c45) return -EOPNOTSUPP; - return mdiobus_read(phydev->bus, phydev->addr, + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); } @@ -627,14 +627,12 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) * phy_read_mmd_indirect - reads data from the MMD registers * @phydev: The PHY device bus * @prtad: MMD Address - * @devad: MMD DEVAD * @addr: PHY address on the MII bus * * Description: it reads data from the MMD registers (clause 22 to access to * clause 45) of the specified phy address. */ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr); +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad); /** * phy_read - Convenience function for reading a given PHY register @@ -647,7 +645,7 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, */ static inline int phy_read(struct phy_device *phydev, u32 regnum) { - return mdiobus_read(phydev->bus, phydev->addr, regnum); + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } /** @@ -662,7 +660,7 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) */ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) { - return mdiobus_write(phydev->bus, phydev->addr, regnum, val); + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** @@ -725,7 +723,7 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff); - return mdiobus_write(phydev->bus, phydev->addr, regnum, val); + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** @@ -733,14 +731,13 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, * @phydev: The PHY device * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: PHY address on the MII bus * @data: data to write in the MMD register * * Description: Write data from the MMD registers of the specified * phy address. */ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr, u32 data); + int devad, u32 data); struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, @@ -775,6 +772,20 @@ static inline int phy_read_status(struct phy_device *phydev) return phydev->drv->read_status(phydev); } +#define phydev_err(_phydev, format, args...) \ + dev_err(&_phydev->mdio.dev, format, ##args) + +#define phydev_dbg(_phydev, format, args...) 
\ + dev_dbg(&_phydev->mdio.dev, format, ##args); + +static inline const char *phydev_name(const struct phy_device *phydev) +{ + return dev_name(&phydev->mdio.dev); +} + +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) + __printf(2, 3); +void phy_attached_info(struct phy_device *phydev); int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); @@ -787,8 +798,9 @@ int genphy_resume(struct phy_device *phydev); int genphy_soft_reset(struct phy_device *phydev); void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); -int phy_driver_register(struct phy_driver *new_driver); -int phy_drivers_register(struct phy_driver *new_driver, int n); +int phy_driver_register(struct phy_driver *new_driver, struct module *owner); +int phy_drivers_register(struct phy_driver *new_driver, int n, + struct module *owner); void phy_state_machine(struct work_struct *work); void phy_change(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev, int new_link); @@ -833,7 +845,7 @@ extern struct bus_type mdio_bus_type; #define phy_module_driver(__phy_drivers, __count) \ static int __init phy_module_init(void) \ { \ - return phy_drivers_register(__phy_drivers, __count); \ + return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \ } \ module_init(phy_module_init); \ static void __exit phy_module_exit(void) \ diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h index dc2c541a619b..2e5fb870efa9 100644 --- a/include/linux/phy/omap_usb.h +++ b/include/linux/phy/omap_usb.h @@ -30,6 +30,12 @@ struct usb_dpll_params { u32 mf; }; +enum omap_usb_phy_type { + TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ + TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ + TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ +}; + struct omap_usb { struct usb_phy phy; struct phy_companion *comparator; @@ -40,11 +46,20 @@ struct omap_usb { struct clk *wkupclk; struct clk *optclk; u8 flags; + enum omap_usb_phy_type type; + struct regmap *syscon_phy_power; /* ctrl. reg. acces */ + unsigned int power_reg; /* power reg. 
index within syscon */ + u32 mask; + u32 power_on; + u32 power_off; }; struct usb_phy_data { const char *label; u8 flags; + u32 mask; + u32 power_on; + u32 power_off; }; /* Driver Flags */ @@ -52,6 +67,14 @@ struct usb_phy_data { #define OMAP_USB2_HAS_SET_VBUS (1 << 1) #define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) +#define OMAP_DEV_PHY_PD BIT(0) +#define OMAP_USB2_PHY_PD BIT(28) + +#define AM437X_USB2_PHY_PD BIT(0) +#define AM437X_USB2_OTG_PD BIT(1) +#define AM437X_USB2_OTGVDET_EN BIT(19) +#define AM437X_USB2_OTGSESSEND_EN BIT(20) + #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) #if defined(CONFIG_OMAP_USB2) || defined(CONFIG_OMAP_USB2_MODULE) diff --git a/include/linux/pim.h b/include/linux/pim.h index 252bf6644c51..e1d756f81348 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -13,6 +13,11 @@ #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) +static inline bool ipmr_pimsm_enabled(void) +{ + return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); +} + /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ struct pimreghdr { diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index 5e0bc779e6c5..15bf56ee8af7 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -13,6 +13,9 @@ */ #define S3C64XX_AC97_GPD 0 #define S3C64XX_AC97_GPE 1 + +#include <linux/dmaengine.h> + extern void s3c64xx_ac97_setup_gpio(int); struct samsung_i2s { @@ -39,6 +42,11 @@ struct samsung_i2s { */ struct s3c_audio_pdata { int (*cfg_gpio)(struct platform_device *); + dma_filter_fn dma_filter; + void *dma_playback; + void *dma_capture; + void *dma_play_sec; + void *dma_capture_mic; union { struct samsung_i2s i2s; } type; diff --git a/include/linux/platform_data/camera-rcar.h b/include/linux/platform_data/camera-rcar.h deleted file mode 100644 index dfc83c581593..000000000000 --- a/include/linux/platform_data/camera-rcar.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Platform data for Renesas R-Car VIN soc-camera driver - * - * Copyright (C) 2011-2013 Renesas Solutions Corp. - * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __CAMERA_RCAR_H_ -#define __CAMERA_RCAR_H_ - -#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0) -#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1) -#define RCAR_VIN_BT601 (1 << 2) -#define RCAR_VIN_BT656 (1 << 3) - -struct rcar_vin_platform_data { - unsigned int flags; -}; - -#endif /* __CAMERA_RCAR_H_ */ diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h deleted file mode 100644 index 648b8ea61a22..000000000000 --- a/include/linux/platform_data/dma-rcar-hpbdma.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) 2011-2013 Renesas Electronics Corporation - * Copyright (C) 2013 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
- */ - -#ifndef __DMA_RCAR_HPBDMA_H -#define __DMA_RCAR_HPBDMA_H - -#include <linux/bitops.h> -#include <linux/types.h> - -/* Transmit sizes and respective register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_MAX -}; - -/* DMA control register (DCR) bits */ -#define HPB_DMAE_DCR_DTAMD (1u << 26) -#define HPB_DMAE_DCR_DTAC (1u << 25) -#define HPB_DMAE_DCR_DTAU (1u << 24) -#define HPB_DMAE_DCR_DTAU1 (1u << 23) -#define HPB_DMAE_DCR_SWMD (1u << 22) -#define HPB_DMAE_DCR_BTMD (1u << 21) -#define HPB_DMAE_DCR_PKMD (1u << 20) -#define HPB_DMAE_DCR_CT (1u << 18) -#define HPB_DMAE_DCR_ACMD (1u << 17) -#define HPB_DMAE_DCR_DIP (1u << 16) -#define HPB_DMAE_DCR_SMDL (1u << 13) -#define HPB_DMAE_DCR_SPDAM (1u << 12) -#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10) -#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10) -#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10) -#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10) -#define HPB_DMAE_DCR_SPDS_MASK (3u << 8) -#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8) -#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8) -#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8) -#define HPB_DMAE_DCR_DMDL (1u << 5) -#define HPB_DMAE_DCR_DPDAM (1u << 4) -#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2) -#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2) -#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2) -#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2) -#define HPB_DMAE_DCR_DPDS_MASK (3u << 0) -#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0) -#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0) -#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0) - -/* Asynchronous reset register (ASYNCRSTR) bits */ -#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10) -#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9) -#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8) -#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7) -#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6) -#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5) -#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4) -#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3) -#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2) -#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1) -#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0) - -struct hpb_dmae_slave_config { - unsigned int id; - dma_addr_t addr; - u32 dcr; - u32 port; - u32 rstr; - u32 mdr; - u32 mdm; - u32 flags; -#define HPB_DMAE_SET_ASYNC_RESET BIT(0) -#define HPB_DMAE_SET_ASYNC_MODE BIT(1) - u32 dma_ch; -}; - -#define HPB_DMAE_CHANNEL(_irq, _s_id) \ -{ \ - .ch_irq = _irq, \ - .s_id = _s_id, \ -} - -struct hpb_dmae_channel { - unsigned int ch_irq; - unsigned int s_id; -}; - -struct hpb_dmae_pdata { - const struct hpb_dmae_slave_config *slaves; - int num_slaves; - const struct hpb_dmae_channel *channels; - int num_channels; - const unsigned int ts_shift[XMIT_SZ_MAX]; - int num_hw_channels; -}; - -#endif diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 4299f4ba03bd..0a533f94438f 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -53,12 +53,16 @@ enum dma_event_q { #define EDMA_CTLR(i) ((i) >> 16) #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) +#define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) + struct edma_rsv_info { const s16 (*rsv_chans)[2]; const s16 (*rsv_slots)[2]; }; +struct dma_slave_map; + /* platform_data for EDMA driver */ struct edma_soc_info { /* @@ -76,6 +80,9 @@ struct edma_soc_info { s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; + + const struct dma_slave_map *slave_map; + int slavecnt; }; #endif diff --git a/include/linux/platform_data/gpio-rcar.h b/include/linux/platform_data/gpio-rcar.h deleted file mode 100644 
index 2d8d69432813..000000000000 --- a/include/linux/platform_data/gpio-rcar.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Renesas R-Car GPIO Support - * - * Copyright (C) 2013 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GPIO_RCAR_H__ -#define __GPIO_RCAR_H__ - -struct gpio_rcar_config { - int gpio_base; - unsigned int irq_base; - unsigned int number_of_pins; - const char *pctl_name; - unsigned has_both_edge_trigger:1; -}; - -#define RCAR_GP_PIN(bank, pin) (((bank) * 32) + (pin)) - -#endif /* __GPIO_RCAR_H__ */ diff --git a/include/linux/platform_data/irq-renesas-intc-irqpin.h b/include/linux/platform_data/irq-renesas-intc-irqpin.h deleted file mode 100644 index e4cb911066a6..000000000000 --- a/include/linux/platform_data/irq-renesas-intc-irqpin.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Renesas INTC External IRQ Pin Driver - * - * Copyright (C) 2013 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __IRQ_RENESAS_INTC_IRQPIN_H__ -#define __IRQ_RENESAS_INTC_IRQPIN_H__ - -struct renesas_intc_irqpin_config { - unsigned int sense_bitfield_width; - unsigned int irq_base; - bool control_parent; -}; - -#endif /* __IRQ_RENESAS_INTC_IRQPIN_H__ */ diff --git a/include/linux/platform_data/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h index 7ded6f1f74bc..7ded6f1f74bc 100644 --- a/include/linux/platform_data/camera-mx2.h +++ b/include/linux/platform_data/media/camera-mx2.h diff --git a/include/linux/platform_data/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h index a910dadc8258..a910dadc8258 100644 --- a/include/linux/platform_data/camera-mx3.h +++ b/include/linux/platform_data/media/camera-mx3.h diff --git a/include/linux/platform_data/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h index 6709b1cd7c77..6709b1cd7c77 100644 --- a/include/linux/platform_data/camera-pxa.h +++ b/include/linux/platform_data/media/camera-pxa.h diff --git a/include/linux/platform_data/coda.h b/include/linux/platform_data/media/coda.h index 6ad4410d9e20..6ad4410d9e20 100644 --- a/include/linux/platform_data/coda.h +++ b/include/linux/platform_data/media/coda.h diff --git a/include/media/gpio-ir-recv.h b/include/linux/platform_data/media/gpio-ir-recv.h index 0142736a59db..0c298f569d5a 100644 --- a/include/media/gpio-ir-recv.h +++ b/include/linux/platform_data/media/gpio-ir-recv.h @@ -21,4 +21,3 @@ struct gpio_ir_recv_platform_data { }; #endif /* __GPIO_IR_RECV_H__ */ - diff --git a/include/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h index 104aa892f31b..104aa892f31b 100644 --- a/include/media/ir-rx51.h +++ b/include/linux/platform_data/media/ir-rx51.h diff --git a/include/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h index 7611963a257f..7611963a257f 100644 --- a/include/media/mmp-camera.h +++ b/include/linux/platform_data/media/mmp-camera.h diff --git a/include/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h index 819767cf04d4..819767cf04d4 100644 --- a/include/media/omap1_camera.h +++ b/include/linux/platform_data/media/omap1_camera.h diff --git a/include/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h index 0d7620db5e32..0d7620db5e32 100644 --- a/include/media/omap4iss.h +++ b/include/linux/platform_data/media/omap4iss.h diff --git a/include/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h index 181642b8d0a5..bb9cacb0cbb0 100644 --- a/include/media/s5p_hdmi.h +++ b/include/linux/platform_data/media/s5p_hdmi.h @@ -34,4 +34,3 @@ struct s5p_hdmi_platform_data { }; #endif /* S5P_HDMI_H */ - diff --git a/include/media/si4713.h b/include/linux/platform_data/media/si4713.h index be4f58e2440b..932668ad54f7 100644 --- a/include/media/si4713.h +++ b/include/linux/platform_data/media/si4713.h @@ -1,5 +1,5 @@ /* - * include/media/si4713.h + * include/linux/platform_data/media/si4713.h * * Board related data definitions for Si4713 i2c device driver. 
* diff --git a/include/media/sii9234.h b/include/linux/platform_data/media/sii9234.h index 6a4a809fe9a3..6a4a809fe9a3 100644 --- a/include/media/sii9234.h +++ b/include/linux/platform_data/media/sii9234.h diff --git a/include/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h index 1e5065dab430..1e5065dab430 100644 --- a/include/media/soc_camera_platform.h +++ b/include/linux/platform_data/media/soc_camera_platform.h diff --git a/include/media/timb_radio.h b/include/linux/platform_data/media/timb_radio.h index a40a6a348d21..a40a6a348d21 100644 --- a/include/media/timb_radio.h +++ b/include/linux/platform_data/media/timb_radio.h diff --git a/include/media/timb_video.h b/include/linux/platform_data/media/timb_video.h index 70ae43970a49..70ae43970a49 100644 --- a/include/media/timb_video.h +++ b/include/linux/platform_data/media/timb_video.h diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h index cfda59b226ee..ca13992089b8 100644 --- a/include/linux/platform_data/microread.h +++ b/include/linux/platform_data/microread.h @@ -1,5 +1,5 @@ /* - * Driver include for the PN544 NFC chip. + * Driver include for the Inside Secure microread NFC Chip. * * Copyright (C) 2011 Tieto Poland * Copyright (C) 2012 Intel Corporation. All rights reserved. diff --git a/include/linux/platform_data/mmc-mvsdio.h b/include/linux/platform_data/mmc-mvsdio.h deleted file mode 100644 index d02704cd3695..000000000000 --- a/include/linux/platform_data/mmc-mvsdio.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __MMC_MVSDIO_H -#define __MMC_MVSDIO_H - -#include <linux/mbus.h> - -struct mvsdio_platform_data { - unsigned int clock; - int gpio_card_detect; - int gpio_write_protect; -}; - -#endif diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index d3889b98a1a1..fb5625bcca9a 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -40,6 +40,8 @@ struct s3c64xx_spi_info { int num_cs; int (*cfg_gpio)(void); dma_filter_fn filter; + void *dma_tx; + void *dma_rx; }; /** diff --git a/include/linux/platform_data/usb-rcar-phy.h b/include/linux/platform_data/usb-rcar-phy.h deleted file mode 100644 index 8ec6964a32a5..000000000000 --- a/include/linux/platform_data/usb-rcar-phy.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2013 Renesas Solutions Corp. - * Copyright (C) 2013 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __USB_RCAR_PHY_H -#define __USB_RCAR_PHY_H - -#include <linux/types.h> - -struct rcar_phy_platform_data { - bool ferrite_bead:1; /* (R8A7778 only) */ - - bool port1_func:1; /* true: port 1 used by function, false: host */ - unsigned penc1:1; /* Output of the PENC1 pin in function mode */ - struct { /* Overcurrent pin control for ports 0..2 */ - bool select_3_3v:1; /* true: USB_OVCn pin, false: OVCn pin */ - /* Set to false on port 1 in function mode */ - bool active_high:1; /* true: active high, false: active low */ - /* Set to true on port 1 in function mode */ - } ovc_pin[3]; /* (R8A7778 only has 2 ports) */ -}; - -#endif /* __USB_RCAR_PHY_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index dc777be5f2e1..03b755521fd9 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -18,6 +18,7 @@ #define PLATFORM_DEVID_AUTO (-2) struct mfd_cell; +struct property_set; struct platform_device { const char *name; @@ -51,6 +52,7 @@ extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); extern int platform_get_irq(struct platform_device *, unsigned int); +extern int platform_irq_count(struct platform_device *); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); @@ -70,6 +72,8 @@ struct platform_device_info { const void *data; size_t size_data; u64 dma_mask; + + const struct property_set *pset; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); @@ -167,6 +171,8 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); +extern int platform_device_add_properties(struct platform_device *pdev, + const struct property_set *pset); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 9a2e50337af9..95403d2ccaf5 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -55,6 +55,11 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq); int dev_pm_opp_disable(struct device *dev, unsigned long freq); struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev); +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count); +void dev_pm_opp_put_supported_hw(struct device *dev); +int dev_pm_opp_set_prop_name(struct device *dev, const char *name); +void dev_pm_opp_put_prop_name(struct device *dev); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -129,6 +134,23 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( { return ERR_PTR(-EINVAL); } + +static inline int dev_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} + +static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_put_prop_name(struct device *dev) {} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h 
index 3bdbb4189780..7af093d6a4dd 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -39,6 +39,7 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -143,6 +144,10 @@ static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) { return -ENOSYS; } +static inline int pm_runtime_get_if_in_use(struct device *dev) +{ + return -EINVAL; +} static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } diff --git a/include/linux/poison.h b/include/linux/poison.h index 317e16de09e5..4a27153574e2 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -27,11 +27,15 @@ * Magic number "tsta" to indicate a static timer initializer * for the object debugging code. */ -#define TIMER_ENTRY_STATIC ((void *) 0x74737461) +#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/debug-pagealloc.c **********/ #define PAGE_POISON 0xaa +/********** mm/page_alloc.c ************/ + +#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA) + /********** mm/slab.c **********/ /* * Magic nums for obj red zoning. diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 6f14ee295822..e5e8ec40278d 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -9,16 +9,12 @@ #ifndef _POSIX_ACL_XATTR_H #define _POSIX_ACL_XATTR_H +#include <uapi/linux/xattr.h> #include <linux/posix_acl.h> -/* Extended attribute names */ -#define POSIX_ACL_XATTR_ACCESS "system.posix_acl_access" -#define POSIX_ACL_XATTR_DEFAULT "system.posix_acl_default" - /* Supported ACL a_version fields */ #define POSIX_ACL_XATTR_VERSION 0x0002 - /* An undefined entry e_id value */ #define ACL_UNDEFINED_ID (-1) diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 45f6a7b5b3cb..998d8f1c3c91 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -1,6 +1,16 @@ #ifndef __LINUX_BQ27X00_BATTERY_H__ #define __LINUX_BQ27X00_BATTERY_H__ +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27530, /* bq27530, bq27531 */ + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + /** * struct bq27xxx_plaform_data - Platform data for bq27xxx devices * @name: Name of the battery. @@ -12,20 +22,47 @@ * register to be read. The return value should either be the content of * the passed register or an error value. 
*/ -enum bq27xxx_chip { - BQ27000 = 1, /* bq27000, bq27200 */ - BQ27010, /* bq27010, bq27210 */ - BQ27500, /* bq27500, bq27510, bq27520 */ - BQ27530, /* bq27530, bq27531 */ - BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ - BQ27545, /* bq27545 */ - BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ -}; - struct bq27xxx_platform_data { const char *name; enum bq27xxx_chip chip; int (*read)(struct device *dev, unsigned int); }; +struct bq27xxx_device_info; +struct bq27xxx_access_methods { + int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single); +}; + +struct bq27xxx_reg_cache { + int temperature; + int time_to_empty; + int time_to_empty_avg; + int time_to_full; + int charge_full; + int cycle_count; + int capacity; + int energy; + int flags; + int power_avg; + int health; +}; + +struct bq27xxx_device_info { + struct device *dev; + enum bq27xxx_chip chip; + const char *name; + struct bq27xxx_access_methods bus; + struct bq27xxx_reg_cache cache; + int charge_design_full; + unsigned long last_update; + struct delayed_work work; + struct power_supply *bat; + struct mutex lock; + u8 *regs; +}; + +void bq27xxx_battery_update(struct bq27xxx_device_info *di); +int bq27xxx_battery_setup(struct bq27xxx_device_info *di); +void bq27xxx_battery_teardown(struct bq27xxx_device_info *di); + #endif diff --git a/include/linux/powercap.h b/include/linux/powercap.h index 4e250417ee30..f0a4e6257dcc 100644 --- a/include/linux/powercap.h +++ b/include/linux/powercap.h @@ -208,7 +208,7 @@ struct powercap_zone_constraint_ops { struct powercap_zone_constraint { int id; struct powercap_zone *power_zone; - struct powercap_zone_constraint_ops *ops; + const struct powercap_zone_constraint_ops *ops; }; @@ -309,7 +309,7 @@ struct powercap_zone *powercap_register_zone( struct powercap_zone *parent, const struct powercap_zone_ops *ops, int nr_constraints, - struct powercap_zone_constraint_ops *const_ops); + const struct powercap_zone_constraint_ops *const_ops); /** * powercap_unregister_zone() - Unregister a zone device diff --git a/include/linux/printk.h b/include/linux/printk.h index 9729565c25ff..9ccbdf2c1453 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -106,13 +106,13 @@ struct va_format { /* * Dummy printk for disabled debugging statements to use whilst maintaining - * gcc's format and side-effect checking. + * gcc's format checking. */ -static inline __printf(1, 2) -int no_printk(const char *fmt, ...) -{ - return 0; -} +#define no_printk(fmt, ...) \ +do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ +} while (0) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) diff --git a/include/linux/property.h b/include/linux/property.h index 0a3705a7c9f2..b51fcd36d892 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -73,8 +73,8 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode, struct fwnode_handle *device_get_next_child_node(struct device *dev, struct fwnode_handle *child); -#define device_for_each_child_node(dev, child) \ - for (child = device_get_next_child_node(dev, NULL); child; \ +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) void fwnode_handle_put(struct fwnode_handle *fwnode); @@ -144,24 +144,100 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. - * @type: Type of the property. 
- * @nval: Number of items of type @type making up the value. - * @value: Value of the property (an array of @nval items of type @type). + * @length: Length of data making up the value. + * @is_array: True when the property is an array. + * @is_string: True when property is a string. + * @pointer: Pointer to the property (an array of items of the given type). + * @value: Value of the property (when it is a single item of the given type). */ struct property_entry { const char *name; - enum dev_prop_type type; - size_t nval; + size_t length; + bool is_array; + bool is_string; union { - void *raw_data; - u8 *u8_data; - u16 *u16_data; - u32 *u32_data; - u64 *u64_data; - const char **str; - } value; + union { + void *raw_data; + u8 *u8_data; + u16 *u16_data; + u32 *u32_data; + u64 *u64_data; + const char **str; + } pointer; + union { + unsigned long long raw_data; + u8 u8_data; + u16 u16_data; + u32 u32_data; + u64 u64_data; + const char *str; + } value; + }; }; +/* + * Note: the below four initializers for the anonymous union are carefully + * crafted to avoid gcc-4.4.4's problems with initialization of anon unions + * and structs. + */ + +#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ + .is_array = true, \ + .is_string = false, \ + { .pointer = { _type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ + .is_array = true, \ + .is_string = true, \ + { .pointer = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_type_), \ + .is_string = false, \ + { .value = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_val_), \ + .is_string = true, \ + { .value = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_BOOL(_name_) \ +{ \ + .name = _name_, \ +} + /** * struct property_set - Collection of "built-in" device properties. * @fwnode: Handle to be pointed to by the fwnode field of struct device. 
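The new PROPERTY_ENTRY_* initializers above replace the old type/nval layout with separate scalar, array, string and boolean forms. A minimal sketch of how a board file might combine them with the platform_device_add_properties() declaration added elsewhere in this series; the property names, values, helper function and the empty terminating entry are assumptions made for illustration, not something this diff defines:

#include <linux/platform_device.h>
#include <linux/property.h>

static u32 example_rates[] = { 48000, 96000 };	/* hypothetical values */

static struct property_entry example_props[] = {
	PROPERTY_ENTRY_STRING("label", "example-codec"),
	PROPERTY_ENTRY_U32("max-channels", 8),
	PROPERTY_ENTRY_U32_ARRAY("sample-rates", example_rates),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ },	/* assumed empty terminator for the pset helpers */
};

static const struct property_set example_pset = {
	.properties = example_props,
};

/* Attach the built-in properties before registering the device;
 * pdev and the surrounding error handling are illustrative only. */
static int example_add_props(struct platform_device *pdev)
{
	return platform_device_add_properties(pdev, &example_pset);
}

Consumers would then read these values back through the generic device_property_*() accessors, the same way they would for DT- or ACPI-provided properties.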
@@ -172,7 +248,8 @@ struct property_set { struct property_entry *properties; }; -void device_add_property_set(struct device *dev, struct property_set *pset); +int device_add_property_set(struct device *dev, const struct property_set *pset); +void device_remove_property_set(struct device *dev); bool device_dma_supported(struct device *dev); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index dc9a1353f971..d4a32e878180 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -25,6 +25,12 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +enum qed_led_mode { + QED_LED_MODE_OFF, + QED_LED_MODE_ON, + QED_LED_MODE_RESTORE +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) @@ -252,6 +258,17 @@ struct qed_common_ops { void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); + +/** + * @brief set_led - Configure LED mode + * + * @param cdev + * @param mode - LED mode + * + * @return 0 on success, error otherwise. + */ + int (*set_led)(struct qed_dev *cdev, + enum qed_led_mode mode); }; /** diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 5ed540986019..14ec1652daf4 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old, } /** - * list_splice_init_rcu - splice an RCU-protected list into an existing list. + * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice - * @head: the place in the list to splice the first list into + * @prev: points to the last element of the existing list + * @next: points to the first element of the existing list * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... * - * @head can be RCU-read traversed concurrently with this function. + * The list pointed to by @prev and @next can be RCU-read traversed + * concurrently with this function. * * Note that this function blocks. * - * Important note: the caller must take whatever action is necessary to - * prevent any other updates to @head. In principle, it is possible - * to modify the list as soon as sync() begins execution. - * If this sort of thing becomes necessary, an alternative version - * based on call_rcu() could be created. But only if -really- - * needed -- there is no shortage of RCU API members. + * Important note: the caller must take whatever action is necessary to prevent + * any other updates to the existing list. In principle, it is possible to + * modify the list as soon as sync() begins execution. If this sort of thing + * becomes necessary, an alternative version based on call_rcu() could be + * created. But only if -really- needed -- there is no shortage of RCU API + * members. */ -static inline void list_splice_init_rcu(struct list_head *list, - struct list_head *head, - void (*sync)(void)) +static inline void __list_splice_init_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next, + void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - - if (list_empty(list)) - return; /* * "first" and "last" tracking list, so initialize it. RCU readers @@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list, * this function. 
*/ - last->next = at; - rcu_assign_pointer(list_next_rcu(head), first); - first->prev = head; - at->prev = last; + last->next = next; + rcu_assign_pointer(list_next_rcu(prev), first); + first->prev = prev; + next->prev = last; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list, + * designed for stacks. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head, head->next, sync); +} + +/** + * list_splice_tail_init_rcu - splice an RCU-protected list into an existing + * list, designed for queues. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_tail_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head->prev, head, sync); } /** @@ -305,6 +334,42 @@ static inline void list_splice_init_rcu(struct list_head *list, pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** + * list_entry_lockless - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_entry_lockless(ptr, type, member) \ + container_of((typeof(ptr))lockless_dereference(ptr), type, member) + +/** + * list_for_each_entry_lockless - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_for_each_entry_lockless(pos, head, member) \ + for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) + +/** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. 
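A short illustration of the new lockless iterator above, valid only under its stated constraint that entries are added but never deleted; the item type and list are invented for the sketch:

struct example_item {
	int value;
	struct list_head node;		/* added via list_add_rcu(), never removed */
};

static LIST_HEAD(example_list);

static int example_sum(void)
{
	struct example_item *item;
	int sum = 0;

	/* No rcu_read_lock() here; safe only because items are never deleted. */
	list_for_each_entry_lockless(item, &example_list, node)
		sum += item->value;

	return sum;
}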
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index a0189ba67fde..14e6f47ee16f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -48,10 +48,17 @@ #include <asm/barrier.h> +#ifndef CONFIG_TINY_RCU extern int rcu_expedited; /* for sysctl */ +extern int rcu_normal; /* also for sysctl */ +#endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_TINY_RCU /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ +static inline bool rcu_gp_is_normal(void) /* Internal RCU use. */ +{ + return true; +} static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */ { return false; @@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void) { } #else /* #ifdef CONFIG_TINY_RCU */ +bool rcu_gp_is_normal(void); /* Internal RCU use. */ bool rcu_gp_is_expedited(void); /* Internal RCU use. */ void rcu_expedite_gp(void); void rcu_unexpedite_gp(void); @@ -321,7 +329,6 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -void rcu_end_inkernel_boot(void); void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); @@ -329,6 +336,12 @@ struct notifier_block; int rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu); +#ifndef CONFIG_TINY_RCU +void rcu_end_inkernel_boot(void); +#else /* #ifndef CONFIG_TINY_RCU */ +static inline void rcu_end_inkernel_boot(void) { } +#endif /* #ifndef CONFIG_TINY_RCU */ + #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); @@ -379,9 +392,9 @@ static inline void rcu_init_nohz(void) */ #define RCU_NONIDLE(a) \ do { \ - rcu_irq_enter(); \ + rcu_irq_enter_irqson(); \ do { a; } while (0); \ - rcu_irq_exit(); \ + rcu_irq_exit_irqson(); \ } while (0) /* @@ -741,7 +754,7 @@ static inline void rcu_preempt_sleep_check(void) * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * - * The tracing version of rcu_dereference_raw() must not call + * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4c1aaf9cce7b..64809aea661c 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void) { } +static inline void rcu_irq_exit_irqson(void) +{ +} + +static inline void rcu_irq_enter_irqson(void) +{ +} + static inline void rcu_irq_exit(void) { } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 60d15a080d7c..ad1eda9fa4da 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void); /* * Note a virtualization-based context switch. This is simply a * wrapper around rcu_note_context_switch(), which allows TINY_RCU - * to save a few bytes. + * to save a few bytes. The caller must have disabled interrupts. 
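Pausing the rcutree.h hunk for a moment: the RCU_NONIDLE() rework above only swaps in the _irqson variants, which deal with the interrupt state themselves, so callers look the same as before. A minimal sketch, with the wrapped helper invented:

static void example_rcu_reader(void)
{
	/* stands in for any code containing RCU read-side critical sections */
}

static void example_called_from_idle(void)
{
	/*
	 * Briefly mark the CPU non-idle so the RCU readers inside the
	 * wrapped statement are safe even when invoked from the idle loop.
	 */
	RCU_NONIDLE(example_rcu_reader());
}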
*/ static inline void rcu_virt_note_context_switch(int cpu) { @@ -97,6 +97,8 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); +void rcu_irq_enter_irqson(void); +void rcu_irq_exit_irqson(void); void exit_rcu(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index d68bb402120e..18394343f489 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -788,10 +788,16 @@ int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, * * @reg_offset: Offset of the status/mask register within the bank * @mask: Mask used to flag/control the register. + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_mask: Mask bit to configure RISING type irq. + * @type_falling_mask: Mask bit to configure FALLING type irq. */ struct regmap_irq { unsigned int reg_offset; unsigned int mask; + unsigned int type_reg_offset; + unsigned int type_rising_mask; + unsigned int type_falling_mask; }; #define REGMAP_IRQ_REG(_irq, _off, _mask) \ @@ -811,18 +817,23 @@ struct regmap_irq { * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. + * @type_base: Base address for irq type. If zero unsupported. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. * @use_ack: Use @ack register even if it is zero. * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @type_invert: Invert the type flags. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. * @irqs: Descriptors for individual IRQs. Interrupt numbers are * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. + * @num_type_reg: Number of type registers. + * @type_reg_stride: Stride to use for chips where type registers are not + * contiguous. */ struct regmap_irq_chip { const char *name; @@ -832,6 +843,7 @@ struct regmap_irq_chip { unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; + unsigned int type_base; unsigned int irq_reg_stride; bool init_ack_masked:1; bool mask_invert:1; @@ -839,11 +851,15 @@ struct regmap_irq_chip { bool ack_invert:1; bool wake_invert:1; bool runtime_pm:1; + bool type_invert:1; int num_regs; const struct regmap_irq *irqs; int num_irqs; + + int num_type_reg; + unsigned int type_reg_stride; }; struct regmap_irq_chip_data; @@ -1021,7 +1037,7 @@ static inline void regmap_async_complete(struct regmap *map) } static inline int regmap_register_patch(struct regmap *map, - const struct reg_default *regs, + const struct reg_sequence *regs, int num_regs) { WARN_ONCE(1, "regmap API is disabled"); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 9e0e76992be0..48603506f8de 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -140,6 +140,8 @@ struct regulator; * * @supply: The name of the supply. Initialised by the user before * using the bulk regulator APIs. + * @optional: The supply should be considered optional. Initialised by the user + * before using the bulk regulator APIs. * @consumer: The regulator consumer for the supply. 
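An aside on the regulator_bulk_data addition above: the new optional flag is set per supply before calling the bulk helpers. A hedged sketch with invented supply names; the exact behaviour for a missing optional supply is whatever the regulator core documents, not asserted here:

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vcore" },			/* required supply */
	{ .supply = "vio", .optional = true },	/* marked optional via the new flag */
};

static int example_get_supplies(struct device *dev)
{
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				       example_supplies);
}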
This will be managed * by the bulk API. * @@ -149,6 +151,7 @@ struct regulator; */ struct regulator_bulk_data { const char *supply; + bool optional; struct regulator *consumer; /* private: Internal use */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 9c2903e58adb..16ac9e108806 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -302,6 +302,8 @@ struct regulator_desc { unsigned int vsel_reg; unsigned int vsel_mask; + unsigned int csel_reg; + unsigned int csel_mask; unsigned int apply_reg; unsigned int apply_bit; unsigned int enable_reg; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e50b31d18462..63bd7601b6de 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -823,4 +823,86 @@ out: return err; } +/* Internal function, please use rhashtable_replace_fast() instead */ +static inline int __rhashtable_replace_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj_old, struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t *lock; + unsigned int hash; + int err = -ENOENT; + + /* Minimally, the old and new objects must have same hash + * (which should mean identifiers are the same). + */ + hash = rht_head_hashfn(ht, tbl, obj_old, params); + if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) + return -EINVAL; + + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { + if (he != obj_old) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(obj_new->next, obj_old->next); + rcu_assign_pointer(*pprev, obj_new); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_replace_fast - replace an object in hash table + * @ht: hash table + * @obj_old: pointer to hash head inside object being replaced + * @obj_new: pointer to hash head inside object which is new + * @params: hash table parameters + * + * Replacing an object doesn't affect the number of elements in the hash table + * or bucket, so we don't need to worry about shrinking or expanding the + * table here. + * + * Returns zero on success, -ENOENT if the entry could not be found, + * -EINVAL if hash is not the same for the old and new objects. + */ +static inline int rhashtable_replace_fast( + struct rhashtable *ht, struct rhash_head *obj_old, + struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. 
+ */ + while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, + obj_new, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + rcu_read_unlock(); + + return err; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 29446aeef36e..bdf597c4f0be 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -85,6 +85,7 @@ enum ttu_flags { TTU_UNMAP = 1, /* unmap mode */ TTU_MIGRATION = 2, /* migration mode */ TTU_MUNLOCK = 4, /* munlock mode */ + TTU_LZFREE = 8, /* lazy free mode */ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ @@ -161,25 +162,31 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, struct anon_vma *page_get_anon_vma(struct page *page); +/* bitflags for do_page_add_anon_rmap() */ +#define RMAP_EXCLUSIVE 0x01 +#define RMAP_COMPOUND 0x02 + /* * rmap interfaces called when adding or removing pte of page */ void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); -void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, bool); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, bool); void page_add_file_rmap(struct page *); -void page_remove_rmap(struct page *); +void page_remove_rmap(struct page *, bool); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); -static inline void page_dup_rmap(struct page *page) +static inline void page_dup_rmap(struct page *page, bool compound) { - atomic_inc(&page->_mapcount); + atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount); } /* @@ -210,6 +217,25 @@ static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, } /* + * Used by idle page tracking to check if a page was referenced via page + * tables. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, + unsigned long address, pmd_t **pmdp, + pte_t **ptep, spinlock_t **ptlp); +#else +static inline bool page_check_address_transhuge(struct page *page, + struct mm_struct *mm, unsigned long address, + pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) +{ + *ptep = page_check_address(page, mm, address, ptlp, 0); + *pmdp = NULL; + return !!*ptep; +} +#endif + +/* * Used by swapoff to help locate where page is expected in vma. 
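Returning to rhashtable_replace_fast(), which this chunk completes before moving on to rmap.h: a sketch of replacing one object with another that hashes to the same key; the object layout and table parameters are invented:

struct example_obj {
	u32 key;
	struct rhash_head node;
	/* payload elided */
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_obj, key),
	.head_offset	= offsetof(struct example_obj, node),
};

/* Both objects must carry the same key, otherwise -EINVAL is returned. */
static int example_swap(struct rhashtable *ht, struct example_obj *old,
			struct example_obj *new)
{
	return rhashtable_replace_fast(ht, &old->node, &new->node,
				       example_params);
}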
*/ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); @@ -286,5 +312,6 @@ static inline int page_mkclean(struct page *page) #define SWAP_AGAIN 1 #define SWAP_FAIL 2 #define SWAP_MLOCK 3 +#define SWAP_LZFREE 4 #endif /* _LINUX_RMAP_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 4be5048b1fbe..c006cc900c44 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -84,6 +84,11 @@ void net_inc_ingress_queue(void); void net_dec_ingress_queue(void); #endif +#ifdef CONFIG_NET_EGRESS +void net_inc_egress_queue(void); +void net_dec_egress_queue(void); +#endif + extern void rtnetlink_init(void); extern void __rtnl_unlock(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index edad7a43edea..61aa9bbea871 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -177,9 +177,9 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern void calc_global_load(unsigned long ticks); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void update_cpu_load_nohz(void); +extern void update_cpu_load_nohz(int active); #else -static inline void update_cpu_load_nohz(void) { } +static inline void update_cpu_load_nohz(int active) { } #endif extern unsigned long get_parent_ip(unsigned long addr); @@ -377,6 +377,7 @@ extern void scheduler_tick(void); extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); @@ -387,6 +388,9 @@ extern unsigned int softlockup_panic; extern unsigned int hardlockup_panic; void lockup_detector_init(void); #else +static inline void touch_softlockup_watchdog_sched(void) +{ +} static inline void touch_softlockup_watchdog(void) { } @@ -830,6 +834,7 @@ struct user_struct { unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ #endif unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ #ifdef CONFIG_KEYS struct key *uid_keyring; /* UID specific keyring */ @@ -1268,8 +1273,13 @@ struct sched_entity { #endif #ifdef CONFIG_SMP - /* Per entity load average tracking */ - struct sched_avg avg; + /* + * Per entity load average tracking. + * + * Put into separate cache line so it does not + * collide with read-mostly values above. 
+ */ + struct sched_avg avg ____cacheline_aligned_in_smp; #endif }; @@ -1455,14 +1465,15 @@ struct task_struct { /* Used for emulating ABI behavior of previous Linux versions */ unsigned int personality; - unsigned in_execve:1; /* Tell the LSMs that the process is doing an - * execve */ - unsigned in_iowait:1; - - /* Revert to default priority/policy when forking */ + /* scheduler bits, serialized by scheduler locks */ unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; + unsigned :0; /* force alignment to the next boundary */ + + /* unserialized, strictly 'current' */ + unsigned in_execve:1; /* bit to tell LSMs we're in execve */ + unsigned in_iowait:1; #ifdef CONFIG_MEMCG unsigned memcg_may_oom:1; #endif @@ -1519,11 +1530,14 @@ struct task_struct { cputime_t gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqlock_t vtime_seqlock; + seqcount_t vtime_seqcount; unsigned long long vtime_snap; enum { - VTIME_SLEEPING = 0, + /* Task is sleeping or running in a CPU with VTIME inactive */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active */ VTIME_USER, + /* Task runs in kernelspace in a CPU with VTIME active */ VTIME_SYS, } vtime_snap_whence; #endif @@ -2002,7 +2016,8 @@ static inline int pid_alive(const struct task_struct *p) } /** - * is_global_init - check if a task structure is init + * is_global_init - check if a task structure is init. Since init + * is free to have sub-threads we need to check tgid. * @tsk: Task structure to be checked. * * Check if a task structure is the first user space task the kernel created. @@ -2011,7 +2026,7 @@ static inline int pid_alive(const struct task_struct *p) */ static inline int is_global_init(struct task_struct *tsk) { - return tsk->pid == 1; + return task_tgid_nr(tsk) == 1; } extern struct pid *cad_pid; diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index efa931c5cef1..411b52e424e1 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -10,11 +10,17 @@ #ifdef CONFIG_GENERIC_SCHED_CLOCK extern void sched_clock_postinit(void); -#else -static inline void sched_clock_postinit(void) { } -#endif extern void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate); +#else +static inline void sched_clock_postinit(void) { } + +static inline void sched_clock_register(u64 (*read)(void), int bits, + unsigned long rate) +{ + ; +} +#endif #endif diff --git a/include/linux/security.h b/include/linux/security.h index 2f4c1f7aa7db..4824a4ccaf1c 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -270,10 +270,10 @@ int security_inode_listxattr(struct dentry *dentry); int security_inode_removexattr(struct dentry *dentry, const char *name); int security_inode_need_killpriv(struct dentry *dentry); int security_inode_killpriv(struct dentry *dentry); -int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); +int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); -void security_inode_getsecid(const struct inode *inode, u32 *secid); +void security_inode_getsecid(struct inode *inode, u32 *secid); int security_file_permission(struct file *file, int mask); int security_file_alloc(struct file *file); void 
security_file_free(struct file *file); @@ -353,6 +353,7 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); +void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); @@ -719,7 +720,7 @@ static inline int security_inode_killpriv(struct dentry *dentry) return cap_inode_killpriv(dentry); } -static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) +static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { return -EOPNOTSUPP; } @@ -734,7 +735,7 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer, return 0; } -static inline void security_inode_getsecid(const struct inode *inode, u32 *secid) +static inline void security_inode_getsecid(struct inode *inode, u32 *secid) { *secid = 0; } @@ -1093,6 +1094,10 @@ static inline void security_release_secctx(char *secdata, u32 seclen) { } +static inline void security_inode_invalidate_secctx(struct inode *inode) +{ +} + static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return -EOPNOTSUPP; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 297d4fa1cfe5..e03d6ba5e5b4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -145,11 +145,12 @@ struct uart_port { #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ -#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */ +#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */ #define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ #define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ #define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ #define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ +#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 7c536ac5be05..9f2bfd055742 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -32,6 +32,7 @@ enum { SCIx_SH2_SCIF_FIFODATA_REGTYPE, SCIx_SH3_SCIF_REGTYPE, SCIx_SH4_SCIF_REGTYPE, + SCIx_SH4_SCIF_BRG_REGTYPE, SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, SCIx_SH4_SCIF_FIFODATA_REGTYPE, SCIx_SH7705_SCIF_REGTYPE, diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 8c9131db2b25..f2e27e078362 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -4,7 +4,7 @@ #include <linux/phy.h> #include <linux/if_ether.h> -enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; +enum {EDMAC_LITTLE_ENDIAN}; struct sh_eth_plat_data { int phy; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 50777b5b1e4c..a43f41cb3c43 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -60,6 +60,10 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(swp_entry_t entry, struct page *page); +extern unsigned long 
shmem_swap_usage(struct vm_area_struct *vma); +extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, + pgoff_t start, pgoff_t end); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4355129fff91..11f935c1a090 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -39,11 +39,55 @@ #include <linux/in6.h> #include <net/flow.h> -/* A. Checksumming of received packets by device. +/* The interface for checksum offload between the stack and networking drivers + * is as follows... + * + * A. IP checksum related features + * + * Drivers advertise checksum offload capabilities in the features of a device. + * From the stack's point of view these are capabilities offered by the driver, + * a driver typically only advertises features that it is capable of offloading + * to its device. + * + * The checksum related features are: + * + * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one + * IP (one's complement) checksum for any combination + * of protocols or protocol layering. The checksum is + * computed and set in a packet per the CHECKSUM_PARTIAL + * interface (see below). + * + * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv4. These are specifically + * unencapsulated packets of the form IPv4|TCP or + * IPv4|UDP where the Protocol field in the IPv4 header + * is TCP or UDP. The IPv4 header may contain IP options + * This feature cannot be set in features for a device + * with NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv6. These are specifically + * unencapsulated packets of the form IPv6|TCP or + * IPv4|UDP where the Next Header field in the IPv6 + * header is either TCP or UDP. IPv6 extension headers + * are not supported with this feature. This feature + * cannot be set in features for a device with + * NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. + * This flag is used only used to disable the RX checksum + * feature for a device. The stack will accept receive + * checksum indication in packets received on a device + * regardless of whether NETIF_F_RXCSUM is set. + * + * B. Checksumming of received packets by device. Indication of checksum + * verification is in set skb->ip_summed. Possible values are: * * CHECKSUM_NONE: * - * Device failed to checksum this packet e.g. due to lack of capabilities. + * Device did not checksum this packet e.g. due to lack of capabilities. * The packet contains full (though not verified) checksum in packet but * not in skb->csum. Thus, skb->csum is undefined in this case. * @@ -53,9 +97,8 @@ * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY * if their checksums are okay. skb->csum is still undefined in this case - * though. It is a bad option, but, unfortunately, nowadays most vendors do - * this. Apparently with the secret goal to sell you new devices, when you - * will add new protocol to your host, f.e. IPv6 8) + * though. A driver or device must never modify the checksum field in the + * packet even if checksum is verified. 
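To ground the receive-side rules spelled out above, a driver's RX completion path usually reduces to something like the following sketch; the hardware status flag is an invented stand-in for whatever the descriptor reports:

static void example_rx_checksum(struct sk_buff *skb, bool hw_verified)
{
	/* Default: let the stack compute and verify checksums itself. */
	skb->ip_summed = CHECKSUM_NONE;

	/*
	 * Claim CHECKSUM_UNNECESSARY only when the device really verified
	 * the checksum; the checksum field in the packet is left untouched.
	 */
	if (hw_verified)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}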
* * CHECKSUM_UNNECESSARY is applicable to following protocols: * TCP: IPv6 and IPv4. @@ -96,40 +139,77 @@ * packet that are after the checksum being offloaded are not considered to * be verified. * - * B. Checksumming on output. - * - * CHECKSUM_NONE: - * - * The skb was already checksummed by the protocol, or a checksum is not - * required. + * C. Checksumming on transmit for non-GSO. The stack requests checksum offload + * in the skb->ip_summed for a packet. Values are: * * CHECKSUM_PARTIAL: * - * The device is required to checksum the packet as seen by hard_start_xmit() + * The driver is required to checksum the packet as seen by hard_start_xmit() * from skb->csum_start up to the end, and to record/write the checksum at - * offset skb->csum_start + skb->csum_offset. + * offset skb->csum_start + skb->csum_offset. A driver may verify that the + * csum_start and csum_offset values are valid values given the length and + * offset of the packet, however they should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum-- it is the + * purview of the stack to validate that csum_start and csum_offset are set + * correctly. + * + * When the stack requests checksum offload for a packet, the driver MUST + * ensure that the checksum is set correctly. A driver can either offload the + * checksum calculation to the device, or call skb_checksum_help (in the case + * that the device does not support offload for a particular checksum). + * + * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of + * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate + * checksum offload capability. If a device has limited checksum capabilities + * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as + * described above) a helper function can be called to resolve + * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper + * function takes a spec argument that describes the protocol layer that is + * supported for checksum offload and can be called for each packet. If a + * packet does not match the specification for offload, skb_checksum_help + * is called to resolve the checksum. * - * The device must show its capabilities in dev->features, set up at device - * setup time, e.g. netdev_features.h: + * CHECKSUM_NONE: * - * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything. - * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over - * IPv4. Sigh. Vendors like this way for an unknown reason. - * Though, see comment above about CHECKSUM_UNNECESSARY. 8) - * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead. - * NETIF_F_... - Well, you get the picture. + * The skb was already checksummed by the protocol, or a checksum is not + * required. * * CHECKSUM_UNNECESSARY: * - * Normally, the device will do per protocol specific checksumming. Protocol - * implementations that do not want the NIC to perform the checksum - * calculation should use this flag in their outgoing skbs. - * - * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC - * offload. Correspondingly, the FCoE protocol driver - * stack should use CHECKSUM_UNNECESSARY. + * This has the same meaning on as CHECKSUM_NONE for checksum offload on + * output. * - * Any questions? No questions, good. --ANK + * CHECKSUM_COMPLETE: + * Not used in checksum output. If a driver observes a packet with this value + * set in skbuff, if should treat as CHECKSUM_NONE being set. + * + * D. 
Non-IP checksum (CRC) offloads + * + * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of + * offloading the SCTP CRC in a packet. To perform this offload the stack + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports + * both IP checksum offload and SCTP CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. + * + * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of + * offloading the FCOE CRC in a packet. To perform this offload the stack + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports + * both IP checksum offload and FCOE CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. + * + * E. Checksumming on output with GSO. + * + * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload + * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the + * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as + * part of the GSO operation is implied. If a checksum is being offloaded + * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset + * are set to refer to the outermost checksum being offload (two offloaded + * checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ @@ -833,7 +913,7 @@ struct sk_buff_fclones { * skb_fclone_busy - check if fclone is busy * @skb: buffer * - * Returns true is skb is a fast clone, and its clone is not freed. + * Returns true if skb is a fast clone, and its clone is not freed. * Some drivers call skb_orphan() in their ndo_start_xmit(), * so we also check that this didnt happen. */ @@ -1082,9 +1162,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) static inline void skb_sender_cpu_clear(struct sk_buff *skb) { -#ifdef CONFIG_XPS - skb->sender_cpu = 0; -#endif } #ifdef NET_SKBUFF_DATA_USES_OFFSET @@ -1942,6 +2019,11 @@ static inline unsigned char *skb_inner_transport_header(const struct sk_buff return skb->head + skb->inner_transport_header; } +static inline int skb_inner_transport_offset(const struct sk_buff *skb) +{ + return skb_inner_transport_header(skb) - skb->data; +} + static inline void skb_reset_inner_transport_header(struct sk_buff *skb) { skb->inner_transport_header = skb->data - skb->head; @@ -2723,6 +2805,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +static inline void skb_postpush_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + /* For performing the reverse operation to skb_postpull_rcsum(), + * we can instead of ... + * + * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); + * + * ... just use this equivalent version here to save a few + * instructions. Feeding csum of 0 in csum_partial() and later + * on adding skb->csum is equivalent to feed skb->csum in the + * first place. 
+ */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_partial(start, len, skb->csum); +} + /** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim @@ -2788,6 +2887,12 @@ static inline void skb_frag_list_init(struct sk_buff *skb) #define skb_walk_frags(skb, iter) \ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) + +int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, + const struct sk_buff *skb); +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *off, int *err, + struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, @@ -3446,7 +3551,8 @@ struct skb_gso_cb { int encap_level; __u16 csum_start; }; -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) +#define SKB_SGO_CB_OFFSET 32 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 2037a861e367..3ffee7422012 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -86,6 +86,11 @@ #else # define SLAB_FAILSLAB 0x00000000UL #endif +#ifdef CONFIG_MEMCG_KMEM +# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ +#else +# define SLAB_ACCOUNT 0x00000000UL +#endif /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index dad035c16d94..343c13ac4f71 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -144,17 +144,17 @@ struct knav_dma_cfg { * @psdata: Protocol specific */ struct knav_dma_desc { - u32 desc_info; - u32 tag_info; - u32 packet_info; - u32 buff_len; - u32 buff; - u32 next_desc; - u32 orig_len; - u32 orig_buff; - u32 epib[KNAV_DMA_NUM_EPIB_WORDS]; - u32 psdata[KNAV_DMA_NUM_PS_WORDS]; - u32 pad[4]; + __le32 desc_info; + __le32 tag_info; + __le32 packet_info; + __le32 buff_len; + __le32 buff; + __le32 next_desc; + __le32 orig_len; + __le32 orig_buff; + __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; + __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; + __le32 pad[4]; } ____cacheline_aligned; #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index fddebc617469..4018b48f2b3b 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -15,6 +15,7 @@ struct sock_diag_handler { __u8 family; int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); + int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); }; int sock_diag_register(const struct sock_diag_handler *h); @@ -68,4 +69,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk) } void sock_diag_broadcast_destroy(struct sock *sk); +int sock_diag_destroy(struct sock *sk, int err); #endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index cce80e6dc7d1..53be3a4c60cb 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -425,6 +425,12 @@ struct spi_master { #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ + /* + * on some hardware transfer size may be constrained + * the limit may depend on device transfer settings + */ + size_t 
(*max_transfer_size)(struct spi_device *spi); + /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; @@ -762,10 +768,15 @@ struct spi_message { void *state; }; +static inline void spi_message_init_no_memset(struct spi_message *m) +{ + INIT_LIST_HEAD(&m->transfers); +} + static inline void spi_message_init(struct spi_message *m) { memset(m, 0, sizeof *m); - INIT_LIST_HEAD(&m->transfers); + spi_message_init_no_memset(m); } static inline void @@ -832,6 +843,15 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, struct spi_message *message); +static inline size_t +spi_max_transfer_size(struct spi_device *spi) +{ + struct spi_master *master = spi->master; + if (!master->max_transfer_size) + return SIZE_MAX; + return master->max_transfer_size(spi); +} + /*---------------------------------------------------------------------------*/ /* All these synchronous SPI transfer routines are utilities layered @@ -1115,12 +1135,7 @@ spi_add_device(struct spi_device *spi); extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); -static inline void -spi_unregister_device(struct spi_device *spi) -{ - if (spi) - device_unregister(&spi->dev); -} +extern void spi_unregister_device(struct spi_device *spi); extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index c3d1a525bacc..26a0b3c3ce5f 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -524,13 +524,9 @@ struct ssb_init_invariants { typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, struct ssb_init_invariants *iv); -/* Register a SSB system bus. get_invariants() is called after the - * basic system devices are initialized. - * The invariants are usually fetched from some NVRAM. - * Put the invariants into the struct pointed to by iv. */ -extern int ssb_bus_ssbbus_register(struct ssb_bus *bus, - unsigned long baseaddr, - ssb_invariants_func_t get_invariants); +/* Register SoC bus. 
*/ +extern int ssb_bus_host_soc_register(struct ssb_bus *bus, + unsigned long baseaddr); #ifdef CONFIG_SSB_PCIHOST extern int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci); diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 0e1b1540597a..3cc9632dcc2a 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -29,7 +29,7 @@ struct cpu_stop_work { int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); -void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, +bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); @@ -65,7 +65,7 @@ static void stop_one_cpu_nowait_workfn(struct work_struct *work) preempt_enable(); } -static inline void stop_one_cpu_nowait(unsigned int cpu, +static inline bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf) { @@ -74,7 +74,10 @@ static inline void stop_one_cpu_nowait(unsigned int cpu, work_buf->fn = fn; work_buf->arg = arg; schedule_work(&work_buf->work); + return true; } + + return false; } static inline int stop_cpus(const struct cpumask *cpumask, diff --git a/include/linux/string.h b/include/linux/string.h index 9ef7795e65e4..9eebc66d957a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -10,6 +10,7 @@ extern char *strndup_user(const char __user *, long); extern void *memdup_user(const void __user *, size_t); +extern void *memdup_user_nul(const void __user *, size_t); /* * Include machine specific inline routines diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 78512cfe1fe6..b7dabc4baafd 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -128,6 +128,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, const unsigned short port); int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); +void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); static inline void svc_xprt_get(struct svc_xprt *xprt) { diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 8d71d6577459..c00f53a4ccdd 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -23,13 +23,19 @@ struct svc_cred { kgid_t cr_gid; struct group_info *cr_group_info; u32 cr_flavor; /* pseudoflavor */ - char *cr_principal; /* for gss */ + /* name of form servicetype/hostname@REALM, passed down by + * gss-proxy: */ + char *cr_raw_principal; + /* name of form servicetype@hostname, passed down by + * rpc.svcgssd, or computed from the above: */ + char *cr_principal; struct gss_api_mech *cr_gss_mech; }; static inline void init_svc_cred(struct svc_cred *cred) { cred->cr_group_info = NULL; + cred->cr_raw_principal = NULL; cred->cr_principal = NULL; cred->cr_gss_mech = NULL; } @@ -38,6 +44,7 @@ static inline void free_svc_cred(struct svc_cred *cred) { if (cred->cr_group_info) put_group_info(cred->cr_group_info); + kfree(cred->cr_raw_principal); kfree(cred->cr_principal); gss_mech_put(cred->cr_gss_mech); init_svc_cred(cred); diff --git a/include/linux/swap.h b/include/linux/swap.h index 7ba7dccaf0e7..414e101cd061 100644 
--- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -287,7 +287,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node) /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; -extern unsigned long dirty_balance_reserve; extern unsigned long nr_free_buffer_pages(void); extern unsigned long nr_free_pagecache_pages(void); @@ -308,6 +307,7 @@ extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); +extern void deactivate_page(struct page *page); extern void swap_setup(void); extern void add_page_to_unevictable_list(struct page *page); @@ -539,7 +539,8 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -#define reuse_swap_page(page) (page_mapcount(page) == 1) +#define reuse_swap_page(page) \ + (!PageTransCompound(page) && page_mapcount(page) == 1) static inline int try_to_free_swap(struct page *page) { diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c2b66a277e98..185815c96433 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -886,6 +886,9 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, const char __user *const __user *envp, int flags); asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index ff307b548ed3..b4c2a485b28a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -56,9 +56,10 @@ extern long do_no_restart_syscall(struct restart_block *parm); #ifdef __KERNEL__ #ifdef CONFIG_DEBUG_STACK_USAGE -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ + __GFP_ZERO) #else -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK) #endif /* diff --git a/include/linux/time.h b/include/linux/time.h index beebe3a02d43..297f09f23896 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv) extern struct timespec timespec_trunc(struct timespec t, unsigned gran); +/* + * Validates if a timespec/timeval used to inject a time offset is valid. + * Offsets can be postive or negative. The value of the timeval/timespec + * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must + * always be non-negative. 
+ */ +static inline bool timeval_inject_offset_valid(const struct timeval *tv) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more microseconds then a second */ + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_inject_offset_valid(const struct timespec *ts) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more nanoseconds then a second */ + if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h new file mode 100644 index 000000000000..e1ee97c713bf --- /dev/null +++ b/include/linux/tracepoint-defs.h @@ -0,0 +1,27 @@ +#ifndef TRACEPOINT_DEFS_H +#define TRACEPOINT_DEFS_H 1 + +/* + * File can be included directly by headers who only want to access + * tracepoint->key to guard out of line trace calls. Otherwise + * linux/tracepoint.h should be used. + */ + +#include <linux/atomic.h> +#include <linux/static_key.h> + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint { + const char *name; /* Tracepoint name */ + struct static_key key; + void (*regfunc)(void); + void (*unregfunc)(void); + struct tracepoint_func __rcu *funcs; +}; + +#endif diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 696a339c592c..acd522a91539 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,26 +17,12 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/rcupdate.h> -#include <linux/static_key.h> +#include <linux/tracepoint-defs.h> struct module; struct tracepoint; struct notifier_block; -struct tracepoint_func { - void *func; - void *data; - int prio; -}; - -struct tracepoint { - const char *name; /* Tracepoint name */ - struct static_key key; - void (*regfunc)(void); - void (*unregfunc)(void); - struct tracepoint_func __rcu *funcs; -}; - struct trace_enum_map { const char *system; const char *enum_string; @@ -171,8 +157,8 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), \ - rcu_irq_enter(), \ - rcu_irq_exit()); \ + rcu_irq_enter_irqson(), \ + rcu_irq_exit_irqson()); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) @@ -493,6 +479,10 @@ extern void syscall_unregfunc(void); #define TRACE_EVENT_FN(name, proto, args, struct, \ assign, print, reg, unreg) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \ + assign, print, reg, unreg) \ + DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ + PARAMS(args), PARAMS(cond)) #define TRACE_EVENT_CONDITION(name, proto, args, cond, \ struct, assign, print) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 5e31f1b99037..2fd8708ea888 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -345,8 +345,6 @@ struct tty_file_private { #define TTY_HUPPED 18 /* Post driver->hangup() */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ -#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) - /* Values for tty->flow_change */ #define TTY_THROTTLE_SAFE 1 #define TTY_UNTHROTTLE_SAFE 2 @@ -395,8 +393,6 @@ static inline int __init tty_init(void) { return 0; } #endif -extern void tty_write_flush(struct 
tty_struct *); - extern struct ktermios tty_std_termios; extern int vcs_init(void); @@ -419,9 +415,8 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, - const char *routine); extern const char *tty_name(const struct tty_struct *tty); +extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); extern int tty_check_change(struct tty_struct *tty); @@ -667,10 +662,16 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {} static inline void proc_tty_unregister_driver(struct tty_driver *d) {} #endif -#define tty_debug(tty, f, args...) \ - do { \ - printk(KERN_DEBUG "%s: %s: " f, __func__, \ - tty_name(tty), ##args); \ - } while (0) +#define tty_msg(fn, tty, f, ...) \ + fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__) + +#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__) +#define tty_info(tty, f, ...) tty_msg(pr_info, tty, f, ##__VA_ARGS__) +#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__) +#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__) +#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__) + +#define tty_info_ratelimited(tty, f, ...) \ + tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__) #endif diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 0994c0d01a09..75de43da2301 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -20,6 +20,11 @@ * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> * * Changes/Revisions: + * 0.5 08/13/2015 (David Herrmann <dh.herrmann@gmail.com> & + * Benjamin Tissoires <benjamin.tissoires@redhat.com>) + * - add UI_DEV_SETUP ioctl + * - add UI_ABS_SETUP ioctl + * - add UI_GET_VERSION ioctl * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>) * - add UI_GET_SYSNAME ioctl * 0.3 24/05/2006 (Anssi Hannula <anssi.hannulagmail.com>) diff --git a/include/linux/uio.h b/include/linux/uio.h index 8b01e1c3c614..fd9bcfedad42 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -82,7 +82,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); -size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); @@ -145,7 +145,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int import_iovec(int type, const struct iovec __user * uvector, diff --git a/include/linux/usb.h b/include/linux/usb.h index b9a28074210f..89533ba38691 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -510,7 +510,8 @@ struct usb3_lpm_parameters { * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM * 
@usb2_hw_lpm_enabled: USB2 hardware LPM is enabled * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled - * @usb3_lpm_enabled: USB3 hardware LPM enabled + * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled + * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled * @string_langid: language ID for strings * @product: iProduct string, if present (static) * @manufacturer: iManufacturer string, if present (static) @@ -583,7 +584,8 @@ struct usb_device { unsigned usb2_hw_lpm_besl_capable:1; unsigned usb2_hw_lpm_enabled:1; unsigned usb2_hw_lpm_allowed:1; - unsigned usb3_lpm_enabled:1; + unsigned usb3_lpm_u1_enabled:1; + unsigned usb3_lpm_u2_enabled:1; int string_langid; /* static strings from the device */ diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 1f6526c76ee8..3a375d07d0dc 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -138,6 +138,7 @@ struct cdc_ncm_ctx { }; u8 cdc_ncm_select_altsetting(struct usb_interface *intf); +int cdc_ncm_change_mtu(struct net_device *net, int new_mtu); int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 3d583a10b926..d82d0068872b 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -402,6 +402,9 @@ static inline void usb_ep_free_request(struct usb_ep *ep, static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { + if (WARN_ON_ONCE(!ep->enabled && ep->address)) + return -ESHUTDOWN; + return ep->ops->queue(ep, req, gfp_flags); } @@ -1012,6 +1015,9 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers * and should be called in_interrupt. * @driver: Driver model state for this driver. + * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, + * this driver will be bound to any available UDC. + * @pending: UDC core private data used for deferred probe of this driver. 
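To illustrate the new udc_name member documented just above: a gadget driver can pin itself to one UDC by name, or leave the field NULL to accept any available UDC. A sketch with invented names and most callbacks elided:

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	/* .bind/.unbind/.setup etc. omitted from this sketch */
	.udc_name	= "ci_hdrc.0",	/* illustrative; NULL means any UDC */
	.driver		= {
		.name	= "example",
	},
};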
* * Devices are disabled till a gadget driver successfully bind()s, which * means the driver will handle setup() requests needed to enumerate (and @@ -1072,6 +1078,9 @@ struct usb_gadget_driver { /* FIXME support safe rmmod */ struct device_driver driver; + + char *udc_name; + struct list_head pending; }; @@ -1117,8 +1126,6 @@ extern int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)); extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); extern void usb_del_gadget_udc(struct usb_gadget *gadget); -extern int usb_udc_attach_driver(const char *name, - struct usb_gadget_driver *driver); /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index f89c24bd53a4..4dcf8446dbcd 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -660,7 +660,7 @@ struct usb_mon_operations { /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ }; -extern struct usb_mon_operations *mon_ops; +extern const struct usb_mon_operations *mon_ops; static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) { @@ -682,7 +682,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, (*mon_ops->urb_complete)(bus, urb, status); } -int usb_mon_register(struct usb_mon_operations *ops); +int usb_mon_register(const struct usb_mon_operations *ops); void usb_mon_deregister(void); #else diff --git a/include/linux/usb/musb-omap.h b/include/linux/usb/musb-omap.h deleted file mode 100644 index 7774c5986f07..000000000000 --- a/include/linux/usb/musb-omap.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2011-2012 by Texas Instruments - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. 
- */ - -#ifndef __MUSB_OMAP_H__ -#define __MUSB_OMAP_H__ - -enum omap_musb_vbus_id_status { - OMAP_MUSB_UNKNOWN = 0, - OMAP_MUSB_ID_GROUND, - OMAP_MUSB_ID_FLOAT, - OMAP_MUSB_VBUS_VALID, - OMAP_MUSB_VBUS_OFF, -}; - -#if (defined(CONFIG_USB_MUSB_OMAP2PLUS) || \ - defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE)) -void omap_musb_mailbox(enum omap_musb_vbus_id_status status); -#else -static inline void omap_musb_mailbox(enum omap_musb_vbus_id_status status) -{ -} -#endif - -#endif /* __MUSB_OMAP_H__ */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index fa6dc132bd1b..96ddfb7ab018 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -133,6 +133,21 @@ struct musb_hdrc_platform_data { const void *platform_ops; }; +enum musb_vbus_id_status { + MUSB_UNKNOWN = 0, + MUSB_ID_GROUND, + MUSB_ID_FLOAT, + MUSB_VBUS_VALID, + MUSB_VBUS_OFF, +}; + +#if IS_ENABLED(CONFIG_USB_MUSB_HDRC) +void musb_mailbox(enum musb_vbus_id_status status); +#else +static inline void musb_mailbox(enum musb_vbus_id_status status) +{ +} +#endif /* TUSB 6010 support */ diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index c3fe9e48ce27..974bce93aa28 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,10 +12,16 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); #else +static inline enum usb_dr_mode +of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +{ + return USB_DR_MODE_UNKNOWN; +} static inline bool of_usb_host_tpl_support(struct device_node *np) { return false; diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index bfb74723f151..4db191fe8c2c 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -105,12 +105,26 @@ struct renesas_usbhs_platform_callback { * some register needs USB chip specific parameters. 
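The OMAP-specific mailbox deleted above is replaced by the generic musb_mailbox() declared in musb.h, so a transceiver or extcon driver reports VBUS/ID state with the new enum instead of the removed omap_musb_* values. A small sketch; the detection helper is hypothetical:

#include <linux/usb/musb.h>

static void example_report_cable_state(bool id_grounded, bool vbus_present)
{
	if (id_grounded)
		musb_mailbox(MUSB_ID_GROUND);	/* host cable attached */
	else if (vbus_present)
		musb_mailbox(MUSB_VBUS_VALID);	/* device cable, VBUS up */
	else
		musb_mailbox(MUSB_VBUS_OFF);	/* nothing connected */
}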
* This struct show it to driver */ + +struct renesas_usbhs_driver_pipe_config { + u8 type; /* USB_ENDPOINT_XFER_xxx */ + u16 bufsize; + u8 bufnum; + bool double_buf; +}; +#define RENESAS_USBHS_PIPE(_type, _size, _num, _double_buf) { \ + .type = (_type), \ + .bufsize = (_size), \ + .bufnum = (_num), \ + .double_buf = (_double_buf), \ + } + struct renesas_usbhs_driver_param { /* * pipe settings */ - u32 *pipe_type; /* array of USB_ENDPOINT_XFER_xxx (from ep0) */ - int pipe_size; /* pipe_type array size */ + struct renesas_usbhs_driver_pipe_config *pipe_configs; + int pipe_size; /* pipe_configs array size */ /* * option: diff --git a/include/linux/vfio.h b/include/linux/vfio.h index ddb440975382..610a86a892b8 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -44,6 +44,9 @@ struct vfio_device_ops { void (*request)(void *device_data, unsigned int count); }; +extern struct iommu_group *vfio_iommu_group_get(struct device *dev); +extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); + extern int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops, void *device_data); diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 73ea2fb04731..16c0ed6c50a7 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -46,7 +46,7 @@ * All kernel-specific stuff were moved to media/v4l2-dev.h, so * no #if __KERNEL tests are allowed here * - * See http://linuxtv.org for more info + * See https://linuxtv.org for more info * * Author: Bill Dirks <bill@thedirks.org> * Justin Schoeman diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index e623d392db0c..67c1dbd19c6d 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -25,6 +25,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), PGFREE, PGACTIVATE, PGDEACTIVATE, PGFAULT, PGMAJFAULT, + PGLAZYFREED, FOR_ALL_ZONES(PGREFILL), FOR_ALL_ZONES(PGSTEAL_KSWAPD), FOR_ALL_ZONES(PGSTEAL_DIRECT), @@ -68,7 +69,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_FAULT_FALLBACK, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, - THP_SPLIT, + THP_SPLIT_PAGE, + THP_SPLIT_PAGE_FAILED, + THP_SPLIT_PMD, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3bff87a25a42..d1f1d338af20 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -14,7 +14,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ -#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 3e4535876d37..3347cc3ec0ab 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -12,6 +12,9 @@ struct vmpressure { unsigned long scanned; unsigned long reclaimed; + + unsigned long tree_scanned; + unsigned long tree_reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. 
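Board or SoC code that previously passed a flat pipe_type array now describes each pipe with the RENESAS_USBHS_PIPE() helper introduced above. A sketch with made-up buffer sizes and buffer numbers:

#include <linux/usb/ch9.h>
#include <linux/usb/renesas_usbhs.h>

static struct renesas_usbhs_driver_pipe_config example_pipe_configs[] = {
	RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_CONTROL,	  64, 0x00, false),
	RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_ISOC,	1024, 0x08, false),
	RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_BULK,	 512, 0x28, true),
	RENESAS_USBHS_PIPE(USB_ENDPOINT_XFER_INT,	  64, 0x04, false),
};

static struct renesas_usbhs_platform_info example_usbhs_info = {
	.driver_param = {
		.pipe_configs	= example_pipe_configs,
		.pipe_size	= ARRAY_SIZE(example_pipe_configs),
	},
};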
*/ struct spinlock sr_lock; @@ -26,7 +29,7 @@ struct vmpressure { struct mem_cgroup; #ifdef CONFIG_MEMCG -extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, +extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); @@ -40,7 +43,7 @@ extern int vmpressure_register_event(struct mem_cgroup *memcg, extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else -static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, +static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) {} static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) {} diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 5dbc8b0ee567..73fae8c4a5fb 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) #ifdef CONFIG_SMP -void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); +void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); -void mod_zone_page_state(struct zone *, enum zone_stat_item, int); +void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); @@ -189,6 +189,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item); extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); +void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); @@ -205,7 +206,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat, * The functions directly modify the zone and global counters. */ static inline void __mod_zone_page_state(struct zone *zone, - enum zone_stat_item item, int delta) + enum zone_stat_item item, long delta) { zone_page_state_add(delta, zone, item); } @@ -249,6 +250,7 @@ static inline void __dec_zone_page_state(struct page *page, static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } +static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { } diff --git a/include/linux/vtime.h b/include/linux/vtime.h index c5165fd256f9..fa2196990f84 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -10,16 +10,27 @@ struct task_struct; /* - * vtime_accounting_enabled() definitions/declarations + * vtime_accounting_cpu_enabled() definitions/declarations */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -static inline bool vtime_accounting_enabled(void) { return true; } +static inline bool vtime_accounting_cpu_enabled(void) { return true; } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +/* + * Checks if vtime is enabled on some CPU. Cputime readers want to be careful + * in that case and compute the tickless cputime. + * For now vtime state is tied to context tracking. We might want to decouple + * those later if necessary. 
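The vmpressure() prototype above gains a bool tree argument, which apparently selects whether a reclaim pass feeds the new tree_scanned/tree_reclaimed totals or only the cgroup's own counters. A hypothetical call-site sketch showing the extra argument only; the exact semantics of the flag follow the vmpressure implementation, not this header:

#include <linux/vmpressure.h>

static void example_report_pressure(gfp_t gfp_mask, struct mem_cgroup *memcg,
				    unsigned long nr_scanned,
				    unsigned long nr_reclaimed)
{
	/* true: account into the tree-wide totals added above;
	 * false would update only @memcg's own scanned/reclaimed. */
	vmpressure(gfp_mask, memcg, true, nr_scanned, nr_reclaimed);
}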
+ */ static inline bool vtime_accounting_enabled(void) { - if (context_tracking_is_enabled()) { + return context_tracking_is_enabled(); +} + +static inline bool vtime_accounting_cpu_enabled(void) +{ + if (vtime_accounting_enabled()) { if (context_tracking_cpu_is_enabled()) return true; } @@ -29,7 +40,7 @@ static inline bool vtime_accounting_enabled(void) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ #ifndef CONFIG_VIRT_CPU_ACCOUNTING -static inline bool vtime_accounting_enabled(void) { return false; } +static inline bool vtime_accounting_cpu_enabled(void) { return false; } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ @@ -44,7 +55,7 @@ extern void vtime_task_switch(struct task_struct *prev); extern void vtime_common_task_switch(struct task_struct *prev); static inline void vtime_task_switch(struct task_struct *prev) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_common_task_switch(prev); } #endif /* __ARCH_HAS_VTIME_TASK_SWITCH */ @@ -59,7 +70,7 @@ extern void vtime_account_irq_enter(struct task_struct *tsk); extern void vtime_common_account_irq_enter(struct task_struct *tsk); static inline void vtime_account_irq_enter(struct task_struct *tsk) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_common_account_irq_enter(tsk); } #endif /* __ARCH_HAS_VTIME_ACCOUNT */ @@ -78,7 +89,7 @@ extern void vtime_gen_account_irq_exit(struct task_struct *tsk); static inline void vtime_account_irq_exit(struct task_struct *tsk) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_gen_account_irq_exit(tsk); } diff --git a/include/linux/wait.h b/include/linux/wait.h index 513b36f04dfd..ae71a769b89e 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -102,11 +102,62 @@ init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) q->func = func; } +/** + * waitqueue_active -- locklessly test for waiters on the queue + * @q: the waitqueue to test for waiters + * + * returns true if the wait list is not empty + * + * NOTE: this function is lockless and requires care, incorrect usage _will_ + * lead to sporadic and non-obvious failure. + * + * Use either while holding wait_queue_head_t::lock or when used for wakeups + * with an extra smp_mb() like: + * + * CPU0 - waker CPU1 - waiter + * + * for (;;) { + * @cond = true; prepare_to_wait(&wq, &wait, state); + * smp_mb(); // smp_mb() from set_current_state() + * if (waitqueue_active(wq)) if (@cond) + * wake_up(wq); break; + * schedule(); + * } + * finish_wait(&wq, &wait); + * + * Because without the explicit smp_mb() it's possible for the + * waitqueue_active() load to get hoisted over the @cond store such that we'll + * observe an empty wait list while the waiter might not observe @cond. + * + * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), + * which (when the lock is uncontended) are of roughly equal cost. + */ static inline int waitqueue_active(wait_queue_head_t *q) { return !list_empty(&q->task_list); } +/** + * wq_has_sleeper - check if there are any waiting processes + * @wq: wait queue head + * + * Returns true if wq has waiting processes + * + * Please refer to the comment for waitqueue_active. + */ +static inline bool wq_has_sleeper(wait_queue_head_t *wq) +{ + /* + * We need to be sure we are in sync with the + * add_wait_queue modifications to the wait queue. + * + * This memory barrier should be paired with one on the + * waiting side. 
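The comment above describes the barrier pairing in prose; here is a compact sketch of the two sides, using the new wq_has_sleeper() on the waker side so the smp_mb() does not have to be open-coded (names are illustrative):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_cond;

static void example_wake(void)			/* CPU0 - waker */
{
	example_cond = true;
	if (wq_has_sleeper(&example_wq))	/* implies smp_mb() */
		wake_up(&example_wq);
}

static void example_wait(void)			/* CPU1 - waiter */
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_cond)	/* ordered by set_current_state() */
			break;
		schedule();
	}
	finish_wait(&example_wq, &wait);
}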
+ */ + smp_mb(); + return waitqueue_active(wq); +} + extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 027b1f43f12d..b585fa2507ee 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -12,10 +12,12 @@ #include <linux/bitops.h> #include <linux/device.h> #include <linux/cdev.h> +#include <linux/notifier.h> #include <uapi/linux/watchdog.h> struct watchdog_ops; struct watchdog_device; +struct watchdog_core_data; /** struct watchdog_ops - The watchdog-devices operations * @@ -26,8 +28,7 @@ struct watchdog_device; * @status: The routine that shows the status of the watchdog device. * @set_timeout:The routine for setting the watchdog devices timeout value (in seconds). * @get_timeleft:The routine that gets the time left before a reset (in seconds). - * @ref: The ref operation for dyn. allocated watchdog_device structs - * @unref: The unref operation for dyn. allocated watchdog_device structs + * @restart: The routine for restarting the machine. * @ioctl: The routines that handles extra ioctl calls. * * The watchdog_ops structure contains a list of low-level operations @@ -45,25 +46,26 @@ struct watchdog_ops { unsigned int (*status)(struct watchdog_device *); int (*set_timeout)(struct watchdog_device *, unsigned int); unsigned int (*get_timeleft)(struct watchdog_device *); - void (*ref)(struct watchdog_device *); - void (*unref)(struct watchdog_device *); + int (*restart)(struct watchdog_device *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); }; /** struct watchdog_device - The structure that defines a watchdog device * * @id: The watchdog's ID. (Allocated by watchdog_register_device) - * @cdev: The watchdog's Character device. - * @dev: The device for our watchdog * @parent: The parent bus device + * @groups: List of sysfs attribute groups to create when creating the + * watchdog device. * @info: Pointer to a watchdog_info structure. * @ops: Pointer to the list of watchdog operations. * @bootstatus: Status of the watchdog device at boot. * @timeout: The watchdog devices timeout value (in seconds). * @min_timeout:The watchdog devices minimum timeout value (in seconds). * @max_timeout:The watchdog devices maximum timeout value (in seconds). - * @driver-data:Pointer to the drivers private data. - * @lock: Lock for watchdog core internal use only. + * @reboot_nb: The notifier block to stop watchdog on reboot. + * @restart_nb: The notifier block to register a restart function. + * @driver_data:Pointer to the drivers private data. + * @wd_data: Pointer to watchdog core internal data. * @status: Field that contains the devices internal status bits. * @deferred: entry in wtd_deferred_reg_list which is used to * register early initialized watchdogs. 
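For the reworked watchdog core above, a driver no longer implements ref/unref; instead it may provide a restart handler and ask the core to stop the device on reboot. A probe-time sketch with hypothetical hardware helpers and driver data:

static int example_wdt_restart(struct watchdog_device *wdd)
{
	example_hw_reset_machine(wdd);		/* hypothetical register poke */
	return 0;
}

static const struct watchdog_ops example_wdt_ops = {
	.owner		= THIS_MODULE,
	.start		= example_wdt_start,	/* hypothetical */
	.stop		= example_wdt_stop,	/* hypothetical */
	.restart	= example_wdt_restart,
};

static int example_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd = &example_wdd;	/* hypothetical */

	wdd->info = &example_wdt_info;
	wdd->ops = &example_wdt_ops;

	watchdog_stop_on_reboot(wdd);		  /* sets WDOG_STOP_ON_REBOOT */
	watchdog_set_restart_priority(wdd, 128);  /* mid-priority handler */

	return watchdog_register_device(wdd);
}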
@@ -79,24 +81,23 @@ struct watchdog_ops { */ struct watchdog_device { int id; - struct cdev cdev; - struct device *dev; struct device *parent; + const struct attribute_group **groups; const struct watchdog_info *info; const struct watchdog_ops *ops; unsigned int bootstatus; unsigned int timeout; unsigned int min_timeout; unsigned int max_timeout; + struct notifier_block reboot_nb; + struct notifier_block restart_nb; void *driver_data; - struct mutex lock; + struct watchdog_core_data *wd_data; unsigned long status; /* Bit numbers for status flags */ #define WDOG_ACTIVE 0 /* Is the watchdog running/active */ -#define WDOG_DEV_OPEN 1 /* Opened via /dev/watchdog ? */ -#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ -#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ -#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ +#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */ +#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ struct list_head deferred; }; @@ -116,6 +117,12 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway set_bit(WDOG_NO_WAY_OUT, &wdd->status); } +/* Use the following function to stop the watchdog on reboot */ +static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) +{ + set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { @@ -142,6 +149,7 @@ static inline void *watchdog_get_drvdata(struct watchdog_device *wdd) } /* drivers/watchdog/watchdog_core.c */ +void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority); extern int watchdog_init_timeout(struct watchdog_device *wdd, unsigned int timeout_parm, struct device *dev); extern int watchdog_register_device(struct watchdog_device *); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0197358f1e81..0e32bc71245e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -618,4 +618,10 @@ static inline int workqueue_sysfs_register(struct workqueue_struct *wq) { return 0; } #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_WQ_WATCHDOG +void wq_watchdog_touch(int cpu); +#else /* CONFIG_WQ_WATCHDOG */ +static inline void wq_watchdog_touch(int cpu) { } +#endif /* CONFIG_WQ_WATCHDOG */ + #endif diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 89474b9d260c..4457541de3c9 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -19,12 +19,16 @@ struct inode; struct dentry; +/* + * struct xattr_handler: When @name is set, match attributes with exactly that + * name. When @prefix is set instead, match attributes with that prefix and + * with a non-empty suffix. 
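Under the new matching rules described below in xattr.h, a filesystem's handler table mixes exact-@name handlers with @prefix handlers, and the list() callback shrinks to a per-dentry yes/no decision. A sketch with hypothetical get/set callbacks:

static bool example_user_list(struct dentry *dentry)
{
	return true;	/* advertise "user.*" attributes in listxattr() */
}

static const struct xattr_handler example_acl_handler = {
	.name	= "system.posix_acl_access",	/* exact-name match */
	.get	= example_acl_get,
	.set	= example_acl_set,
};

static const struct xattr_handler example_user_handler = {
	.prefix	= "user.",		/* prefix match, non-empty suffix */
	.list	= example_user_list,
	.get	= example_user_get,
	.set	= example_user_set,
};

static const struct xattr_handler *example_xattr_handlers[] = {
	&example_acl_handler,
	&example_user_handler,
	NULL,
};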
+ */ struct xattr_handler { + const char *name; const char *prefix; int flags; /* fs private flags */ - size_t (*list)(const struct xattr_handler *, struct dentry *dentry, - char *list, size_t list_size, const char *name, - size_t name_len); + bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, const char *name, void *buffer, size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, @@ -53,8 +57,11 @@ int generic_setxattr(struct dentry *dentry, const char *name, const void *value, int generic_removexattr(struct dentry *dentry, const char *name); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); -int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, - const char *value, size_t size, gfp_t flags); + +static inline const char *xattr_prefix(const struct xattr_handler *handler) +{ + return handler->prefix ?: handler->name; +} struct simple_xattrs { struct list_head head; @@ -95,8 +102,7 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, const void *value, size_t size, int flags); -int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name); -ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer, +ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_list_add(struct simple_xattrs *xattrs, struct simple_xattr *new_xattr); diff --git a/include/media/cx2341x.h b/include/media/drv-intf/cx2341x.h index 9635eebaab09..9635eebaab09 100644 --- a/include/media/cx2341x.h +++ b/include/media/drv-intf/cx2341x.h diff --git a/include/media/cx25840.h b/include/media/drv-intf/cx25840.h index 783c5bdd63eb..783c5bdd63eb 100644 --- a/include/media/cx25840.h +++ b/include/media/drv-intf/cx25840.h diff --git a/include/media/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h index 69bcd2a07d5c..69bcd2a07d5c 100644 --- a/include/media/exynos-fimc.h +++ b/include/media/drv-intf/exynos-fimc.h diff --git a/include/media/msp3400.h b/include/media/drv-intf/msp3400.h index 90cf22ada8b4..1e6e80213a77 100644 --- a/include/media/msp3400.h +++ b/include/media/drv-intf/msp3400.h @@ -223,4 +223,3 @@ */ #endif /* MSP3400_H */ - diff --git a/include/media/s3c_camif.h b/include/media/drv-intf/s3c_camif.h index df96c2c789b4..df96c2c789b4 100644 --- a/include/media/s3c_camif.h +++ b/include/media/drv-intf/s3c_camif.h diff --git a/include/media/saa7146.h b/include/media/drv-intf/saa7146.h index 96058a5a4acc..96058a5a4acc 100644 --- a/include/media/saa7146.h +++ b/include/media/drv-intf/saa7146.h diff --git a/include/media/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h index 92766f77a5de..0da6ccc0615b 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/drv-intf/saa7146_vv.h @@ -4,7 +4,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> -#include <media/saa7146.h> +#include <media/drv-intf/saa7146.h> #include <media/videobuf-dma-sg.h> #define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */ diff --git a/include/media/sh_mobile_ceu.h b/include/media/drv-intf/sh_mobile_ceu.h index 7f57056c22ba..7f57056c22ba 100644 --- a/include/media/sh_mobile_ceu.h +++ b/include/media/drv-intf/sh_mobile_ceu.h diff --git a/include/media/sh_mobile_csi2.h b/include/media/drv-intf/sh_mobile_csi2.h index 14030db51f13..14030db51f13 100644 --- 
a/include/media/sh_mobile_csi2.h +++ b/include/media/drv-intf/sh_mobile_csi2.h diff --git a/include/media/sh_vou.h b/include/media/drv-intf/sh_vou.h index ec3ba9a597a2..ec3ba9a597a2 100644 --- a/include/media/sh_vou.h +++ b/include/media/drv-intf/sh_vou.h diff --git a/include/media/si476x.h b/include/media/drv-intf/si476x.h index e02e241e2d22..ad87fa8483b2 100644 --- a/include/media/si476x.h +++ b/include/media/drv-intf/si476x.h @@ -1,5 +1,5 @@ /* - * include/media/si476x.h -- Common definitions for si476x driver + * include/media/drv-intf/si476x.h -- Common definitions for si476x driver * * Copyright (C) 2012 Innovative Converged Devices(ICD) * Copyright (C) 2013 Andrey Smirnov diff --git a/include/media/soc_mediabus.h b/include/media/drv-intf/soc_mediabus.h index 2ff773785fb6..2ff773785fb6 100644 --- a/include/media/soc_mediabus.h +++ b/include/media/drv-intf/soc_mediabus.h diff --git a/include/media/tea575x.h b/include/media/drv-intf/tea575x.h index 5d096578b736..fb272d48ba33 100644 --- a/include/media/tea575x.h +++ b/include/media/drv-intf/tea575x.h @@ -63,7 +63,7 @@ struct snd_tea575x { u32 band; /* 0: FM, 1: FM-Japan, 2: AM */ u32 freq; /* frequency */ struct mutex mutex; - struct snd_tea575x_ops *ops; + const struct snd_tea575x_ops *ops; void *private_data; u8 card[32]; u8 bus_info[32]; diff --git a/include/media/ad9389b.h b/include/media/i2c/ad9389b.h index 5ba9af869b8b..5ba9af869b8b 100644 --- a/include/media/ad9389b.h +++ b/include/media/i2c/ad9389b.h diff --git a/include/media/adp1653.h b/include/media/i2c/adp1653.h index 9779c8549eb4..0b6709335dff 100644 --- a/include/media/adp1653.h +++ b/include/media/i2c/adp1653.h @@ -1,5 +1,5 @@ /* - * include/media/adp1653.h + * include/media/i2c/adp1653.h * * Copyright (C) 2008--2011 Nokia Corporation * diff --git a/include/media/adv7183.h b/include/media/i2c/adv7183.h index c5c2d377c0a6..c5c2d377c0a6 100644 --- a/include/media/adv7183.h +++ b/include/media/i2c/adv7183.h diff --git a/include/media/adv7343.h b/include/media/i2c/adv7343.h index e4142b1ef8cd..e4142b1ef8cd 100644 --- a/include/media/adv7343.h +++ b/include/media/i2c/adv7343.h diff --git a/include/media/adv7393.h b/include/media/i2c/adv7393.h index b28edf351842..b28edf351842 100644 --- a/include/media/adv7393.h +++ b/include/media/i2c/adv7393.h diff --git a/include/media/adv7511.h b/include/media/i2c/adv7511.h index d83b91d80764..d83b91d80764 100644 --- a/include/media/adv7511.h +++ b/include/media/i2c/adv7511.h diff --git a/include/media/adv7604.h b/include/media/i2c/adv7604.h index a913859bfd30..a913859bfd30 100644 --- a/include/media/adv7604.h +++ b/include/media/i2c/adv7604.h diff --git a/include/media/adv7842.h b/include/media/i2c/adv7842.h index bc249709bf35..bc249709bf35 100644 --- a/include/media/adv7842.h +++ b/include/media/i2c/adv7842.h diff --git a/include/media/ak881x.h b/include/media/i2c/ak881x.h index b7f2add5ce7b..b7f2add5ce7b 100644 --- a/include/media/ak881x.h +++ b/include/media/i2c/ak881x.h diff --git a/include/media/as3645a.h b/include/media/i2c/as3645a.h index 5075496d2c9e..0e07484ddc33 100644 --- a/include/media/as3645a.h +++ b/include/media/i2c/as3645a.h @@ -1,5 +1,5 @@ /* - * include/media/as3645a.h + * include/media/i2c/as3645a.h * * Copyright (C) 2008-2011 Nokia Corporation * diff --git a/include/media/bt819.h b/include/media/i2c/bt819.h index 8025f4bc2bb6..8025f4bc2bb6 100644 --- a/include/media/bt819.h +++ b/include/media/i2c/bt819.h diff --git a/include/media/cs5345.h b/include/media/i2c/cs5345.h index 6ccae24e65ed..6ccae24e65ed 100644 --- 
a/include/media/cs5345.h +++ b/include/media/i2c/cs5345.h diff --git a/include/media/cs53l32a.h b/include/media/i2c/cs53l32a.h index bf76197d3790..bf76197d3790 100644 --- a/include/media/cs53l32a.h +++ b/include/media/i2c/cs53l32a.h diff --git a/include/media/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h index d8564354debb..d8564354debb 100644 --- a/include/media/ir-kbd-i2c.h +++ b/include/media/i2c/ir-kbd-i2c.h diff --git a/include/media/lm3560.h b/include/media/i2c/lm3560.h index 46670706d6f8..5ed942a8ac32 100644 --- a/include/media/lm3560.h +++ b/include/media/i2c/lm3560.h @@ -1,5 +1,5 @@ /* - * include/media/lm3560.h + * include/media/i2c/lm3560.h * * Copyright (C) 2013 Texas Instruments * diff --git a/include/media/lm3646.h b/include/media/i2c/lm3646.h index c6acf5a1d640..724c10003a28 100644 --- a/include/media/lm3646.h +++ b/include/media/i2c/lm3646.h @@ -1,5 +1,5 @@ /* - * include/media/lm3646.h + * include/media/i2c/lm3646.h * * Copyright (C) 2014 Texas Instruments * diff --git a/include/media/m52790.h b/include/media/i2c/m52790.h index 7ddffae31a67..7ddffae31a67 100644 --- a/include/media/m52790.h +++ b/include/media/i2c/m52790.h diff --git a/include/media/m5mols.h b/include/media/i2c/m5mols.h index 4a825ae5c6c8..4a825ae5c6c8 100644 --- a/include/media/m5mols.h +++ b/include/media/i2c/m5mols.h diff --git a/include/media/mt9m032.h b/include/media/i2c/mt9m032.h index c3a78114d7a6..c3a78114d7a6 100644 --- a/include/media/mt9m032.h +++ b/include/media/i2c/mt9m032.h diff --git a/include/media/mt9p031.h b/include/media/i2c/mt9p031.h index 1ba361205af1..1ba361205af1 100644 --- a/include/media/mt9p031.h +++ b/include/media/i2c/mt9p031.h diff --git a/include/media/mt9t001.h b/include/media/i2c/mt9t001.h index 03fd63edd133..03fd63edd133 100644 --- a/include/media/mt9t001.h +++ b/include/media/i2c/mt9t001.h diff --git a/include/media/mt9t112.h b/include/media/i2c/mt9t112.h index a43c74ab05ec..a43c74ab05ec 100644 --- a/include/media/mt9t112.h +++ b/include/media/i2c/mt9t112.h diff --git a/include/media/mt9v011.h b/include/media/i2c/mt9v011.h index ea29fc74cd06..ea29fc74cd06 100644 --- a/include/media/mt9v011.h +++ b/include/media/i2c/mt9v011.h diff --git a/include/media/mt9v022.h b/include/media/i2c/mt9v022.h index 40561801321a..40561801321a 100644 --- a/include/media/mt9v022.h +++ b/include/media/i2c/mt9v022.h diff --git a/include/media/mt9v032.h b/include/media/i2c/mt9v032.h index 12175a63c5b2..12175a63c5b2 100644 --- a/include/media/mt9v032.h +++ b/include/media/i2c/mt9v032.h diff --git a/include/media/noon010pc30.h b/include/media/i2c/noon010pc30.h index 58eafee36b30..58eafee36b30 100644 --- a/include/media/noon010pc30.h +++ b/include/media/i2c/noon010pc30.h diff --git a/include/media/ov2659.h b/include/media/i2c/ov2659.h index 4216adc1ede2..4216adc1ede2 100644 --- a/include/media/ov2659.h +++ b/include/media/i2c/ov2659.h diff --git a/include/media/ov7670.h b/include/media/i2c/ov7670.h index 1913d5123072..1913d5123072 100644 --- a/include/media/ov7670.h +++ b/include/media/i2c/ov7670.h diff --git a/include/media/ov772x.h b/include/media/i2c/ov772x.h index 00dbb7c4feae..00dbb7c4feae 100644 --- a/include/media/ov772x.h +++ b/include/media/i2c/ov772x.h diff --git a/include/media/ov9650.h b/include/media/i2c/ov9650.h index d630cf9e028d..d630cf9e028d 100644 --- a/include/media/ov9650.h +++ b/include/media/i2c/ov9650.h diff --git a/include/media/rj54n1cb0c.h b/include/media/i2c/rj54n1cb0c.h index 8ae3288ae925..8ae3288ae925 100644 --- a/include/media/rj54n1cb0c.h +++ 
b/include/media/i2c/rj54n1cb0c.h diff --git a/include/media/s5c73m3.h b/include/media/i2c/s5c73m3.h index ccb9e5448762..ccb9e5448762 100644 --- a/include/media/s5c73m3.h +++ b/include/media/i2c/s5c73m3.h diff --git a/include/media/s5k4ecgx.h b/include/media/i2c/s5k4ecgx.h index 90c1be792ffe..90c1be792ffe 100644 --- a/include/media/s5k4ecgx.h +++ b/include/media/i2c/s5k4ecgx.h diff --git a/include/media/s5k6aa.h b/include/media/i2c/s5k6aa.h index ba34f7055e55..ba34f7055e55 100644 --- a/include/media/s5k6aa.h +++ b/include/media/i2c/s5k6aa.h diff --git a/include/media/saa6588.h b/include/media/i2c/saa6588.h index b5ec1aa60ed5..b5ec1aa60ed5 100644 --- a/include/media/saa6588.h +++ b/include/media/i2c/saa6588.h diff --git a/include/media/saa7115.h b/include/media/i2c/saa7115.h index 76911e71de17..53954c90e7f6 100644 --- a/include/media/saa7115.h +++ b/include/media/i2c/saa7115.h @@ -138,4 +138,3 @@ struct saa7115_platform_data { }; #endif - diff --git a/include/media/saa7127.h b/include/media/i2c/saa7127.h index bbcf862141af..7005ba7daa9e 100644 --- a/include/media/saa7127.h +++ b/include/media/i2c/saa7127.h @@ -38,4 +38,3 @@ enum saa7127_output_type { }; #endif - diff --git a/include/media/smiapp.h b/include/media/i2c/smiapp.h index 268a3cdbf6cb..029142ddb95c 100644 --- a/include/media/smiapp.h +++ b/include/media/i2c/smiapp.h @@ -1,5 +1,5 @@ /* - * include/media/smiapp.h + * include/media/i2c/smiapp.h * * Generic driver for SMIA/SMIA++ compliant camera modules * diff --git a/include/media/sr030pc30.h b/include/media/i2c/sr030pc30.h index 6f901a653ba2..6f901a653ba2 100644 --- a/include/media/sr030pc30.h +++ b/include/media/i2c/sr030pc30.h diff --git a/include/media/tc358743.h b/include/media/i2c/tc358743.h index 4513f2f9cfbc..4513f2f9cfbc 100644 --- a/include/media/tc358743.h +++ b/include/media/i2c/tc358743.h diff --git a/include/media/ths7303.h b/include/media/i2c/ths7303.h index a7b49297da82..a7b49297da82 100644 --- a/include/media/ths7303.h +++ b/include/media/i2c/ths7303.h diff --git a/include/media/tvaudio.h b/include/media/i2c/tvaudio.h index 1ac8184693f8..1ac8184693f8 100644 --- a/include/media/tvaudio.h +++ b/include/media/i2c/tvaudio.h diff --git a/include/media/tvp514x.h b/include/media/i2c/tvp514x.h index 86ed7e806830..86ed7e806830 100644 --- a/include/media/tvp514x.h +++ b/include/media/i2c/tvp514x.h diff --git a/include/media/tvp5150.h b/include/media/i2c/tvp5150.h index 72bd2a2b8bfd..649908a25605 100644 --- a/include/media/tvp5150.h +++ b/include/media/i2c/tvp5150.h @@ -31,4 +31,3 @@ #define TVP5150_BLACK_SCREEN 1 #endif - diff --git a/include/media/tvp7002.h b/include/media/i2c/tvp7002.h index fadb6afe9ef0..fadb6afe9ef0 100644 --- a/include/media/tvp7002.h +++ b/include/media/i2c/tvp7002.h diff --git a/include/media/tw9910.h b/include/media/i2c/tw9910.h index 90bcf1fa5421..90bcf1fa5421 100644 --- a/include/media/tw9910.h +++ b/include/media/i2c/tw9910.h diff --git a/include/media/uda1342.h b/include/media/i2c/uda1342.h index cd156403a368..cd156403a368 100644 --- a/include/media/uda1342.h +++ b/include/media/i2c/uda1342.h diff --git a/include/media/upd64031a.h b/include/media/i2c/upd64031a.h index 3ad6a32e1bce..3ad6a32e1bce 100644 --- a/include/media/upd64031a.h +++ b/include/media/i2c/upd64031a.h diff --git a/include/media/upd64083.h b/include/media/i2c/upd64083.h index 59b6f32ba300..59b6f32ba300 100644 --- a/include/media/upd64083.h +++ b/include/media/i2c/upd64083.h diff --git a/include/media/wm8775.h b/include/media/i2c/wm8775.h index d0e801a9935c..d0e801a9935c 100644 --- 
a/include/media/wm8775.h +++ b/include/media/i2c/wm8775.h diff --git a/include/media/lirc.h b/include/media/lirc.h index 4b3ab2966b5a..554988c860c1 100644 --- a/include/media/lirc.h +++ b/include/media/lirc.h @@ -1,168 +1 @@ -/* - * lirc.h - linux infrared remote control header file - * last modified 2010/07/13 by Jarod Wilson - */ - -#ifndef _LINUX_LIRC_H -#define _LINUX_LIRC_H - -#include <linux/types.h> -#include <linux/ioctl.h> - -#define PULSE_BIT 0x01000000 -#define PULSE_MASK 0x00FFFFFF - -#define LIRC_MODE2_SPACE 0x00000000 -#define LIRC_MODE2_PULSE 0x01000000 -#define LIRC_MODE2_FREQUENCY 0x02000000 -#define LIRC_MODE2_TIMEOUT 0x03000000 - -#define LIRC_VALUE_MASK 0x00FFFFFF -#define LIRC_MODE2_MASK 0xFF000000 - -#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) -#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) -#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) -#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) - -#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) -#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) - -#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) -#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) -#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) -#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) - -/* used heavily by lirc userspace */ -#define lirc_t int - -/*** lirc compatible hardware features ***/ - -#define LIRC_MODE2SEND(x) (x) -#define LIRC_SEND2MODE(x) (x) -#define LIRC_MODE2REC(x) ((x) << 16) -#define LIRC_REC2MODE(x) ((x) >> 16) - -#define LIRC_MODE_RAW 0x00000001 -#define LIRC_MODE_PULSE 0x00000002 -#define LIRC_MODE_MODE2 0x00000004 -#define LIRC_MODE_LIRCCODE 0x00000010 - - -#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) -#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) -#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) -#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_SEND_MASK 0x0000003f - -#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 -#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 -#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 - -#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) -#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) -#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) -#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) - -#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) - -#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 -#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 -#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 -#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 -#define LIRC_CAN_SET_REC_FILTER 0x08000000 - -#define LIRC_CAN_MEASURE_CARRIER 0x02000000 -#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 - -#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) -#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) - -#define LIRC_CAN_NOTIFY_DECODE 0x01000000 - -/*** IOCTL commands for lirc driver ***/ - -#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) - -#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) -#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) -#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) -#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) -#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 
0x00000005, __u32) -#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) -#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) - -#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) -#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) - -#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) -#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) -#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) -#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) - -/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ -#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) - -#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) -#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) -/* Note: these can reset the according pulse_width */ -#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) -#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) -#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) -#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) -#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) - -/* - * when a timeout != 0 is set the driver will send a - * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is - * never sent, timeout is disabled by default - */ -#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) - -/* 1 enables, 0 disables timeout reports in MODE2 */ -#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) - -/* - * pulses shorter than this are filtered out by hardware (software - * emulation in lirc_dev?) - */ -#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x0000001a, __u32) -/* - * spaces shorter than this are filtered out by hardware (software - * emulation in lirc_dev?) - */ -#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001b, __u32) -/* - * if filter cannot be set independently for pulse/space, this should - * be used - */ -#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001c, __u32) - -/* - * if enabled from the next key press on the driver will send - * LIRC_MODE2_FREQUENCY packets - */ -#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) - -/* - * to set a range use - * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the - * lower bound first and later - * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound - */ - -#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) -#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) - -#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) - -#define LIRC_SETUP_START _IO('i', 0x00000021) -#define LIRC_SETUP_END _IO('i', 0x00000022) - -#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) - -#endif +#include <uapi/linux/lirc.h> diff --git a/include/media/media-device.h b/include/media/media-device.h index 6e6db78f1ee2..d3855898c3fc 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -30,6 +30,238 @@ #include <media/media-devnode.h> #include <media/media-entity.h> +/** + * DOC: Media Controller + * + * The media controller userspace API is documented in DocBook format in + * Documentation/DocBook/media/v4l/media-controller.xml. This document focus + * on the kernel-side implementation of the media framework. + * + * * Abstract media device model: + * + * Discovering a device internal topology, and configuring it at runtime, is one + * of the goals of the media framework. 
To achieve this, hardware devices are + * modelled as an oriented graph of building blocks called entities connected + * through pads. + * + * An entity is a basic media hardware building block. It can correspond to + * a large variety of logical blocks such as physical hardware devices + * (CMOS sensor for instance), logical hardware devices (a building block + * in a System-on-Chip image processing pipeline), DMA channels or physical + * connectors. + * + * A pad is a connection endpoint through which an entity can interact with + * other entities. Data (not restricted to video) produced by an entity + * flows from the entity's output to one or more entity inputs. Pads should + * not be confused with physical pins at chip boundaries. + * + * A link is a point-to-point oriented connection between two pads, either + * on the same entity or on different entities. Data flows from a source + * pad to a sink pad. + * + * + * * Media device: + * + * A media device is represented by a struct &media_device instance, defined in + * include/media/media-device.h. Allocation of the structure is handled by the + * media device driver, usually by embedding the &media_device instance in a + * larger driver-specific structure. + * + * Drivers register media device instances by calling + * __media_device_register() via the macro media_device_register() + * and unregistered by calling + * media_device_unregister(). + * + * * Entities, pads and links: + * + * - Entities + * + * Entities are represented by a struct &media_entity instance, defined in + * include/media/media-entity.h. The structure is usually embedded into a + * higher-level structure, such as a v4l2_subdev or video_device instance, + * although drivers can allocate entities directly. + * + * Drivers initialize entity pads by calling + * media_entity_pads_init(). + * + * Drivers register entities with a media device by calling + * media_device_register_entity() + * and unregistred by calling + * media_device_unregister_entity(). + * + * - Interfaces + * + * Interfaces are represented by a struct &media_interface instance, defined in + * include/media/media-entity.h. Currently, only one type of interface is + * defined: a device node. Such interfaces are represented by a struct + * &media_intf_devnode. + * + * Drivers initialize and create device node interfaces by calling + * media_devnode_create() + * and remove them by calling: + * media_devnode_remove(). + * + * - Pads + * + * Pads are represented by a struct &media_pad instance, defined in + * include/media/media-entity.h. Each entity stores its pads in a pads array + * managed by the entity driver. Drivers usually embed the array in a + * driver-specific structure. + * + * Pads are identified by their entity and their 0-based index in the pads + * array. + * Both information are stored in the &media_pad structure, making the + * &media_pad pointer the canonical way to store and pass link references. + * + * Pads have flags that describe the pad capabilities and state. + * + * %MEDIA_PAD_FL_SINK indicates that the pad supports sinking data. + * %MEDIA_PAD_FL_SOURCE indicates that the pad supports sourcing data. + * + * NOTE: One and only one of %MEDIA_PAD_FL_SINK and %MEDIA_PAD_FL_SOURCE must + * be set for each pad. + * + * - Links + * + * Links are represented by a struct &media_link instance, defined in + * include/media/media-entity.h. There are two types of links: + * + * 1. pad to pad links: + * + * Associate two entities via their PADs. 
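Putting the entity, pad and link registration calls named above together, a driver-side sketch might look like the following; the entity names and pad layout are hypothetical:

static struct media_pad example_pads[2] = {
	{ .flags = MEDIA_PAD_FL_SINK },
	{ .flags = MEDIA_PAD_FL_SOURCE },
};

static int example_register(struct media_device *mdev,
			    struct media_entity *entity,
			    struct media_entity *capture)
{
	int ret;

	ret = media_entity_pads_init(entity, 2, example_pads);
	if (ret)
		return ret;

	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	/* Source pad 1 of @entity feeds sink pad 0 of @capture. */
	return media_create_pad_link(entity, 1, capture, 0,
				     MEDIA_LNK_FL_ENABLED);
}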
Each entity has a list that points + * to all links originating at or targeting any of its pads. + * A given link is thus stored twice, once in the source entity and once in + * the target entity. + * + * Drivers create pad to pad links by calling: + * media_create_pad_link() and remove with media_entity_remove_links(). + * + * 2. interface to entity links: + * + * Associate one interface to a Link. + * + * Drivers create interface to entity links by calling: + * media_create_intf_link() and remove with media_remove_intf_links(). + * + * NOTE: + * + * Links can only be created after having both ends already created. + * + * Links have flags that describe the link capabilities and state. The + * valid values are described at media_create_pad_link() and + * media_create_intf_link(). + * + * Graph traversal: + * + * The media framework provides APIs to iterate over entities in a graph. + * + * To iterate over all entities belonging to a media device, drivers can use + * the media_device_for_each_entity macro, defined in + * include/media/media-device.h. + * + * struct media_entity *entity; + * + * media_device_for_each_entity(entity, mdev) { + * // entity will point to each entity in turn + * ... + * } + * + * Drivers might also need to iterate over all entities in a graph that can be + * reached only through enabled links starting at a given entity. The media + * framework provides a depth-first graph traversal API for that purpose. + * + * Note that graphs with cycles (whether directed or undirected) are *NOT* + * supported by the graph traversal API. To prevent infinite loops, the graph + * traversal code limits the maximum depth to MEDIA_ENTITY_ENUM_MAX_DEPTH, + * currently defined as 16. + * + * Drivers initiate a graph traversal by calling + * media_entity_graph_walk_start() + * + * The graph structure, provided by the caller, is initialized to start graph + * traversal at the given entity. + * + * Drivers can then retrieve the next entity by calling + * media_entity_graph_walk_next() + * + * When the graph traversal is complete the function will return NULL. + * + * Graph traversal can be interrupted at any moment. No cleanup function call + * is required and the graph structure can be freed normally. + * + * Helper functions can be used to find a link between two given pads, or a pad + * connected to another pad through an enabled link + * media_entity_find_link() and media_entity_remote_pad() + * + * Use count and power handling: + * + * Due to the wide differences between drivers regarding power management + * needs, the media controller does not implement power management. However, + * the &media_entity structure includes a use_count field that media drivers + * can use to track the number of users of every entity for power management + * needs. + * + * The &media_entity.@use_count field is owned by media drivers and must not be + * touched by entity drivers. Access to the field must be protected by the + * &media_device.@graph_mutex lock. + * + * Links setup: + * + * Link properties can be modified at runtime by calling + * media_entity_setup_link() + * + * Pipelines and media streams: + * + * When starting streaming, drivers must notify all entities in the pipeline to + * prevent link states from being modified during streaming by calling + * media_entity_pipeline_start(). + * + * The function will mark all entities connected to the given entity through + * enabled links, either directly or indirectly, as streaming. 
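A sketch of the traversal described above, in the minimal form implied by this documentation; depending on the exact kernel revision the graph state may also need media_entity_graph_walk_init()/media_entity_graph_walk_cleanup() around it:

static void example_walk(struct media_entity *start)
{
	struct media_entity_graph graph;
	struct media_entity *entity;

	media_entity_graph_walk_start(&graph, start);
	while ((entity = media_entity_graph_walk_next(&graph))) {
		/* @entity is each entity reachable from @start through
		 * enabled links, visited depth-first. */
	}
}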
+ * + * The &media_pipeline instance pointed to by the pipe argument will be stored + * in every entity in the pipeline. Drivers should embed the &media_pipeline + * structure in higher-level pipeline structures and can then access the + * pipeline through the &media_entity pipe field. + * + * Calls to media_entity_pipeline_start() can be nested. The pipeline pointer + * must be identical for all nested calls to the function. + * + * media_entity_pipeline_start() may return an error. In that case, it will + * clean up any of the changes it did by itself. + * + * When stopping the stream, drivers must notify the entities with + * media_entity_pipeline_stop(). + * + * If multiple calls to media_entity_pipeline_start() have been made the same + * number of media_entity_pipeline_stop() calls are required to stop streaming. + * The &media_entity pipe field is reset to NULL on the last nested stop call. + * + * Link configuration will fail with -%EBUSY by default if either end of the + * link is a streaming entity. Links that can be modified while streaming must + * be marked with the %MEDIA_LNK_FL_DYNAMIC flag. + * + * If other operations need to be disallowed on streaming entities (such as + * changing entities configuration parameters) drivers can explicitly check the + * media_entity stream_count field to find out if an entity is streaming. This + * operation must be done with the media_device graph_mutex held. + * + * Link validation: + * + * Link validation is performed by media_entity_pipeline_start() for any + * entity which has sink pads in the pipeline. The + * &media_entity.@link_validate() callback is used for that purpose. In + * @link_validate() callback, entity driver should check that the properties of + * the source pad of the connected entity and its own sink pad match. It is up + * to the type of the entity (and in the end, the properties of the hardware) + * what matching actually means. + * + * Subsystems should facilitate link validation by providing subsystem specific + * helper functions to provide easy access for commonly needed information, and + * in the end provide a way to use driver-specific callbacks. + */ + +struct ida; struct device; /** @@ -41,8 +273,16 @@ struct device; * @bus_info: Unique and stable device location identifier * @hw_revision: Hardware device revision * @driver_version: Device driver version - * @entity_id: ID of the next entity to be registered + * @topology_version: Monotonic counter for storing the version of the graph + * topology. Should be incremented each time the topology changes. + * @id: Unique ID used on the last registered graph object + * @entity_internal_idx: Unique internal entity ID used by the graph traversal + * algorithms + * @entity_internal_idx_max: Allocated internal entity indices * @entities: List of registered entities + * @interfaces: List of registered interfaces + * @pads: List of registered pads + * @links: List of registered links * @lock: Entities list lock * @graph_mutex: Entities graph operation lock * @link_notify: Link state change notification callback @@ -68,10 +308,18 @@ struct media_device { u32 hw_revision; u32 driver_version; - u32 entity_id; + u32 topology_version; + + u32 id; + struct ida entity_internal_idx; + int entity_internal_idx_max; + struct list_head entities; + struct list_head interfaces; + struct list_head pads; + struct list_head links; - /* Protects the entities list */ + /* Protects the graph objects creation/removal */ spinlock_t lock; /* Serializes graph operations. 
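A streaming start/stop sketch following the rules above, with the media_pipeline embedded in a hypothetical driver structure:

struct example_video {
	struct media_pipeline pipe;
	struct video_device vdev;
};

static int example_start_streaming(struct example_video *ev)
{
	int ret;

	/* Marks every entity reachable through enabled links as streaming
	 * and runs link validation on the sink pads in the pipeline. */
	ret = media_entity_pipeline_start(&ev->vdev.entity, &ev->pipe);
	if (ret < 0)
		return ret;

	/* ... program the hardware ... */
	return 0;
}

static void example_stop_streaming(struct example_video *ev)
{
	media_entity_pipeline_stop(&ev->vdev.entity);
}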
*/ struct mutex graph_mutex; @@ -80,6 +328,8 @@ struct media_device { unsigned int notification); }; +#ifdef CONFIG_MEDIA_CONTROLLER + /* Supported link_notify @notification values. */ #define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 @@ -87,17 +337,228 @@ struct media_device { /* media_devnode to media_device */ #define to_media_device(node) container_of(node, struct media_device, devnode) +/** + * media_entity_enum_init - Initialise an entity enumeration + * + * @ent_enum: Entity enumeration to be initialised + * @mdev: The related media device + * + * Returns zero on success or a negative error code. + */ +static inline __must_check int media_entity_enum_init( + struct media_entity_enum *ent_enum, struct media_device *mdev) +{ + return __media_entity_enum_init(ent_enum, + mdev->entity_internal_idx_max + 1); +} + +/** + * media_device_init() - Initializes a media device element + * + * @mdev: pointer to struct &media_device + * + * This function initializes the media device prior to its registration. + * The media device initialization and registration is split in two functions + * to avoid race conditions and make the media device available to user-space + * before the media graph has been completed. + * + * So drivers need to first initialize the media device, register any entity + * within the media device, create pad to pad links and then finally register + * the media device by calling media_device_register() as a final step. + */ +void media_device_init(struct media_device *mdev); + +/** + * media_device_cleanup() - Cleanups a media device element + * + * @mdev: pointer to struct &media_device + * + * This function that will destroy the graph_mutex that is + * initialized in media_device_init(). + */ +void media_device_cleanup(struct media_device *mdev); + +/** + * __media_device_register() - Registers a media device element + * + * @mdev: pointer to struct &media_device + * @owner: should be filled with %THIS_MODULE + * + * Users, should, instead, call the media_device_register() macro. + * + * The caller is responsible for initializing the media_device structure before + * registration. The following fields must be set: + * + * - dev must point to the parent device (usually a &pci_dev, &usb_interface or + * &platform_device instance). + * + * - model must be filled with the device model name as a NUL-terminated UTF-8 + * string. The device/model revision must not be stored in this field. + * + * The following fields are optional: + * + * - serial is a unique serial number stored as a NUL-terminated ASCII string. + * The field is big enough to store a GUID in text form. If the hardware + * doesn't provide a unique serial number this field must be left empty. + * + * - bus_info represents the location of the device in the system as a + * NUL-terminated ASCII string. For PCI/PCIe devices bus_info must be set to + * "PCI:" (or "PCIe:") followed by the value of pci_name(). For USB devices, + * the usb_make_path() function must be used. This field is used by + * applications to distinguish between otherwise identical devices that don't + * provide a serial number. + * + * - hw_revision is the hardware device revision in a driver-specific format. + * When possible the revision should be formatted with the KERNEL_VERSION + * macro. + * + * - driver_version is formatted with the KERNEL_VERSION macro. The version + * minor must be incremented when new features are added to the userspace API + * without breaking binary compatibility. 
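A registration sketch for a USB driver, filling only the fields the text above calls mandatory plus a few of the optional ones; the model string and driver version are placeholders:

static struct media_device *example_media_dev_alloc(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct media_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->dev = &udev->dev;				/* mandatory */
	strlcpy(mdev->model, "Example Grabber", sizeof(mdev->model));
	usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
	mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
	mdev->driver_version = KERNEL_VERSION(0, 1, 0);

	media_device_init(mdev);
	/* ... register entities and create links, then register last: */
	if (media_device_register(mdev)) {
		media_device_cleanup(mdev);
		kfree(mdev);
		return NULL;
	}
	return mdev;
}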
The version major must be + * incremented when binary compatibility is broken. + * + * Notes: + * + * Upon successful registration a character device named media[0-9]+ is created. + * The device major and minor numbers are dynamic. The model name is exported as + * a sysfs attribute. + * + * Unregistering a media device that hasn't been registered is *NOT* safe. + * + * Return: returns zero on success or a negative error code. + */ int __must_check __media_device_register(struct media_device *mdev, struct module *owner); #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) + +/** + * __media_device_unregister() - Unegisters a media device element + * + * @mdev: pointer to struct &media_device + * + * + * It is safe to call this function on an unregistered (but initialised) + * media device. + */ void media_device_unregister(struct media_device *mdev); +/** + * media_device_register_entity() - registers a media entity inside a + * previously registered media device. + * + * @mdev: pointer to struct &media_device + * @entity: pointer to struct &media_entity to be registered + * + * Entities are identified by a unique positive integer ID. The media + * controller framework will such ID automatically. IDs are not guaranteed + * to be contiguous, and the ID number can change on newer Kernel versions. + * So, neither the driver nor userspace should hardcode ID numbers to refer + * to the entities, but, instead, use the framework to find the ID, when + * needed. + * + * The media_entity name, type and flags fields should be initialized before + * calling media_device_register_entity(). Entities embedded in higher-level + * standard structures can have some of those fields set by the higher-level + * framework. + * + * If the device has pads, media_entity_pads_init() should be called before + * this function. Otherwise, the &media_entity.@pad and &media_entity.@num_pads + * should be zeroed before calling this function. + * + * Entities have flags that describe the entity capabilities and state: + * + * %MEDIA_ENT_FL_DEFAULT indicates the default entity for a given type. + * This can be used to report the default audio and video devices or the + * default camera sensor. + * + * NOTE: Drivers should set the entity function before calling this function. + * Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and + * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers. + */ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity); + +/* + * media_device_unregister_entity() - unregisters a media entity. + * + * @entity: pointer to struct &media_entity to be unregistered + * + * All links associated with the entity and all PADs are automatically + * unregistered from the media_device when this function is called. + * + * Unregistering an entity will not change the IDs of the other entities and + * the previoully used ID will never be reused for a newly registered entities. + * + * When a media device is unregistered, all its entities are unregistered + * automatically. No manual entities unregistration is then required. + * + * Note: the media_entity instance itself must be freed explicitly by + * the driver if required. + */ void media_device_unregister_entity(struct media_entity *entity); +/** + * media_device_get_devres() - get media device as device resource + * creates if one doesn't exist + * + * @dev: pointer to struct &device. 
+ * + * Sometimes, the media controller &media_device needs to be shared by more + * than one driver. This function adds support for that, by dynamically + * allocating the &media_device and allowing it to be obtained from the + * struct &device associated with the common device where all sub-device + * components belong. So, for example, on an USB device with multiple + * interfaces, each interface may be handled by a separate per-interface + * drivers. While each interface have its own &device, they all share a + * common &device associated with the hole USB device. + */ +struct media_device *media_device_get_devres(struct device *dev); + +/** + * media_device_find_devres() - find media device as device resource + * + * @dev: pointer to struct &device. + */ +struct media_device *media_device_find_devres(struct device *dev); + /* Iterate over all entities. */ #define media_device_for_each_entity(entity, mdev) \ - list_for_each_entry(entity, &(mdev)->entities, list) + list_for_each_entry(entity, &(mdev)->entities, graph_obj.list) + +/* Iterate over all interfaces. */ +#define media_device_for_each_intf(intf, mdev) \ + list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list) + +/* Iterate over all pads. */ +#define media_device_for_each_pad(pad, mdev) \ + list_for_each_entry(pad, &(mdev)->pads, graph_obj.list) +/* Iterate over all links. */ +#define media_device_for_each_link(link, mdev) \ + list_for_each_entry(link, &(mdev)->links, graph_obj.list) +#else +static inline int media_device_register(struct media_device *mdev) +{ + return 0; +} +static inline void media_device_unregister(struct media_device *mdev) +{ +} +static inline int media_device_register_entity(struct media_device *mdev, + struct media_entity *entity) +{ + return 0; +} +static inline void media_device_unregister_entity(struct media_entity *entity) +{ +} +static inline struct media_device *media_device_get_devres(struct device *dev) +{ + return NULL; +} +static inline struct media_device *media_device_find_devres(struct device *dev) +{ + return NULL; +} +#endif /* CONFIG_MEDIA_CONTROLLER */ #endif diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h index 17ddae32060d..fe42f08e72bd 100644 --- a/include/media/media-devnode.h +++ b/include/media/media-devnode.h @@ -40,6 +40,20 @@ */ #define MEDIA_FLAG_REGISTERED 0 +/** + * struct media_file_operations - Media device file operations + * + * @owner: should be filled with %THIS_MODULE + * @read: pointer to the function that implements read() syscall + * @write: pointer to the function that implements write() syscall + * @poll: pointer to the function that implements poll() syscall + * @ioctl: pointer to the function that implements ioctl() syscall + * @compat_ioctl: pointer to the function that will handle 32 bits userspace + * calls to the the ioctl() syscall on a Kernel compiled with 64 bits. + * @open: pointer to the function that implements open() syscall + * @release: pointer to the function that will release the resources allocated + * by the @open function. 
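When two interface drivers of the same physical device need to share one media_device, the devres helper above can supply it. A tiny sketch, assuming intf is the usb_interface being probed:

static struct media_device *example_shared_mdev(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* Allocated on the first call for &udev->dev, reused afterwards. */
	return media_device_get_devres(&udev->dev);
}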
+ */ struct media_file_operations { struct module *owner; ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); @@ -53,7 +67,7 @@ struct media_file_operations { /** * struct media_devnode - Media device node - * @fops: pointer to struct media_file_operations with media device ops + * @fops: pointer to struct &media_file_operations with media device ops * @dev: struct device pointer for the media controller device * @cdev: struct cdev pointer character device * @parent: parent device @@ -86,15 +100,53 @@ struct media_devnode { /* dev to media_devnode */ #define to_media_devnode(cd) container_of(cd, struct media_devnode, dev) +/** + * media_devnode_register - register a media device node + * + * @mdev: media device node structure we want to register + * @owner: should be filled with %THIS_MODULE + * + * The registration code assigns minor numbers and registers the new device node + * with the kernel. An error is returned if no free minor number can be found, + * or if the registration of the device node fails. + * + * Zero is returned on success. + * + * Note that if the media_devnode_register call fails, the release() callback of + * the media_devnode structure is *not* called, so the caller is responsible for + * freeing any data. + */ int __must_check media_devnode_register(struct media_devnode *mdev, struct module *owner); + +/** + * media_devnode_unregister - unregister a media device node + * @mdev: the device node to unregister + * + * This unregisters the passed device. Future open calls will be met with + * errors. + * + * This function can safely be called if the device node has never been + * registered or has already been unregistered. + */ void media_devnode_unregister(struct media_devnode *mdev); +/** + * media_devnode_data - returns a pointer to the &media_devnode + * + * @filp: pointer to struct &file + */ static inline struct media_devnode *media_devnode_data(struct file *filp) { return filp->private_data; } +/** + * media_devnode_is_registered - returns true if &media_devnode is registered; + * false otherwise. + * + * @mdev: pointer to struct &media_devnode. + */ static inline int media_devnode_is_registered(struct media_devnode *mdev) { return test_bit(MEDIA_FLAG_REGISTERED, &mdev->flags); diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 197f93799753..fe485d367985 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -23,25 +23,151 @@ #ifndef _MEDIA_ENTITY_H #define _MEDIA_ENTITY_H -#include <linux/bitops.h> +#include <linux/bitmap.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/media.h> +/* Enums used internally at the media controller to represent graphs */ + +/** + * enum media_gobj_type - type of a graph object + * + * @MEDIA_GRAPH_ENTITY: Identify a media entity + * @MEDIA_GRAPH_PAD: Identify a media pad + * @MEDIA_GRAPH_LINK: Identify a media link + * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via + * a device node + */ +enum media_gobj_type { + MEDIA_GRAPH_ENTITY, + MEDIA_GRAPH_PAD, + MEDIA_GRAPH_LINK, + MEDIA_GRAPH_INTF_DEVNODE, +}; + +#define MEDIA_BITS_PER_TYPE 8 +#define MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) +#define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) + +/* Structs to represent the objects that belong to a media graph */ + +/** + * struct media_gobj - Define a graph object. + * + * @mdev: Pointer to the struct media_device that owns the object + * @id: Non-zero object ID identifier. 
The ID should be unique + * inside a media_device, as it is composed by + * %MEDIA_BITS_PER_TYPE to store the type plus + * %MEDIA_BITS_PER_ID to store the ID + * @list: List entry stored in one of the per-type mdev object lists + * + * All objects on the media graph should have this struct embedded + */ +struct media_gobj { + struct media_device *mdev; + u32 id; + struct list_head list; +}; + +#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 + +/** + * struct media_entity_enum - An enumeration of media entities. + * + * @bmap: Bit map in which each bit represents one entity at struct + * media_entity->internal_idx. + * @idx_max: Number of bits in bmap + */ +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +/** + * struct media_entity_graph - Media graph traversal state + * + * @stack: Graph traversal stack; the stack contains information + * on the path the media entities to be walked and the + * links through which they were reached. + * @ent_enum: Visited entities + * @top: The top of the stack + */ +struct media_entity_graph { + struct { + struct media_entity *entity; + struct list_head *link; + } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; + + struct media_entity_enum ent_enum; + int top; +}; + +/* + * struct media_pipeline - Media pipeline related information + * + * @streaming_count: Streaming start count - streaming stop count + * @graph: Media graph walk during pipeline start / stop + */ struct media_pipeline { + int streaming_count; + struct media_entity_graph graph; }; +/** + * struct media_link - A link object part of a media graph. + * + * @graph_obj: Embedded structure containing the media object common data + * @list: Linked list associated with an entity or an interface that + * owns the link. + * @gobj0: Part of a union. Used to get the pointer for the first + * graph_object of the link. + * @source: Part of a union. Used only if the first object (gobj0) is + * a pad. In that case, it represents the source pad. + * @intf: Part of a union. Used only if the first object (gobj0) is + * an interface. + * @gobj1: Part of a union. Used to get the pointer for the second + * graph_object of the link. + * @source: Part of a union. Used only if the second object (gobj1) is + * a pad. In that case, it represents the sink pad. + * @entity: Part of a union. Used only if the second object (gobj1) is + * an entity. + * @reverse: Pointer to the link for the reverse direction of a pad to pad + * link. + * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) + * @is_backlink: Indicate if the link is a backlink. + */ struct media_link { - struct media_pad *source; /* Source pad */ - struct media_pad *sink; /* Sink pad */ - struct media_link *reverse; /* Link in the reverse direction */ - unsigned long flags; /* Link flags (MEDIA_LNK_FL_*) */ + struct media_gobj graph_obj; + struct list_head list; + union { + struct media_gobj *gobj0; + struct media_pad *source; + struct media_interface *intf; + }; + union { + struct media_gobj *gobj1; + struct media_pad *sink; + struct media_entity *entity; + }; + struct media_link *reverse; + unsigned long flags; + bool is_backlink; }; +/** + * struct media_pad - A media pad graph object. 
+ * + * @graph_obj: Embedded structure containing the media object common data + * @entity: Entity this pad belongs to + * @index: Pad index in the entity pads array, numbered from 0 to n + * @flags: Pad flags, as defined in uapi/media.h (MEDIA_PAD_FL_*) + */ struct media_pad { - struct media_entity *entity; /* Entity this pad belongs to */ - u16 index; /* Pad index in the entity pads array */ - unsigned long flags; /* Pad flags (MEDIA_PAD_FL_*) */ + struct media_gobj graph_obj; /* must be first field in struct */ + struct media_entity *entity; + u16 index; + unsigned long flags; }; /** @@ -60,105 +186,763 @@ struct media_entity_operations { int (*link_validate)(struct media_link *link); }; +/** + * struct media_entity - A media entity graph object. + * + * @graph_obj: Embedded structure containing the media object common data. + * @name: Entity name. + * @function: Entity main function, as defined in uapi/media.h + * (MEDIA_ENT_F_*) + * @flags: Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*) + * @num_pads: Number of sink and source pads. + * @num_links: Total number of links, forward and back, enabled and disabled. + * @num_backlinks: Number of backlinks + * @internal_idx: An unique internal entity specific number. The numbers are + * re-used if entities are unregistered or registered again. + * @pads: Pads array with the size defined by @num_pads. + * @links: List of data links. + * @ops: Entity operations. + * @stream_count: Stream count for the entity. + * @use_count: Use count for the entity. + * @pipe: Pipeline this entity belongs to. + * @info: Union with devnode information. Kept just for backward + * compatibility. + * @major: Devnode major number (zero if not applicable). Kept just + * for backward compatibility. + * @minor: Devnode minor number (zero if not applicable). Kept just + * for backward compatibility. + * + * NOTE: @stream_count and @use_count reference counts must never be + * negative, but are signed integers on purpose: a simple WARN_ON(<0) check + * can be used to detect reference count bugs that would make them negative. + */ struct media_entity { - struct list_head list; - struct media_device *parent; /* Media device this entity belongs to*/ - u32 id; /* Entity ID, unique in the parent media - * device context */ - const char *name; /* Entity name */ - u32 type; /* Entity type (MEDIA_ENT_T_*) */ - u32 revision; /* Entity revision, driver specific */ - unsigned long flags; /* Entity flags (MEDIA_ENT_FL_*) */ - u32 group_id; /* Entity group ID */ - - u16 num_pads; /* Number of sink and source pads */ - u16 num_links; /* Number of existing links, both - * enabled and disabled */ - u16 num_backlinks; /* Number of backlinks */ - u16 max_links; /* Maximum number of links */ - - struct media_pad *pads; /* Pads array (num_pads elements) */ - struct media_link *links; /* Links array (max_links elements)*/ - - const struct media_entity_operations *ops; /* Entity operations */ + struct media_gobj graph_obj; /* must be first field in struct */ + const char *name; + u32 function; + unsigned long flags; + + u16 num_pads; + u16 num_links; + u16 num_backlinks; + int internal_idx; + + struct media_pad *pads; + struct list_head links; + + const struct media_entity_operations *ops; /* Reference counts must never be negative, but are signed integers on * purpose: a simple WARN_ON(<0) check can be used to detect reference * count bugs that would make them negative. */ - int stream_count; /* Stream count for the entity. */ - int use_count; /* Use count for the entity. 
*/ + int stream_count; + int use_count; - struct media_pipeline *pipe; /* Pipeline this entity belongs to. */ + struct media_pipeline *pipe; union { - /* Node specifications */ struct { u32 major; u32 minor; } dev; - - /* Sub-device specifications */ - /* Nothing needed yet */ } info; }; -static inline u32 media_entity_type(struct media_entity *entity) +/** + * struct media_interface - A media interface graph object. + * + * @graph_obj: embedded graph object + * @links: List of links pointing to graph entities + * @type: Type of the interface as defined in the + * uapi/media/media.h header, e. g. + * MEDIA_INTF_T_* + * @flags: Interface flags as defined in uapi/media/media.h + */ +struct media_interface { + struct media_gobj graph_obj; + struct list_head links; + u32 type; + u32 flags; +}; + +/** + * struct media_intf_devnode - A media interface via a device node. + * + * @intf: embedded interface object + * @major: Major number of a device node + * @minor: Minor number of a device node + */ +struct media_intf_devnode { + struct media_interface intf; + + /* Should match the fields at media_v2_intf_devnode */ + u32 major; + u32 minor; +}; + +/** + * media_entity_id() - return the media entity graph object id + * + * @entity: pointer to entity + */ +static inline u32 media_entity_id(struct media_entity *entity) { - return entity->type & MEDIA_ENT_TYPE_MASK; + return entity->graph_obj.id; } -static inline u32 media_entity_subtype(struct media_entity *entity) +/** + * media_type() - return the media object type + * + * @gobj: pointer to the media graph object + */ +static inline enum media_gobj_type media_type(struct media_gobj *gobj) { - return entity->type & MEDIA_ENT_SUBTYPE_MASK; + return gobj->id >> MEDIA_BITS_PER_ID; } -#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 -#define MEDIA_ENTITY_ENUM_MAX_ID 64 +/** + * media_id() - return the media object ID + * + * @gobj: pointer to the media graph object + */ +static inline u32 media_id(struct media_gobj *gobj) +{ + return gobj->id & MEDIA_ID_MASK; +} -/* - * The number of pads can't be bigger than the number of entities, - * as the worse-case scenario is to have one entity linked up to - * MEDIA_ENTITY_ENUM_MAX_ID - 1 entities. +/** + * media_gobj_gen_id() - encapsulates type and ID on at the object ID + * + * @type: object type as define at enum &media_gobj_type. + * @local_id: next ID, from struct &media_device.@id. */ -#define MEDIA_ENTITY_MAX_PADS (MEDIA_ENTITY_ENUM_MAX_ID - 1) +static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) +{ + u32 id; -struct media_entity_graph { - struct { - struct media_entity *entity; - int link; - } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; + id = type << MEDIA_BITS_PER_ID; + id |= local_id & MEDIA_ID_MASK; - DECLARE_BITMAP(entities, MEDIA_ENTITY_ENUM_MAX_ID); - int top; -}; + return id; +} + +/** + * is_media_entity_v4l2_io() - identify if the entity main function + * is a V4L2 I/O + * + * @entity: pointer to entity + * + * Return: true if the entity main function is one of the V4L2 I/O types + * (video, VBI or SDR radio); false otherwise. 
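+ * + * For instance, a driver that only cares about V4L2 I/O entities could filter + * them while iterating over the graph; this is only a sketch, where @mdev is + * assumed to be a registered &media_device and foo_handle_io_entity() is a + * placeholder helper: + * + *	struct media_entity *entity; + * + *	media_device_for_each_entity(entity, mdev) + *		if (is_media_entity_v4l2_io(entity)) + *			foo_handle_io_entity(entity);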
+ */ +static inline bool is_media_entity_v4l2_io(struct media_entity *entity) +{ + if (!entity) + return false; + + switch (entity->function) { + case MEDIA_ENT_F_IO_V4L: + case MEDIA_ENT_F_IO_VBI: + case MEDIA_ENT_F_IO_SWRADIO: + return true; + default: + return false; + } +} + +/** + * is_media_entity_v4l2_subdev - return true if the entity main function is + * associated with the V4L2 API subdev usage + * + * @entity: pointer to entity + * + * This is an ancillary function used by subdev-based V4L2 drivers. + * It checks if the entity function is one of the functions used by a V4L2 subdev, + * e.g. camera-related functions, analog TV decoder, TV tuner, V4L2 DSPs. + */ +static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) +{ + if (!entity) + return false; + + switch (entity->function) { + case MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN: + case MEDIA_ENT_F_CAM_SENSOR: + case MEDIA_ENT_F_FLASH: + case MEDIA_ENT_F_LENS: + case MEDIA_ENT_F_ATV_DECODER: + case MEDIA_ENT_F_TUNER: + return true; + + default: + return false; + } +} + +/** + * __media_entity_enum_init - Initialise an entity enumeration + * + * @ent_enum: Entity enumeration to be initialised + * @idx_max: Maximum number of entities in the enumeration + * + * Return: Returns zero on success or a negative error code. + */ +__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, + int idx_max); + +/** + * media_entity_enum_cleanup - Release resources of an entity enumeration + * + * @ent_enum: Entity enumeration to be released + */ +void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); + +/** + * media_entity_enum_zero - Clear the entire enum + * + * @ent_enum: Entity enumeration to be cleared + */ +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_set - Mark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be marked + */ +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; + + __set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_clear - Unmark a single entity in the enum + * + * @ent_enum: Entity enumeration + * @entity: Entity to be unmarked + */ +static inline void media_entity_enum_clear(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return; -int media_entity_init(struct media_entity *entity, u16 num_pads, - struct media_pad *pads, u16 extra_links); -void media_entity_cleanup(struct media_entity *entity); + __clear_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test - Test whether the entity is marked + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Returns true if the entity was marked. + */ +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return test_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_test_and_set - Test whether the entity is marked, and mark it + * + * @ent_enum: Entity enumeration + * @entity: Entity to be tested + * + * Marks the entity, and returns true if it was already marked before the call.
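+ * + * A typical use is inside a traversal loop, to process each entity at most + * once; this is only a sketch, where ent_enum is assumed to be an already + * initialised &media_entity_enum and foo_process() is a placeholder helper: + * + *	while ((entity = media_entity_graph_walk_next(graph))) { + *		if (media_entity_enum_test_and_set(&ent_enum, entity)) + *			continue; + *		foo_process(entity); + *	}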
+ */ +static inline bool +media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) + return true; + + return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); +} + +/** + * media_entity_enum_empty - Test whether the entire enum is empty + * + * @ent_enum: Entity enumeration + * + * Returns true if the entity enumeration is empty. + */ +static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) +{ + return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); +} + +/** + * media_entity_enum_intersects - Test whether two enums intersect + * + * @ent_enum1: First entity enumeration + * @ent_enum2: Second entity enumeration + * + * Returns true if entity enumerations @ent_enum1 and @ent_enum2 intersect, + * otherwise false. + */ +static inline bool media_entity_enum_intersects( + struct media_entity_enum *ent_enum1, + struct media_entity_enum *ent_enum2) +{ + WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); + + return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, + min(ent_enum1->idx_max, ent_enum2->idx_max)); +} + +#define gobj_to_entity(gobj) \ + container_of(gobj, struct media_entity, graph_obj) + +#define gobj_to_pad(gobj) \ + container_of(gobj, struct media_pad, graph_obj) + +#define gobj_to_link(gobj) \ + container_of(gobj, struct media_link, graph_obj) + +#define gobj_to_intf(gobj) \ + container_of(gobj, struct media_interface, graph_obj) + +#define intf_to_devnode(intf) \ + container_of(intf, struct media_intf_devnode, intf) + +/** + * media_gobj_create - Initialize a graph object + * + * @mdev: Pointer to the media_device that contains the object + * @type: Type of the object + * @gobj: Pointer to the graph object + * + * This routine initializes the embedded struct media_gobj inside a + * media graph object. It is called automatically if media_*_create() + * calls are used. However, if the object (entity, link, pad, interface) + * is embedded on some other object, this function should be called before + * registering the object at the media controller. + */ +void media_gobj_create(struct media_device *mdev, + enum media_gobj_type type, + struct media_gobj *gobj); + +/** + * media_gobj_destroy - Stop using a graph object on a media device + * + * @gobj: Pointer to the graph object + * + * This should be called by all routines like media_device_unregister() + * that remove/destroy media graph objects. + */ +void media_gobj_destroy(struct media_gobj *gobj); + +/** + * media_entity_pads_init() - Initialize the entity pads + * + * @entity: entity where the pads belong + * @num_pads: total number of sink and source pads + * @pads: Array of @num_pads pads. + * + * The pads array is managed by the entity driver and passed to + * media_entity_pads_init() where its pointer will be stored in the entity + * structure. + * + * If no pads are needed, drivers could either directly fill + * &media_entity->@num_pads with 0 and &media_entity->@pads with NULL or call + * this function that will do the same. + * + * As the number of pads is known in advance, the pads array is not allocated + * dynamically but is managed by the entity driver. Most drivers will embed the + * pads array in a driver-specific structure, avoiding dynamic allocation.
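+ * + * For example, a hypothetical entity with one sink and one source pad could + * embed its pads as in the sketch below (the foo_* names are placeholders): + * + *	struct foo_device { + *		struct media_entity entity; + *		struct media_pad pads[2]; + *	}; + * + *	foo->pads[0].flags = MEDIA_PAD_FL_SINK; + *	foo->pads[1].flags = MEDIA_PAD_FL_SOURCE; + *	ret = media_entity_pads_init(&foo->entity, 2, foo->pads);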
+ * + * Drivers must set the direction of every pad in the pads array before calling + * media_entity_pads_init(). The function will initialize the other pad fields. + */ +int media_entity_pads_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pads); + +/** + * media_entity_cleanup() - free resources associated with an entity + * + * @entity: entity where the pads belong + * + * This function must be called during the cleanup phase after unregistering + * the entity (currently, it does nothing). + */ +static inline void media_entity_cleanup(struct media_entity *entity) {}; + +/** + * media_create_pad_link() - creates a link between two entities. + * + * @source: pointer to &media_entity of the source pad. + * @source_pad: number of the source pad in the pads array + * @sink: pointer to &media_entity of the sink pad. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * + * Valid values for flags: + * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be + * used to transfer media data. When two or more links target a sink pad, + * only one of them can be enabled at a time. + * + * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't + * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then + * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is + * always enabled. + * + * NOTE: + * + * Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for both ends. + */ +__must_check int media_create_pad_link(struct media_entity *source, + u16 source_pad, struct media_entity *sink, + u16 sink_pad, u32 flags); + +/** + * media_create_pad_links() - creates links between two entities. + * + * @mdev: Pointer to the media_device that contains the object + * @source_function: Function of the source entities. Used only if @source is + * NULL. + * @source: pointer to &media_entity of the source pad. If NULL, it will use + * all entities that match the @source_function. + * @source_pad: number of the source pad in the pads array + * @sink_function: Function of the sink entities. Used only if @sink is NULL. + * @sink: pointer to &media_entity of the sink pad. If NULL, it will use + * all entities that match the @sink_function. + * @sink_pad: number of the sink pad in the pads array. + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * @allow_both_undefined: if true, then both @source and @sink can be NULL. + * In such case, it will create a crossbar between all entities that + * match @source_function to all entities that match @sink_function. + * If false, it will return 0 and won't create any link if both @source + * and @sink are NULL. + * + * Valid values for flags: + * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be + * used to transfer media data. If multiple links are created and this + * flag is passed as an argument, only the first created link will have + * this flag. + * + * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't + * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then + * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is + * always enabled. + * + * It is common for some devices to have multiple source and/or sink entities + * of the same type that should be linked.
While media_create_pad_link() + * creates links one by one, this function is meant to allow 1:n, n:1 and even + * cross-bar (n:n) links. + * + * NOTE: Before calling this function, media_entity_pads_init() and + * media_device_register_entity() should be called previously for the entities + * to be linked. + */ +int media_create_pad_links(const struct media_device *mdev, + const u32 source_function, + struct media_entity *source, + const u16 source_pad, + const u32 sink_function, + struct media_entity *sink, + const u16 sink_pad, + u32 flags, + const bool allow_both_undefined); -int media_entity_create_link(struct media_entity *source, u16 source_pad, - struct media_entity *sink, u16 sink_pad, u32 flags); void __media_entity_remove_links(struct media_entity *entity); + +/** + * media_entity_remove_links() - remove all links associated with an entity + * + * @entity: pointer to &media_entity + * + * Note: this is called automatically when an entity is unregistered via + * media_device_unregister_entity(). + */ void media_entity_remove_links(struct media_entity *entity); +/** + * __media_entity_setup_link - Configure a media link without locking + * @link: The link being configured + * @flags: Link configuration flags + * + * The bulk of link setup is handled by the two entities connected through the + * link. This function notifies both entities of the link configuration change. + * + * If the link is immutable or if the current and new configuration are + * identical, return immediately. + * + * The user is expected to hold link->source->parent->mutex. If not, + * media_entity_setup_link() should be used instead. + */ int __media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_setup_link() - changes the link flags properties at runtime + * + * @link: pointer to &media_link + * @flags: the requested new link flags + * + * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag, + * used to enable/disable a link. Links marked with the + * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. + * + * When a link is enabled or disabled, the media framework calls the + * link_setup operation for the two entities at the source and sink of the + * link, in that order. If the second link_setup call fails, another + * link_setup call is made on the first entity to restore the original link + * flags. + * + * Media device drivers can be notified of link setup operations by setting the + * media_device::link_notify pointer to a callback function. If provided, the + * notification callback will be called before enabling and after disabling + * links. + * + * Entity drivers must implement the link_setup operation if any of their links + * is non-immutable. The operation must either configure the hardware or store + * the configuration information to be applied later. + * + * Link configuration must not have any side effect on other links. If an + * enabled link at a sink pad prevents another link at the same pad from + * being enabled, the link_setup operation must return -EBUSY and can't + * implicitly disable the first enabled link. + * + * NOTE: the valid values of the flags for the link are the same as described + * in media_create_pad_link(), for pad to pad links, or the same as described + * in media_create_intf_link(), for interface to entity links.
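+ * + * For instance, enabling the link between a source and a sink pad at run time + * could be done as in the sketch below, where source_pad and sink_pad are + * assumed to be valid &media_pad pointers: + * + *	link = media_entity_find_link(source_pad, sink_pad); + *	if (!link) + *		return -ENOLINK; + *	ret = media_entity_setup_link(link, MEDIA_LNK_FL_ENABLED);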
+ */ int media_entity_setup_link(struct media_link *link, u32 flags); + +/** + * media_entity_find_link - Find a link between two pads + * @source: Source pad + * @sink: Sink pad + * + * Return a pointer to the link between the two entities. If no such link + * exists, return NULL. + */ struct media_link *media_entity_find_link(struct media_pad *source, struct media_pad *sink); + +/** + * media_entity_remote_pad - Find the pad at the remote end of a link + * @pad: Pad at the local end of the link + * + * Search for a remote pad connected to the given pad by iterating over all + * links originating or terminating at that pad until an enabled link is found. + * + * Return a pointer to the pad at the remote end of the first found enabled + * link, or NULL if no enabled link has been found. + */ struct media_pad *media_entity_remote_pad(struct media_pad *pad); +/** + * media_entity_get - Get a reference to the parent module + * + * @entity: The entity + * + * Get a reference to the parent media device module. + * + * The function will return immediately if @entity is NULL. + * + * Return a pointer to the entity on success or NULL on failure. + */ struct media_entity *media_entity_get(struct media_entity *entity); + +__must_check int media_entity_graph_walk_init( + struct media_entity_graph *graph, struct media_device *mdev); + +/** + * media_entity_graph_walk_cleanup - Release resources used by graph walk. + * + * @graph: Media graph structure that will be used to walk the graph + */ +void media_entity_graph_walk_cleanup(struct media_entity_graph *graph); + +/** + * media_entity_put - Release the reference to the parent module + * + * @entity: The entity + * + * Release the reference count acquired by media_entity_get(). + * + * The function will return immediately if @entity is NULL. + */ void media_entity_put(struct media_entity *entity); +/** + * media_entity_graph_walk_start - Start walking the media graph at a given entity + * @graph: Media graph structure that will be used to walk the graph + * @entity: Starting entity + * + * Before using this function, media_entity_graph_walk_init() must be + * used to allocate resources used for walking the graph. This + * function initializes the graph traversal structure to walk the + * entities graph starting at the given entity. The traversal + * structure must not be modified by the caller during graph + * traversal. After the graph walk, the resources must be released + * using media_entity_graph_walk_cleanup(). + */ void media_entity_graph_walk_start(struct media_entity_graph *graph, - struct media_entity *entity); + struct media_entity *entity); + +/** + * media_entity_graph_walk_next - Get the next entity in the graph + * @graph: Media graph structure + * + * Perform a depth-first traversal of the given media entities graph. + * + * The graph structure must have been previously initialized with a call to + * media_entity_graph_walk_start(). + * + * Return the next entity in the graph or NULL if the whole graph have been + * traversed. + */ struct media_entity * media_entity_graph_walk_next(struct media_entity_graph *graph); + +/** + * media_entity_pipeline_start - Mark a pipeline as streaming + * @entity: Starting entity + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * Mark all entities connected to a given entity through enabled links, either + * directly or indirectly, as streaming. The given pipeline object is assigned to + * every entity in the pipeline and stored in the media_entity pipe field. 
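+ * + * A driver would typically call it when it starts streaming and undo it on + * error or when streaming stops; this is only a sketch, where vdev is assumed + * to be the driver's &video_device and foo->pipe a driver-owned + * &media_pipeline: + * + *	ret = media_entity_pipeline_start(&vdev->entity, &foo->pipe); + *	if (ret < 0) + *		return ret; + *	... + *	media_entity_pipeline_stop(&vdev->entity);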
+ * + * Calls to this function can be nested, in which case the same number of + * media_entity_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * media_entity_pipeline_start(). + */ __must_check int media_entity_pipeline_start(struct media_entity *entity, struct media_pipeline *pipe); + +/** + * media_entity_pipeline_stop - Mark a pipeline as not streaming + * @entity: Starting entity + * + * Mark all entities connected to a given entity through enabled links, either + * directly or indirectly, as not streaming. The media_entity pipe field is + * reset to NULL. + * + * If multiple calls to media_entity_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + */ void media_entity_pipeline_stop(struct media_entity *entity); +/** + * media_devnode_create() - creates and initializes a device node interface + * + * @mdev: pointer to struct &media_device + * @type: type of the interface, as given by MEDIA_INTF_T_* macros + * as defined in the uapi/media/media.h header. + * @flags: Interface flags as defined in uapi/media/media.h. + * @major: Device node major number. + * @minor: Device node minor number. + * + * Return: if succeeded, returns a pointer to the newly allocated + * &media_intf_devnode. + */ +struct media_intf_devnode * +__must_check media_devnode_create(struct media_device *mdev, + u32 type, u32 flags, + u32 major, u32 minor); +/** + * media_devnode_remove() - removes a device node interface + * + * @devnode: pointer to &media_intf_devnode to be freed. + * + * When a device node interface is removed, all links to it are automatically + * removed. + */ +void media_devnode_remove(struct media_intf_devnode *devnode); + +/** + * media_create_intf_link() - creates a link between an entity and an interface + * + * @entity: pointer to %media_entity + * @intf: pointer to %media_interface + * @flags: Link flags, as defined in include/uapi/linux/media.h. + * + * Valid values for flags: + * The %MEDIA_LNK_FL_ENABLED flag indicates that the interface is connected to + * the entity hardware. That's the default value for interfaces. An + * interface may be disabled if the hardware is busy due to the usage + * of some other interface that is currently controlling the hardware. + * A typical example is a hybrid TV device that handles only one type of + * stream at a given time. So, when the digital TV is streaming, + * the V4L2 interfaces won't be enabled, as such a device is not able to + * also stream analog TV or radio. + * + * Note: + * + * Before calling this function, media_devnode_create() should be called for + * the interface and media_device_register_entity() should be called for the + * entity that will be part of the link. + */ +struct media_link * +__must_check media_create_intf_link(struct media_entity *entity, + struct media_interface *intf, + u32 flags); +/** + * __media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link. + * + * Note: this is an unlocked version of media_remove_intf_link() + */ +void __media_remove_intf_link(struct media_link *link); + +/** + * media_remove_intf_link() - remove a single interface link + * + * @link: pointer to &media_link.
+ * + * Note: prefer to use this one, instead of __media_remove_intf_link() + */ +void media_remove_intf_link(struct media_link *link); + +/** + * __media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * Note: this is an unlocked version of media_remove_intf_links(). + */ +void __media_remove_intf_links(struct media_interface *intf); + +/** + * media_remove_intf_links() - remove all links associated with an interface + * + * @intf: pointer to &media_interface + * + * Notes: + * + * this is called automatically when an entity is unregistered via + * media_device_register_entity() and by media_devnode_remove(). + * + * Prefer to use this one, instead of __media_remove_intf_links(). + */ +void media_remove_intf_links(struct media_interface *intf); + #define media_entity_call(entity, operation, args...) \ (((entity)->ops && (entity)->ops->operation) ? \ (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) diff --git a/include/media/rc-core.h b/include/media/rc-core.h index ec921f6538c7..f6494709e230 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -239,6 +239,7 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev) memset(ev, 0, sizeof(*ev)); } +#define IR_DEFAULT_TIMEOUT MS_TO_NS(125) #define IR_MAX_DURATION 500000000 /* 500 ms */ #define US_TO_NS(usec) ((usec) * 1000) #define MS_TO_US(msec) ((msec) * 1000) diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 7c4bbc4dfab4..7844e9879497 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -33,26 +33,26 @@ enum rc_type { RC_TYPE_XMP = 18, /* XMP protocol */ }; -#define RC_BIT_NONE 0 -#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN) -#define RC_BIT_OTHER (1 << RC_TYPE_OTHER) -#define RC_BIT_RC5 (1 << RC_TYPE_RC5) -#define RC_BIT_RC5X (1 << RC_TYPE_RC5X) -#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ) -#define RC_BIT_JVC (1 << RC_TYPE_JVC) -#define RC_BIT_SONY12 (1 << RC_TYPE_SONY12) -#define RC_BIT_SONY15 (1 << RC_TYPE_SONY15) -#define RC_BIT_SONY20 (1 << RC_TYPE_SONY20) -#define RC_BIT_NEC (1 << RC_TYPE_NEC) -#define RC_BIT_SANYO (1 << RC_TYPE_SANYO) -#define RC_BIT_MCE_KBD (1 << RC_TYPE_MCE_KBD) -#define RC_BIT_RC6_0 (1 << RC_TYPE_RC6_0) -#define RC_BIT_RC6_6A_20 (1 << RC_TYPE_RC6_6A_20) -#define RC_BIT_RC6_6A_24 (1 << RC_TYPE_RC6_6A_24) -#define RC_BIT_RC6_6A_32 (1 << RC_TYPE_RC6_6A_32) -#define RC_BIT_RC6_MCE (1 << RC_TYPE_RC6_MCE) -#define RC_BIT_SHARP (1 << RC_TYPE_SHARP) -#define RC_BIT_XMP (1 << RC_TYPE_XMP) +#define RC_BIT_NONE 0ULL +#define RC_BIT_UNKNOWN (1ULL << RC_TYPE_UNKNOWN) +#define RC_BIT_OTHER (1ULL << RC_TYPE_OTHER) +#define RC_BIT_RC5 (1ULL << RC_TYPE_RC5) +#define RC_BIT_RC5X (1ULL << RC_TYPE_RC5X) +#define RC_BIT_RC5_SZ (1ULL << RC_TYPE_RC5_SZ) +#define RC_BIT_JVC (1ULL << RC_TYPE_JVC) +#define RC_BIT_SONY12 (1ULL << RC_TYPE_SONY12) +#define RC_BIT_SONY15 (1ULL << RC_TYPE_SONY15) +#define RC_BIT_SONY20 (1ULL << RC_TYPE_SONY20) +#define RC_BIT_NEC (1ULL << RC_TYPE_NEC) +#define RC_BIT_SANYO (1ULL << RC_TYPE_SANYO) +#define RC_BIT_MCE_KBD (1ULL << RC_TYPE_MCE_KBD) +#define RC_BIT_RC6_0 (1ULL << RC_TYPE_RC6_0) +#define RC_BIT_RC6_6A_20 (1ULL << RC_TYPE_RC6_6A_20) +#define RC_BIT_RC6_6A_24 (1ULL << RC_TYPE_RC6_6A_24) +#define RC_BIT_RC6_6A_32 (1ULL << RC_TYPE_RC6_6A_32) +#define RC_BIT_RC6_MCE (1ULL << RC_TYPE_RC6_MCE) +#define RC_BIT_SHARP (1ULL << RC_TYPE_SHARP) +#define RC_BIT_XMP (1ULL << RC_TYPE_XMP) #define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \ RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ 
| \ diff --git a/include/media/tuner.h b/include/media/tuner.h index 486b6a54363b..e5321fda5489 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -21,6 +21,14 @@ #include <linux/videodev2.h> +/* Tuner PADs */ +/* FIXME: is this the right place for it? */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_IF_OUTPUT, + TUNER_NUM_PADS +}; + #define ADDR_UNSET (255) #define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h index 3ef6e3d5ed6c..2b94662d005c 100644 --- a/include/media/v4l2-clk.h +++ b/include/media/v4l2-clk.h @@ -65,7 +65,12 @@ static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id, return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE); } +#define V4L2_CLK_NAME_SIZE 64 + #define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \ "%d-%04x", adap, client) +#define v4l2_clk_name_of(name, size, of_full_name) snprintf(name, size, \ + "of-%s", of_full_name) + #endif diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index acbcd2f5fe7f..eeabf20e87a6 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -86,6 +86,7 @@ struct video_device { #if defined(CONFIG_MEDIA_CONTROLLER) struct media_entity entity; + struct media_intf_devnode *intf_devnode; #endif /* device ops */ const struct v4l2_file_operations *fops; diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h index a209526b6014..1113c8874c26 100644 --- a/include/media/v4l2-dv-timings.h +++ b/include/media/v4l2-dv-timings.h @@ -107,12 +107,14 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, * @standard: the timings according to the standard. * @pclock_delta: maximum delta in Hz between standard->pixelclock and * the measured timings. + * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not + * match. * * Returns true if the two timings match, returns false otherwise. */ bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured, const struct v4l2_dv_timings *standard, - unsigned pclock_delta); + unsigned pclock_delta, bool match_reduced_fps); /** * v4l2_print_dv_timings() - log the contents of a dv_timings struct @@ -183,4 +185,25 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync, */ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait); +/* + * reduce_fps - check if conditions for reduced fps are true. + * bt - v4l2 timing structure + * For different timings reduced fps is allowed if following conditions + * are met - + * For CVT timings: if reduced blanking v2 (vsync == 8) is true. + * For CEA861 timings: if V4L2_DV_FL_CAN_REDUCE_FPS flag is true. + */ +static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) +{ + if ((bt->standards & V4L2_DV_BT_STD_CVT) && (bt->vsync == 8)) + return true; + + if ((bt->standards & V4L2_DV_BT_STD_CEA861) && + (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS)) + return true; + + return false; +} + + #endif diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 647ebfe5174f..ef03ae56b1c1 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -129,6 +129,8 @@ struct vb2_mem_ops { * @dbuf_mapped: flag to show whether dbuf is mapped or not * @bytesused: number of bytes occupied by data in the plane (payload) * @length: size of this plane (NOT the payload) in bytes + * @min_length: minimum required size of this plane (NOT the payload) in bytes. 
+ * @length is always greater or equal to @min_length. * @offset: when memory in the associated struct vb2_buffer is * VB2_MEMORY_MMAP, equals the offset from the start of * the device memory for this plane (or is a "cookie" that @@ -150,6 +152,7 @@ struct vb2_plane { unsigned int dbuf_mapped; unsigned int bytesused; unsigned int length; + unsigned int min_length; union { unsigned int offset; unsigned long userptr; @@ -211,6 +214,7 @@ struct vb2_queue; * @num_planes: number of planes in the buffer * on an internal driver queue * @planes: private per-plane information; do not change + * @timestamp: frame timestamp in ns */ struct vb2_buffer { struct vb2_queue *vb2_queue; @@ -219,6 +223,7 @@ struct vb2_buffer { unsigned int memory; unsigned int num_planes; struct vb2_plane planes[VB2_MAX_PLANES]; + u64 timestamp; /* private: internal use only * @@ -268,21 +273,26 @@ struct vb2_buffer { * struct vb2_ops - driver-specific callbacks * * @queue_setup: called from VIDIOC_REQBUFS and VIDIOC_CREATE_BUFS - * handlers before memory allocation, or, if - * *num_planes != 0, after the allocation to verify a - * smaller number of buffers. Driver should return - * the required number of buffers in *num_buffers, the - * required number of planes per buffer in *num_planes; the - * size of each plane should be set in the sizes[] array - * and optional per-plane allocator specific context in the - * alloc_ctxs[] array. When called from VIDIOC_REQBUFS, - * fmt == NULL, the driver has to use the currently - * configured format and *num_buffers is the total number - * of buffers, that are being allocated. When called from - * VIDIOC_CREATE_BUFS, fmt != NULL and it describes the - * target frame format (if the format isn't valid the - * callback must return -EINVAL). In this case *num_buffers - * are being allocated additionally to q->num_buffers. + * handlers before memory allocation. It can be called + * twice: if the original number of requested buffers + * could not be allocated, then it will be called a + * second time with the actually allocated number of + * buffers to verify if that is OK. + * The driver should return the required number of buffers + * in *num_buffers, the required number of planes per + * buffer in *num_planes, the size of each plane should be + * set in the sizes[] array and optional per-plane + * allocator specific context in the alloc_ctxs[] array. + * When called from VIDIOC_REQBUFS, *num_planes == 0, the + * driver has to use the currently configured format to + * determine the plane sizes and *num_buffers is the total + * number of buffers that are being allocated. When called + * from VIDIOC_CREATE_BUFS, *num_planes != 0 and it + * describes the requested number of planes and sizes[] + * contains the requested plane sizes. If either + * *num_planes or the requested sizes are invalid callback + * must return -EINVAL. In this case *num_buffers are + * being allocated additionally to q->num_buffers. * @wait_prepare: release any locks taken while calling vb2 functions; * it is called before an ioctl needs to wait for a new * buffer to arrive; required to avoid a deadlock in @@ -344,7 +354,7 @@ struct vb2_buffer { * pre-queued buffers before calling STREAMON. 
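+ * + * As an illustration of the @queue_setup rules above, a minimal single-plane + * implementation could look like the sketch below, where FOO_IMAGE_SIZE is a + * placeholder for the driver's frame size: + * + *	static int foo_queue_setup(struct vb2_queue *q, + *				   unsigned int *num_buffers, + *				   unsigned int *num_planes, + *				   unsigned int sizes[], void *alloc_ctxs[]) + *	{ + *		if (*num_planes) + *			return sizes[0] < FOO_IMAGE_SIZE ? -EINVAL : 0; + * + *		*num_planes = 1; + *		sizes[0] = FOO_IMAGE_SIZE; + *		return 0; + *	}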
*/ struct vb2_ops { - int (*queue_setup)(struct vb2_queue *q, const void *parg, + int (*queue_setup)(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]); @@ -362,11 +372,22 @@ struct vb2_ops { void (*buf_queue)(struct vb2_buffer *vb); }; +/** + * struct vb2_ops - driver-specific callbacks + * + * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. + * For V4L2 this is a struct v4l2_buffer. + * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. + * If the userspace structure is invalid, then this op + * will return an error. + * @copy_timestamp: copy the timestamp from a userspace structure to + * the vb2_buffer struct. + */ struct vb2_buf_ops { - int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, struct vb2_plane *planes); - int (*set_timestamp)(struct vb2_buffer *vb, const void *pb); + void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb); }; /** @@ -429,6 +450,7 @@ struct vb2_buf_ops { * called since poll() needs to return POLLERR in that situation. * @is_multiplanar: set if buffer type is multiplanar * @is_output: set if buffer type is output + * @copy_timestamp: set if vb2-core should set timestamps * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the * last decoded buffer was already dequeued. Set for capture queues * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued. @@ -470,7 +492,6 @@ struct vb2_queue { wait_queue_head_t done_wq; void *alloc_ctx[VB2_MAX_PLANES]; - unsigned int plane_sizes[VB2_MAX_PLANES]; unsigned int streaming:1; unsigned int start_streaming_called:1; @@ -478,6 +499,7 @@ struct vb2_queue { unsigned int waiting_for_buffers:1; unsigned int is_multiplanar:1; unsigned int is_output:1; + unsigned int copy_timestamp:1; unsigned int last_buffer_dequeued:1; struct vb2_fileio_data *fileio; @@ -503,11 +525,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); void vb2_discard_done(struct vb2_queue *q); int vb2_wait_for_all_buffers(struct vb2_queue *q); -int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); +void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int *count); int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, const void *parg); + unsigned int *count, unsigned requested_planes, + const unsigned int requested_sizes[]); int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking); @@ -531,6 +554,42 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned long pgoff, unsigned long flags); #endif +unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, + poll_table *wait); +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock); +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblock); + +/* + * vb2_thread_fnc - callback function for use with vb2_thread + * + * This is called whenever a buffer is dequeued in the thread. 
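+ * + * A minimal callback and its registration could look like the sketch below; + * the foo_* names are placeholders and, as noted in vb2_thread_start(), this + * facility is meant for videobuf2-dvb only: + * + *	static int foo_thread_fn(struct vb2_buffer *vb, void *priv) + *	{ + *		return 0; + *	} + * + *	ret = vb2_thread_start(q, foo_thread_fn, foo, "dvb");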
+ */ +typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); + +/** + * vb2_thread_start() - start a thread for the given queue. + * @q: videobuf queue + * @fnc: callback function + * @priv: priv pointer passed to the callback function + * @thread_name:the name of the thread. This will be prefixed with "vb2-". + * + * This starts a thread that will queue and dequeue until an error occurs + * or @vb2_thread_stop is called. + * + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailinglist first. + */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name); + +/** + * vb2_thread_stop() - stop the thread for the given queue. + * @q: videobuf queue + */ +int vb2_thread_stop(struct vb2_queue *q); /** * vb2_is_streaming() - return streaming status of the queue @@ -635,4 +694,11 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) q->last_buffer_dequeued = false; } +/* + * The following functions are not part of the vb2 core API, but are useful + * functions for videobuf2-*. + */ +bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); +int vb2_verify_memory_type(struct vb2_queue *q, + enum vb2_memory memory, unsigned int type); #endif /* _MEDIA_VIDEOBUF2_CORE_H */ diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index 5abab1e7c7e8..3cc836f76675 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -28,7 +28,6 @@ * @vb2_buf: video buffer 2 * @flags: buffer informational flags * @field: enum v4l2_field; field order of the image in the buffer - * @timestamp: frame timestamp * @timecode: frame timecode * @sequence: sequence count of this frame * Should contain enough information to be able to cover all the fields @@ -39,7 +38,6 @@ struct vb2_v4l2_buffer { __u32 flags; __u32 field; - struct timeval timestamp; struct v4l2_timecode timecode; __u32 sequence; }; @@ -65,42 +63,8 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); int __must_check vb2_queue_init(struct vb2_queue *q); void vb2_queue_release(struct vb2_queue *q); - -unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); -size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, - loff_t *ppos, int nonblock); -size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, - loff_t *ppos, int nonblock); - -/* - * vb2_thread_fnc - callback function for use with vb2_thread - * - * This is called whenever a buffer is dequeued in the thread. - */ -typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); - -/** - * vb2_thread_start() - start a thread for the given queue. - * @q: videobuf queue - * @fnc: callback function - * @priv: priv pointer passed to the callback function - * @thread_name:the name of the thread. This will be prefixed with "vb2-". - * - * This starts a thread that will queue and dequeue until an error occurs - * or @vb2_thread_stop is called. - * - * This function should not be used for anything else but the videobuf2-dvb - * support. If you think you have another good use-case for this, then please - * contact the linux-media mailinglist first. - */ -int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, - const char *thread_name); - -/** - * vb2_thread_stop() - stop the thread for the given queue. 
- * @q: videobuf queue - */ -int vb2_thread_stop(struct vb2_queue *q); +unsigned int vb2_poll(struct vb2_queue *q, struct file *file, + poll_table *wait); /* * The following functions are not part of the vb2 core API, but are simple diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index cf3bc564ac03..2f6a3f2233ed 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -53,6 +53,8 @@ #ifndef __6LOWPAN_H__ #define __6LOWPAN_H__ +#include <linux/debugfs.h> + #include <net/ipv6.h> #include <net/net_namespace.h> @@ -98,6 +100,7 @@ enum lowpan_lltypes { struct lowpan_priv { enum lowpan_lltypes lltype; + struct dentry *iface_debugfs; /* must be last */ u8 priv[0] __aligned(sizeof(void *)); @@ -185,7 +188,12 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data, *hc_ptr += len; } -void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype); +int lowpan_register_netdevice(struct net_device *dev, + enum lowpan_lltypes lltype); +int lowpan_register_netdev(struct net_device *dev, + enum lowpan_lltypes lltype); +void lowpan_unregister_netdevice(struct net_device *dev); +void lowpan_unregister_netdev(struct net_device *dev); /** * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 78003dfb8539..47f52d3cd8df 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -87,7 +87,8 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags); int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2); +int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 42844d7b154a..bfd1590821d6 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -29,6 +29,8 @@ #include <net/sock.h> #include <linux/seq_file.h> +#define BT_SUBSYS_VERSION "2.21" + #ifndef AF_BLUETOOTH #define AF_BLUETOOTH 31 #define PF_BLUETOOTH AF_BLUETOOTH @@ -296,12 +298,17 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb); +#define HCI_REQ_START BIT(0) +#define HCI_REQ_SKB BIT(1) + struct hci_ctrl { __u16 opcode; - bool req_start; + u8 req_flags; u8 req_event; - hci_req_complete_t req_complete; - hci_req_complete_skb_t req_complete_skb; + union { + hci_req_complete_t req_complete; + hci_req_complete_skb_t req_complete_skb; + }; }; struct bt_skb_cb { @@ -316,15 +323,17 @@ struct bt_skb_cb { }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) +#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type +#define hci_skb_expect(skb) bt_cb((skb))->expect +#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode + static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) { struct sk_buff *skb; skb = alloc_skb(len + BT_SKB_RESERVE, how); - if (skb) { + if (skb) skb_reserve(skb, BT_SKB_RESERVE); - bt_cb(skb)->incoming = 0; - } return skb; } @@ -334,10 +343,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, struct sk_buff *skb; skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err); - if (skb) { + if 
(skb) skb_reserve(skb, BT_SKB_RESERVE); - bt_cb(skb)->incoming = 0; - } if (!skb && *err) return NULL; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 0205b80cc90b..339ea57be423 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -239,7 +239,6 @@ enum { HCI_LE_ENABLED, HCI_ADVERTISING, HCI_ADVERTISING_CONNECTABLE, - HCI_ADVERTISING_INSTANCE, HCI_CONNECTABLE, HCI_DISCOVERABLE, HCI_LIMITED_DISCOVERABLE, @@ -452,7 +451,8 @@ enum { #define HCI_ERROR_REMOTE_POWER_OFF 0x15 #define HCI_ERROR_LOCAL_HOST_TERM 0x16 #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 -#define HCI_ERROR_INVALID_LL_PARAMS 0x1E +#define HCI_ERROR_INVALID_LL_PARAMS 0x1e +#define HCI_ERROR_UNSPECIFIED 0x1f #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c /* Flow control modes */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 1878d0a96333..d4f82edb5cff 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -77,6 +77,7 @@ struct discovery_state { u8 last_adv_data_len; bool report_invalid_rssi; bool result_filtering; + bool limited; s8 rssi; u16 uuid_count; u8 (*uuids)[16]; @@ -327,6 +328,14 @@ struct hci_dev { struct work_struct cmd_work; struct work_struct tx_work; + struct work_struct discov_update; + struct work_struct bg_scan_update; + struct work_struct scan_update; + struct work_struct connectable_update; + struct work_struct discoverable_update; + struct delayed_work le_scan_disable; + struct delayed_work le_scan_restart; + struct sk_buff_head rx_q; struct sk_buff_head raw_q; struct sk_buff_head cmd_q; @@ -370,9 +379,6 @@ struct hci_dev { DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); - struct delayed_work le_scan_disable; - struct delayed_work le_scan_restart; - __s8 adv_tx_power; __u8 adv_data[HCI_MAX_AD_LENGTH]; __u8 adv_data_len; @@ -875,7 +881,7 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, - u16 conn_timeout, u8 role); + u16 conn_timeout); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, u8 role); @@ -1036,7 +1042,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); -void hci_conn_params_clear_all(struct hci_dev *hdev); void hci_conn_params_clear_disabled(struct hci_dev *hdev); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, @@ -1279,31 +1284,41 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, mutex_unlock(&hci_cb_list_lock); } -static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type) +static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, + size_t *data_len) { size_t parsed = 0; - if (data_len < 2) - return false; + if (eir_len < 2) + return NULL; - while (parsed < data_len - 1) { - u8 field_len = data[0]; + while (parsed < eir_len - 1) { + u8 field_len = eir[0]; if (field_len == 0) break; parsed += field_len + 1; - if (parsed > data_len) + if (parsed > eir_len) break; - if (data[1] == type) - return true; + if (eir[1] != type) { + eir += field_len + 1; + continue; + } + + /* Zero length data */ + if (field_len == 1) + return NULL; - data += field_len + 1; + if (data_len) + *data_len = field_len - 1; + + return &eir[2]; } - return 
false; + return NULL; } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) @@ -1431,10 +1446,8 @@ int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); void mgmt_set_powered_failed(struct hci_dev *hdev, int err); -int mgmt_powered(struct hci_dev *hdev, u8 powered); -int mgmt_update_adv_data(struct hci_dev *hdev); -void mgmt_discoverable_timeout(struct hci_dev *hdev); -void mgmt_adv_timeout_expired(struct hci_dev *hdev); +void mgmt_power_on(struct hci_dev *hdev, int err); +void __mgmt_power_off(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, @@ -1473,6 +1486,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); +void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status); +void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); @@ -1487,8 +1502,15 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout); -void mgmt_reenable_advertising(struct hci_dev *hdev); void mgmt_smp_complete(struct hci_conn *conn, bool complete); +bool mgmt_get_connectable(struct hci_dev *hdev); +void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status); +void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status); +u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); +void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, + u8 instance); +void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, + u8 instance); u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h index 2b67567cf28d..587d0131b349 100644 --- a/include/net/bluetooth/hci_mon.h +++ b/include/net/bluetooth/hci_mon.h @@ -43,6 +43,8 @@ struct hci_mon_hdr { #define HCI_MON_CLOSE_INDEX 9 #define HCI_MON_INDEX_INFO 10 #define HCI_MON_VENDOR_DIAG 11 +#define HCI_MON_SYSTEM_NOTE 12 +#define HCI_MON_USER_LOGGING 13 struct hci_mon_new_index { __u8 type; diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 9a46d665c1b5..8e9138acdae1 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -45,6 +45,7 @@ struct sockaddr_hci { #define HCI_CHANNEL_USER 1 #define HCI_CHANNEL_MONITOR 2 #define HCI_CHANNEL_CONTROL 3 +#define HCI_CHANNEL_LOGGING 4 struct hci_filter { unsigned long type_mask; diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index b831242d48a4..ea73e0826aa7 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -571,6 +571,21 @@ struct mgmt_rp_remove_advertising { __u8 instance; } __packed; +#define MGMT_OP_GET_ADV_SIZE_INFO 0x0040 +struct mgmt_cp_get_adv_size_info { + __u8 instance; + __le32 flags; +} __packed; +#define MGMT_GET_ADV_SIZE_INFO_SIZE 5 +struct mgmt_rp_get_adv_size_info { + __u8 instance; + __le32 
flags; + __u8 max_adv_data_len; + __u8 max_scan_rsp_len; +} __packed; + +#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bonding.h b/include/net/bonding.h index c1740a2794a3..ee6c52053aa3 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -165,7 +165,8 @@ struct slave { u8 backup:1, /* indicates backup slave. Value corresponds with BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ inactive:1, /* indicates inactive slave */ - should_notify:1; /* indicateds whether the state changed */ + should_notify:1, /* indicates whether the state changed */ + should_notify_link:1; /* indicates whether the link changed */ u8 duplex; u32 original_mtu; u32 link_failure_count; @@ -246,6 +247,7 @@ struct bonding { ((struct slave *) rtnl_dereference(dev->rx_handler_data)) void bond_queue_slave_event(struct slave *slave); +void bond_lower_state_changed(struct slave *slave); struct bond_vlan_tag { __be16 vlan_proto; @@ -327,6 +329,7 @@ static inline void bond_set_active_slave(struct slave *slave) if (slave->backup) { slave->backup = 0; bond_queue_slave_event(slave); + bond_lower_state_changed(slave); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); } } @@ -336,6 +339,7 @@ static inline void bond_set_backup_slave(struct slave *slave) if (!slave->backup) { slave->backup = 1; bond_queue_slave_event(slave); + bond_lower_state_changed(slave); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); } } @@ -348,6 +352,7 @@ static inline void bond_set_slave_state(struct slave *slave, slave->backup = slave_state; if (notify) { + bond_lower_state_changed(slave); rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); bond_queue_slave_event(slave); slave->should_notify = 0; @@ -379,6 +384,7 @@ static inline void bond_slave_state_notify(struct bonding *bond) bond_for_each_slave(bond, tmp, iter) { if (tmp->should_notify) { + bond_lower_state_changed(tmp); rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC); tmp->should_notify = 0; } @@ -504,10 +510,37 @@ static inline bool bond_is_slave_inactive(struct slave *slave) return slave->inactive; } -static inline void bond_set_slave_link_state(struct slave *slave, int state) +static inline void bond_set_slave_link_state(struct slave *slave, int state, + bool notify) { + if (slave->link == state) + return; + slave->link = state; - bond_queue_slave_event(slave); + if (notify) { + bond_queue_slave_event(slave); + bond_lower_state_changed(slave); + slave->should_notify_link = 0; + } else { + if (slave->should_notify_link) + slave->should_notify_link = 0; + else + slave->should_notify_link = 1; + } +} + +static inline void bond_slave_link_notify(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->should_notify_link) { + bond_queue_slave_event(tmp); + bond_lower_state_changed(tmp); + tmp->should_notify_link = 0; + } + } } static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 1d67fb6b23a0..2fbeb1313c0f 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -72,50 +72,7 @@ static inline bool busy_loop_timeout(unsigned long end_time) return time_after(now, end_time); } -/* when used in sock_poll() nonblock is known at compile time to be true - * so the loop and end_time will be optimized out - */ -static inline bool sk_busy_loop(struct sock *sk, int nonblock) -{ - unsigned long end_time 
= !nonblock ? sk_busy_loop_end_time(sk) : 0; - const struct net_device_ops *ops; - struct napi_struct *napi; - int rc = false; - - /* - * rcu read lock for napi hash - * bh so we don't race with net_rx_action - */ - rcu_read_lock_bh(); - - napi = napi_by_id(sk->sk_napi_id); - if (!napi) - goto out; - - ops = napi->dev->netdev_ops; - if (!ops->ndo_busy_poll) - goto out; - - do { - rc = ops->ndo_busy_poll(napi); - - if (rc == LL_FLUSH_FAILED) - break; /* permanent failure */ - - if (rc > 0) - /* local bh are disabled so it is ok to use _BH */ - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_BUSYPOLLRXPACKETS, rc); - cpu_relax(); - - } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - !need_resched() && !busy_loop_timeout(end_time)); - - rc = !skb_queue_empty(&sk->sk_receive_queue); -out: - rcu_read_unlock_bh(); - return rc; -} +bool sk_busy_loop(struct sock *sk, int nonblock); /* used in the NIC receive handler to mark the skb */ static inline void skb_mark_napi_id(struct sk_buff *skb, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2c7bdb81d30c..9bcaaf7cd15a 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2321,6 +2321,8 @@ struct cfg80211_qos_map { * the driver, and will be valid until passed to cfg80211_scan_done(). * For scan results, call cfg80211_inform_bss(); you can call this outside * the scan/scan_done bracket too. + * @abort_scan: Tell the driver to abort an ongoing scan. The driver shall + * indicate the status of the scan through cfg80211_scan_done(). * * @auth: Request to authenticate with the specified peer * (invoked with the wireless_dev mutex held) @@ -2593,6 +2595,7 @@ struct cfg80211_ops { int (*scan)(struct wiphy *wiphy, struct cfg80211_scan_request *request); + void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*auth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req); @@ -5173,8 +5176,11 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, * buffer starts, which may be @ielen if the entire (remainder) * of the buffer should be used. 
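The hci_core.h hunk above replaces eir_has_data_type() with eir_get_data(), which returns a pointer to the payload of a given EIR field instead of a simple boolean. A minimal sketch of a caller pulling the Complete Local Name (EIR type 0x09) out of an advertising blob; the function and variable names here are illustrative only, not part of the patch:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_print_name(u8 *eir, size_t eir_len)
{
	size_t name_len;
	u8 *name;

	/* NULL means the field is absent or has zero length */
	name = eir_get_data(eir, eir_len, 0x09 /* Complete Local Name */,
			    &name_len);
	if (!name)
		return;

	BT_DBG("remote name: %.*s", (int)name_len, (char *)name);
}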
*/ -size_t ieee80211_ie_split(const u8 *ies, size_t ielen, - const u8 *ids, int n_ids, size_t offset); +static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset) +{ + return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset); +} /** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN diff --git a/include/net/checksum.h b/include/net/checksum.h index 9fcaedf994ee..10a16b5bd1c7 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -165,7 +165,8 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, csum = csum_sub(csum, csum_partial(ptr, start, 0)); /* Set derived checksum in packet */ - delta = csum_sub(csum_fold(csum), *psum); + delta = csum_sub((__force __wsum)csum_fold(csum), + (__force __wsum)*psum); *psum = csum_fold(csum); return delta; diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index ccd6d8bffa4d..c0a92e2c286d 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -41,13 +41,12 @@ static inline u32 task_cls_classid(struct task_struct *p) return classid; } -static inline void sock_update_classid(struct sock *sk) +static inline void sock_update_classid(struct sock_cgroup_data *skcd) { u32 classid; classid = task_cls_classid(current); - if (classid != sk->sk_classid) - sk->sk_classid = classid; + sock_cgroup_set_classid(skcd, classid); } static inline u32 task_get_classid(const struct sk_buff *skb) @@ -64,17 +63,17 @@ static inline u32 task_get_classid(const struct sk_buff *skb) * softirqs always disables bh. */ if (in_serving_softirq()) { - /* If there is an sk_classid we'll use that. */ + /* If there is an sock_cgroup_classid we'll use that. */ if (!skb->sk) return 0; - classid = skb->sk->sk_classid; + classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); } return classid; } #else /* !CONFIG_CGROUP_NET_CLASSID */ -static inline void sock_update_classid(struct sock *sk) +static inline void sock_update_classid(struct sock_cgroup_data *skcd) { } diff --git a/include/net/dsa.h b/include/net/dsa.h index 82a4c6011173..26a0e86e611e 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -16,6 +16,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/of_gpio.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/ethtool.h> @@ -64,6 +65,13 @@ struct dsa_chip_data { * NULL if there is only one switch chip. */ s8 *rtable; + + /* + * A switch may have a GPIO line tied to its reset pin. Parse + * this from the device tree, and use it before performing + * switch soft reset. + */ + struct gpio_desc *reset; }; struct dsa_platform_data { @@ -109,13 +117,6 @@ struct dsa_switch_tree { s8 cpu_port; /* - * Link state polling. - */ - int link_poll_needed; - struct work_struct link_poll_work; - struct timer_list link_poll_timer; - - /* * Data for the individual switch chips. */ struct dsa_switch *ds[DSA_MAX_SWITCHES]; @@ -224,11 +225,6 @@ struct dsa_switch_driver { int regnum, u16 val); /* - * Link state polling and IRQ handling. 
- */ - void (*poll_link)(struct dsa_switch *ds); - - /* * Link state adjustment (called from libphy) */ void (*adjust_link)(struct dsa_switch *ds, int port, diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 1b6b6dcb018d..43c0e771f417 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) * @flags: flags * @policy: attribute validation policy * @doit: standard command callback + * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps * @ops_list: operations list @@ -122,6 +123,7 @@ struct genl_ops { const struct nla_policy *policy; int (*doit)(struct sk_buff *skb, struct genl_info *info); + int (*start)(struct netlink_callback *cb); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); diff --git a/include/net/geneve.h b/include/net/geneve.h index 3106ed6eae0d..e6c23dc765f7 100644 --- a/include/net/geneve.h +++ b/include/net/geneve.h @@ -62,6 +62,14 @@ struct genevehdr { struct geneve_opt options[]; }; +#if IS_ENABLED(CONFIG_GENEVE) +void geneve_get_rx_port(struct net_device *netdev); +#else +static inline void geneve_get_rx_port(struct net_device *netdev) +{ +} +#endif + #ifdef CONFIG_INET struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port); diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index a62a051a3a2f..c4b31601cd53 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -337,7 +337,7 @@ struct ieee802154_mlme_ops { void (*get_mac_params)(struct net_device *dev, struct ieee802154_mac_params *params); - struct ieee802154_llsec_ops *llsec; + const struct ieee802154_llsec_ops *llsec; }; static inline struct ieee802154_mlme_ops * diff --git a/include/net/ila.h b/include/net/ila.h new file mode 100644 index 000000000000..9f4f43e94ae4 --- /dev/null +++ b/include/net/ila.h @@ -0,0 +1,18 @@ +/* + * ILA kernel interface + * + * Copyright (c) 2015 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#ifndef _NET_ILA_H +#define _NET_ILA_H + +int ila_xlat_outgoing(struct sk_buff *skb); +int ila_xlat_incoming(struct sk_buff *skb); + +#endif /* _NET_ILA_H */ diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 84b20835b736..0dc0a51da38f 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner) struct ipv6hdr; -static inline int IP6_ECN_set_ce(struct ipv6hdr *iph) +/* Note: + * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE, + * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE + * In IPv6 case, no checksum compensates the change in IPv6 header, + * so we have to update skb->csum. 
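The geneve.h hunk above exports geneve_get_rx_port(), with a stub when CONFIG_GENEVE is disabled, so NIC drivers can ask the geneve module to replay its known UDP ports. A minimal sketch of a driver calling it when the device is opened; mydrv_open() is a hypothetical ndo_open implementation, not code from the patch:

#include <linux/netdevice.h>
#include <net/geneve.h>

static int mydrv_open(struct net_device *netdev)
{
	/* ... bring the hardware up (omitted) ... */

	/* Ask geneve to re-announce its RX ports; compiles to a no-op
	 * when CONFIG_GENEVE is not enabled.
	 */
	geneve_get_rx_port(netdev);

	return 0;
}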
+ */ +static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) { + __be32 from, to; + if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph))) return 0; - *(__be32*)iph |= htonl(INET_ECN_CE << 20); + + from = *(__be32 *)iph; + to = from | htonl(INET_ECN_CE << 20); + *(__be32 *)iph = to; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(csum_sub(skb->csum, from), to); return 1; } @@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) case cpu_to_be16(ETH_P_IPV6): if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= skb_tail_pointer(skb)) - return IP6_ECN_set_ce(ipv6_hdr(skb)); + return IP6_ECN_set_ce(skb, ipv6_hdr(skb)); break; } diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index ac42bbb37b2d..12aac0fd6ee7 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -99,7 +99,6 @@ struct inet_frags { void (*constructor)(struct inet_frag_queue *q, const void *arg); void (*destructor)(struct inet_frag_queue *); - void (*skb_free)(struct sk_buff *); void (*frag_expire)(unsigned long data); struct kmem_cache *frags_cachep; const char *frags_cache_name; diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 625bdf95d673..012b1f91f3ec 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -28,6 +28,7 @@ #include <net/request_sock.h> #include <net/netns/hash.h> #include <net/tcp_states.h> +#include <net/l3mdev.h> /** struct ip_options - IP Options * @@ -113,6 +114,19 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) return sk->sk_mark; } +static inline int inet_request_bound_dev_if(const struct sock *sk, + struct sk_buff *skb) +{ +#ifdef CONFIG_NET_L3_MASTER_DEV + struct net *net = sock_net(sk); + + if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) + return l3mdev_master_ifindex_by_index(net, skb->skb_iif); +#endif + + return sk->sk_bound_dev_if; +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index ff788b665277..0d0ce0b2d870 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -5,6 +5,7 @@ #include <linux/netdevice.h> #include <linux/if_tunnel.h> #include <linux/ip6_tunnel.h> +#include <net/ip_tunnels.h> #define IP6TUNNEL_ERR_TIMEO (30*HZ) @@ -80,25 +81,17 @@ __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, struct net *ip6_tnl_get_link_net(const struct net_device *dev); int ip6_tnl_get_iflink(const struct net_device *dev); +#ifdef CONFIG_INET static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device *dev) { - struct net_device_stats *stats = &dev->stats; int pkt_len, err; pkt_len = skb->len - skb_inner_network_offset(skb); err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); - - if (net_xmit_eval(err) == 0) { - struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); - u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += pkt_len; - tstats->tx_packets++; - u64_stats_update_end(&tstats->syncp); - put_cpu_ptr(tstats); - } else { - stats->tx_errors++; - stats->tx_aborted_errors++; - } + if (unlikely(net_xmit_eval(err))) + pkt_len = -1; + iptunnel_xmit_stats(dev, pkt_len); } #endif +#endif diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9f4df68105ab..7029527725dd 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -325,7 +325,8 @@ extern u32 fib_multipath_secret __read_mostly; static inline int fib_multipath_hash(__be32 saddr, __be32 daddr) { - return 
jhash_2words(saddr, daddr, fib_multipath_secret) >> 1; + return jhash_2words((__force u32)saddr, (__force u32)daddr, + fib_multipath_secret) >> 1; } void fib_select_multipath(struct fib_result *res, int hash); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 62a750a6a8f8..6db96ea0144f 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -273,32 +273,34 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, } int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); -int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, - __be32 src, __be32 dst, u8 proto, - u8 tos, u8 ttl, __be16 df, bool xnet); +void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, u8 proto, + u8 tos, u8 ttl, __be16 df, bool xnet); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, int gso_type_mask); -static inline void iptunnel_xmit_stats(int err, - struct net_device_stats *err_stats, - struct pcpu_sw_netstats __percpu *stats) +static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) { - if (err > 0) { - struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats); + if (pkt_len > 0) { + struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += err; + tstats->tx_bytes += pkt_len; tstats->tx_packets++; u64_stats_update_end(&tstats->syncp); put_cpu_ptr(tstats); - } else if (err < 0) { - err_stats->tx_errors++; - err_stats->tx_aborted_errors++; } else { - err_stats->tx_dropped++; + struct net_device_stats *err_stats = &dev->stats; + + if (pkt_len < 0) { + err_stats->tx_errors++; + err_stats->tx_aborted_errors++; + } else { + err_stats->tx_dropped++; + } } } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 9a5c9f013784..6570f379aba2 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -401,6 +401,21 @@ static inline void ipv6_addr_prefix(struct in6_addr *pfx, pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b); } +static inline void ipv6_addr_prefix_copy(struct in6_addr *addr, + const struct in6_addr *pfx, + int plen) +{ + /* caller must guarantee 0 <= plen <= 128 */ + int o = plen >> 3, + b = plen & 0x7; + + memcpy(addr->s6_addr, pfx, o); + if (b != 0) { + addr->s6_addr[o] &= ~(0xff00 >> b); + addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b)); + } +} + static inline void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl) { diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 774d85b2d5d9..5567d46b3cff 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -29,7 +29,7 @@ struct l3mdev_ops { /* IPv4 ops */ struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev, const struct flowi4 *fl4); - void (*l3mdev_get_saddr)(struct net_device *dev, + int (*l3mdev_get_saddr)(struct net_device *dev, struct flowi4 *fl4); /* IPv6 ops */ @@ -51,6 +51,24 @@ static inline int l3mdev_master_ifindex(struct net_device *dev) return ifindex; } +static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) +{ + struct net_device *dev; + int rc = 0; + + if (likely(ifindex)) { + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev) + rc = l3mdev_master_ifindex_rcu(dev); + + rcu_read_unlock(); + } + + return rc; +} + /* get index of an interface to use for FIB lookups. 
For devices * enslaved to an L3 master device FIB lookups are based on the * master index @@ -112,10 +130,11 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex) return rc; } -static inline void l3mdev_get_saddr(struct net *net, int ifindex, - struct flowi4 *fl4) +static inline int l3mdev_get_saddr(struct net *net, int ifindex, + struct flowi4 *fl4) { struct net_device *dev; + int rc = 0; if (ifindex) { @@ -124,11 +143,13 @@ static inline void l3mdev_get_saddr(struct net *net, int ifindex, dev = dev_get_by_index_rcu(net, ifindex); if (dev && netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_saddr) { - dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); + rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); } rcu_read_unlock(); } + + return rc; } static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev, @@ -167,6 +188,11 @@ static inline int l3mdev_master_ifindex(struct net_device *dev) return 0; } +static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) +{ + return 0; +} + static inline int l3mdev_fib_oif_rcu(struct net_device *dev) { return dev ? dev->ifindex : 0; @@ -200,9 +226,10 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex) return false; } -static inline void l3mdev_get_saddr(struct net *net, int ifindex, - struct flowi4 *fl4) +static inline int l3mdev_get_saddr(struct net *net, int ifindex, + struct flowi4 *fl4) { + return 0; } static inline diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 760bc4d5a2cf..7c30faff245f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1321,11 +1321,15 @@ struct ieee80211_channel_switch { * interface. This flag should be set during interface addition, * but may be set/cleared as late as authentication to an AP. It is * only valid for managed/station mode interfaces. + * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes + * and send P2P_PS notification to the driver if NOA changed, even + * this is not pure P2P vif. */ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), + IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), }; /** @@ -1901,6 +1905,11 @@ struct ieee80211_txq { * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status * for sent beacons. * + * @IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR: Hardware (or driver) requires that each + * station has a unique address, i.e. each station entry can be identified + * by just its MAC address; this prevents, for example, the same station + * from connecting to two virtual AP interfaces at the same time. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -1936,6 +1945,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TDLS_WIDER_BW, IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, IEEE80211_HW_BEACON_TX_STATUS, + IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -4863,6 +4873,28 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, void ieee80211_sta_eosp(struct ieee80211_sta *pubsta); /** + * ieee80211_send_eosp_nullfunc - ask mac80211 to send NDP with EOSP + * @pubsta: the station + * @tid: the tid of the NDP + * + * Sometimes the device understands that it needs to close + * the Service Period unexpectedly. This can happen when + * sending frames that are filling holes in the BA window. + * In this case, the device can ask mac80211 to send a + * Nullfunc frame with EOSP set. 
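The ipv6.h hunk above adds ipv6_addr_prefix_copy(), which overwrites the leading plen bits of an address with a prefix while leaving the remaining bits (typically the interface identifier) untouched. A minimal sketch, assuming a hard-coded example prefix; the function name and the 2001:db8:1:2::/64 value are illustrative:

#include <net/ipv6.h>

static void example_apply_prefix(struct in6_addr *addr)
{
	struct in6_addr pfx;

	/* 2001:db8:1:2::/64, e.g. learned from a router advertisement */
	ipv6_addr_set(&pfx, htonl(0x20010db8), htonl(0x00010002), 0, 0);

	/* addr keeps its low 64 bits (the IID) and gains the new prefix */
	ipv6_addr_prefix_copy(addr, &pfx, 64);
}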
When that happens, the + * driver must have called ieee80211_sta_set_buffered() to + * let mac80211 know that there are no buffered frames any + * more, otherwise mac80211 will get the more_data bit wrong. + * The low level driver must have made sure that the frame + * will be sent despite the station being in power-save. + * Mac80211 won't call allow_buffered_frames(). + * Note that calling this function, doesn't exempt the driver + * from closing the EOSP properly, it will still have to call + * ieee80211_sta_eosp when the NDP is sent. + */ +void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid); + +/** * ieee80211_iter_keys - iterate keys programmed into the device * @hw: pointer obtained from ieee80211_alloc_hw() * @vif: virtual interface to iterate, may be %NULL for all @@ -4890,6 +4922,30 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw, void *iter_data); /** + * ieee80211_iter_keys_rcu - iterate keys programmed into the device + * @hw: pointer obtained from ieee80211_alloc_hw() + * @vif: virtual interface to iterate, may be %NULL for all + * @iter: iterator function that will be called for each key + * @iter_data: custom data to pass to the iterator function + * + * This function can be used to iterate all the keys known to + * mac80211, even those that weren't previously programmed into + * the device. Note that due to locking reasons, keys of station + * in removal process will be skipped. + * + * This function requires being called in an RCU critical section, + * and thus iter must be atomic. + */ +void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data); + +/** * ieee80211_iter_chan_contexts_atomic - iterate channel contexts * @hw: pointre obtained from ieee80211_alloc_hw(). 
* @iter: iterator function diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 2dcea635ecce..4089abc6e9c0 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -121,6 +121,9 @@ struct net { #if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT) struct list_head nfnl_acct_list; #endif +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + struct list_head nfct_timeout_list; +#endif #endif #ifdef CONFIG_WEXT_CORE struct sk_buff_head wext_nlevents; diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index fb7da5bb76cc..ddf162f7966f 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -5,8 +5,7 @@ void nf_defrag_ipv6_enable(void); int nf_ct_frag6_init(void); void nf_ct_frag6_cleanup(void); -struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user); -void nf_ct_frag6_consume_orig(struct sk_buff *skb); +int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user); struct inet_frags_ctl; diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index f72be38860a7..5cc5e9e6171a 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -104,7 +104,7 @@ static inline void nf_conntrack_timeout_fini(void) #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT -extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(const char *name); +extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name); extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout); #endif diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h new file mode 100644 index 000000000000..397dcae349f9 --- /dev/null +++ b/include/net/netfilter/nf_dup_netdev.h @@ -0,0 +1,6 @@ +#ifndef _NF_DUP_NETDEV_H_ +#define _NF_DUP_NETDEV_H_ + +void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif); + +#endif diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 4bd7508bedc9..f6b1daf2e698 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -19,8 +19,6 @@ struct nft_pktinfo { const struct net_device *out; u8 pf; u8 hook; - u8 nhoff; - u8 thoff; u8 tprot; /* for x_tables compatibility */ struct xt_action_param xt; @@ -293,6 +291,8 @@ void nft_unregister_set(struct nft_set_ops *ops); * @timeout: default timeout value in msecs * @gc_int: garbage collection interval in msecs * @policy: set parameterization (see enum nft_set_policies) + * @udlen: user data length + * @udata: user data * @ops: set ops * @pnet: network namespace * @flags: set flags @@ -312,6 +312,8 @@ struct nft_set { u64 timeout; u32 gc_int; u16 policy; + u16 udlen; + unsigned char *udata; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; possible_net_t pnet; @@ -823,10 +825,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai return container_of(chain, struct nft_base_chain, chain); } -int nft_register_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops); -void nft_unregister_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops); +int __nft_release_basechain(struct nft_ctx *ctx); unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); @@ -882,7 +881,7 @@ struct nft_af_info { }; int nft_register_afinfo(struct net *, struct 
nft_af_info *); -void nft_unregister_afinfo(struct nft_af_info *); +void nft_unregister_afinfo(struct net *, struct nft_af_info *); int nft_register_chain_type(const struct nf_chain_type *); void nft_unregister_chain_type(const struct nf_chain_type *); @@ -890,6 +889,38 @@ void nft_unregister_chain_type(const struct nf_chain_type *); int nft_register_expr(struct nft_expr_type *); void nft_unregister_expr(struct nft_expr_type *); +int nft_verdict_dump(struct sk_buff *skb, int type, + const struct nft_verdict *v); + +/** + * struct nft_traceinfo - nft tracing information and state + * + * @pkt: pktinfo currently processed + * @basechain: base chain currently processed + * @chain: chain currently processed + * @rule: rule that was evaluated + * @verdict: verdict given by rule + * @type: event type (enum nft_trace_types) + * @packet_dumped: packet headers sent in a previous traceinfo message + * @trace: other struct members are initialised + */ +struct nft_traceinfo { + const struct nft_pktinfo *pkt; + const struct nft_base_chain *basechain; + const struct nft_chain *chain; + const struct nft_rule *rule; + const struct nft_verdict *verdict; + enum nft_trace_types type; + bool packet_dumped; + bool trace; +}; + +void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, + const struct nft_verdict *verdict, + const struct nft_chain *basechain); + +void nft_trace_notify(struct nft_traceinfo *info); + #define nft_dereference(p) \ nfnl_dereference(p, NFNL_SUBSYS_NFTABLES) diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index c6f400cfaac8..a9060dd99db7 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -47,7 +47,17 @@ struct nft_payload { enum nft_registers dreg:8; }; +struct nft_payload_set { + enum nft_payload_bases base:8; + u8 offset; + u8 len; + enum nft_registers sreg:8; + u8 csum_type; + u8 csum_offset; +}; + extern const struct nft_expr_ops nft_payload_fast_ops; +extern struct static_key_false nft_trace_enabled; int nft_payload_module_init(void); void nft_payload_module_exit(void); diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 711887a09e91..d27588c8dbd9 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -33,4 +33,7 @@ void nft_meta_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt); +void nft_meta_set_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr); + #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index c68926b4899c..2b7907a35568 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -86,11 +86,18 @@ struct netns_ipv4 { int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; +#ifdef CONFIG_NET_L3_MASTER_DEV + int sysctl_tcp_l3mdev_accept; +#endif int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_probes; + int sysctl_tcp_keepalive_intvl; + struct ping_group_range ping_group_range; atomic_t dev_addr_genid; diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 8ba379f9e467..c501d67172b1 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -89,6 +89,13 @@ struct netns_sctp { int pf_retrans; /* + * Disable Potentially-Failed feature, the feature is enabled by default + * pf_enable - 0 : disable pf + * - >0 : enable pf + */ + int pf_enable; + + /* * 
Policy for preforming sctp/socket accounting * 0 - do socket level accounting, all assocs share sk_sndbuf * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h index f2a9597ff53c..604190596cde 100644 --- a/include/net/netprio_cgroup.h +++ b/include/net/netprio_cgroup.h @@ -25,8 +25,6 @@ struct netprio_map { u32 priomap[]; }; -void sock_update_netprioidx(struct sock *sk); - static inline u32 task_netprioidx(struct task_struct *p) { struct cgroup_subsys_state *css; @@ -38,13 +36,25 @@ static inline u32 task_netprioidx(struct task_struct *p) rcu_read_unlock(); return idx; } + +static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd) +{ + if (in_interrupt()) + return; + + sock_cgroup_set_prioidx(skcd, task_netprioidx(current)); +} + #else /* !CONFIG_CGROUP_NET_PRIO */ + static inline u32 task_netprioidx(struct task_struct *p) { return 0; } -#define sock_update_netprioidx(sk) +static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd) +{ +} #endif /* CONFIG_CGROUP_NET_PRIO */ #endif /* _NET_CLS_CGROUP_H */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index dcfcfc9c00bf..1a3de8b34ad2 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -299,6 +299,7 @@ void nfc_driver_failure(struct nfc_dev *dev, int err); int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction); +int nfc_se_connectivity(struct nfc_dev *dev, u8 se_idx); int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type); int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx); diff --git a/include/net/protocol.h b/include/net/protocol.h index d6fcc1fcdb5b..da689f5432de 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -107,7 +107,7 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num); void inet_register_protosw(struct inet_protosw *p); void inet_unregister_protosw(struct inet_protosw *p); -int udp_add_offload(struct udp_offload *prot); +int udp_add_offload(struct net *net, struct udp_offload *prot); void udp_del_offload(struct udp_offload *prot); #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/net/request_sock.h b/include/net/request_sock.h index a0dde04eb178..f49759decb28 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -68,7 +68,7 @@ struct request_sock { u32 peer_secid; }; -static inline struct request_sock *inet_reqsk(struct sock *sk) +static inline struct request_sock *inet_reqsk(const struct sock *sk) { return (struct request_sock *)sk; } diff --git a/include/net/route.h b/include/net/route.h index ee81307863d5..a3b9ef74a389 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -283,7 +283,12 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, sport, dport, sk); if (!src && oif) { - l3mdev_get_saddr(net, oif, fl4); + int rc; + + rc = l3mdev_get_saddr(net, oif, fl4); + if (rc < 0) + return ERR_PTR(rc); + src = fl4->saddr; } if (!dst || !src) { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index b2a8e6338576..636a362a0e03 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -407,6 +407,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); +static inline bool skb_at_tc_ingress(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + return G_TC_AT(skb->tc_verd) & AT_INGRESS; 
+#else + return false; +#endif +} + /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) { diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ce13cf20f625..835aa2ed9870 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -126,8 +126,6 @@ int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg); */ int sctp_rcv(struct sk_buff *skb); void sctp_v4_err(struct sk_buff *skb, u32 info); -void sctp_hash_established(struct sctp_association *); -void sctp_unhash_established(struct sctp_association *); void sctp_hash_endpoint(struct sctp_endpoint *); void sctp_unhash_endpoint(struct sctp_endpoint *); struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, @@ -143,6 +141,17 @@ void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_transport *t); void sctp_backlog_migrate(struct sctp_association *assoc, struct sock *oldsk, struct sock *newsk); +int sctp_transport_hashtable_init(void); +void sctp_transport_hashtable_destroy(void); +void sctp_hash_transport(struct sctp_transport *t); +void sctp_unhash_transport(struct sctp_transport *t); +struct sctp_transport *sctp_addrs_lookup_transport( + struct net *net, + const union sctp_addr *laddr, + const union sctp_addr *paddr); +struct sctp_transport *sctp_epaddr_lookup_transport( + const struct sctp_endpoint *ep, + const union sctp_addr *paddr); /* * sctp/proc.c @@ -519,25 +528,6 @@ static inline int sctp_ep_hashfn(struct net *net, __u16 lport) return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1); } -/* This is the hash function for the association hash table. */ -static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport) -{ - int h = (lport << 16) + rport + net_hash_mix(net); - h ^= h>>8; - return h & (sctp_assoc_hashsize - 1); -} - -/* This is the hash function for the association hash table. This is - * not used yet, but could be used as a better hash function when - * we have a vtag. - */ -static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag) -{ - int h = (lport << 16) + rport; - h ^= vtag; - return h & (sctp_assoc_hashsize - 1); -} - #define sctp_for_each_hentry(epb, head) \ hlist_for_each_entry(epb, head, node) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index eea9bdeecba2..20e72129be1c 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -48,6 +48,7 @@ #define __sctp_structs_h__ #include <linux/ktime.h> +#include <linux/rhashtable.h> #include <linux/socket.h> /* linux/in.h needs this!! */ #include <linux/in.h> /* We get struct sockaddr_in. */ #include <linux/in6.h> /* We get struct in6_addr */ @@ -119,14 +120,13 @@ extern struct sctp_globals { /* This is the hash of all endpoints. */ struct sctp_hashbucket *ep_hashtable; - /* This is the hash of all associations. */ - struct sctp_hashbucket *assoc_hashtable; /* This is the sctp port control hash. */ struct sctp_bind_hashbucket *port_hashtable; + /* This is the hash of all transports. */ + struct rhashtable transport_hashtable; /* Sizes of above hashtables. */ int ep_hashsize; - int assoc_hashsize; int port_hashsize; /* Default initialization values to be applied to new associations. 
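The sch_generic.h hunk above adds skb_at_tc_ingress(), letting classifiers and actions branch on whether they run on the ingress or the egress hook without open-coding the tc_verd check. A minimal sketch of an action using it; example_act() and the chosen verdicts are illustrative, not taken from the patch:

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>

static int example_act(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		return TC_ACT_OK;	/* e.g. accept on ingress */

	return TC_ACT_UNSPEC;		/* fall through on egress */
}

With CONFIG_NET_CLS_ACT disabled the helper always returns false, so the same action code builds either way.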
*/ @@ -143,10 +143,9 @@ extern struct sctp_globals { #define sctp_address_families (sctp_globals.address_families) #define sctp_ep_hashsize (sctp_globals.ep_hashsize) #define sctp_ep_hashtable (sctp_globals.ep_hashtable) -#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize) -#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) #define sctp_port_hashsize (sctp_globals.port_hashsize) #define sctp_port_hashtable (sctp_globals.port_hashtable) +#define sctp_transport_hashtable (sctp_globals.transport_hashtable) #define sctp_checksum_disable (sctp_globals.checksum_disable) /* SCTP Socket type: UDP or TCP style. */ @@ -753,6 +752,7 @@ static inline int sctp_packet_empty(struct sctp_packet *packet) struct sctp_transport { /* A list of transports. */ struct list_head transports; + struct rhash_head node; /* Reference counting. */ atomic_t refcnt; diff --git a/include/net/sock.h b/include/net/sock.h index 14d3c0734007..b9e7b3d863a0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -58,6 +58,8 @@ #include <linux/memcontrol.h> #include <linux/static_key.h> #include <linux/sched.h> +#include <linux/wait.h> +#include <linux/cgroup-defs.h> #include <linux/filter.h> #include <linux/rculist_nulls.h> @@ -69,22 +71,6 @@ #include <net/tcp_states.h> #include <linux/net_tstamp.h> -struct cgroup; -struct cgroup_subsys; -#ifdef CONFIG_NET -int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); -void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); -#else -static inline -int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) -{ - return 0; -} -static inline -void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) -{ -} -#endif /* * This structure really needs to be cleaned up. * Most of it is for TCP, and not used by any of @@ -243,7 +229,6 @@ struct sock_common { /* public: */ }; -struct cg_proto; /** * struct sock - network layer representation of sockets * @__sk_common: shared layout with inet_timewait_sock @@ -287,7 +272,6 @@ struct cg_proto; * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_priority: %SO_PRIORITY setting - * @sk_cgrp_prioidx: socket group's priority map index * @sk_type: socket type (%SOCK_STREAM, etc) * @sk_protocol: which protocol this socket belongs in this network family * @sk_peer_pid: &struct pid for this socket's peer @@ -308,8 +292,8 @@ struct cg_proto; * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules * @sk_mark: generic packet mark - * @sk_classid: this socket's cgroup classid - * @sk_cgrp: this socket's cgroup-specific proto data + * @sk_cgrp_data: cgroup data for this cgroup + * @sk_memcg: this socket's memory cgroup association * @sk_write_pending: a write to stream socket waits to start * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed @@ -317,6 +301,7 @@ struct cg_proto; * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE) * @sk_backlog_rcv: callback to process the backlog * @sk_destruct: called at sock freeing time, i.e. 
when all refcnt == 0 + * @sk_reuseport_cb: reuseport group container */ struct sock { /* @@ -425,9 +410,7 @@ struct sock { u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; -#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) - __u32 sk_cgrp_prioidx; -#endif + __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; @@ -445,11 +428,8 @@ struct sock { #ifdef CONFIG_SECURITY void *sk_security; #endif - __u32 sk_mark; -#ifdef CONFIG_CGROUP_NET_CLASSID - u32 sk_classid; -#endif - struct cg_proto *sk_cgrp; + struct sock_cgroup_data sk_cgrp_data; + struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *sk); void (*sk_data_ready)(struct sock *sk); void (*sk_write_space)(struct sock *sk); @@ -457,6 +437,7 @@ struct sock { int (*sk_backlog_rcv)(struct sock *sk, struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); + struct sock_reuseport __rcu *sk_reuseport_cb; }; #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) @@ -778,9 +759,9 @@ static inline int sk_memalloc_socks(void) #endif -static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask) +static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) { - return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); + return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); } static inline void sk_acceptq_removed(struct sock *sk) @@ -1067,6 +1048,7 @@ struct proto { void (*destroy_cgroup)(struct mem_cgroup *memcg); struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); #endif + int (*diag_destroy)(struct sock *sk, int err); }; int proto_register(struct proto *prot, int alloc_slab); @@ -1097,23 +1079,6 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ -#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET) -extern struct static_key memcg_socket_limit_enabled; -static inline struct cg_proto *parent_cg_proto(struct proto *proto, - struct cg_proto *cg_proto) -{ - return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); -} -#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled) -#else -#define mem_cgroup_sockets_enabled 0 -static inline struct cg_proto *parent_cg_proto(struct proto *proto, - struct cg_proto *cg_proto) -{ - return NULL; -} -#endif - static inline bool sk_stream_memory_free(const struct sock *sk) { if (sk->sk_wmem_queued >= sk->sk_sndbuf) @@ -1140,8 +1105,9 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) if (!sk->sk_prot->memory_pressure) return false; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return !!sk->sk_cgrp->memory_pressure; + if (mem_cgroup_sockets_enabled && sk->sk_memcg && + mem_cgroup_under_socket_pressure(sk->sk_memcg)) + return true; return !!*sk->sk_prot->memory_pressure; } @@ -1155,15 +1121,6 @@ static inline void sk_leave_memory_pressure(struct sock *sk) if (*memory_pressure) *memory_pressure = 0; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - struct proto *prot = sk->sk_prot; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - cg_proto->memory_pressure = 0; - } - } static inline void sk_enter_memory_pressure(struct sock *sk) @@ -1171,116 +1128,46 @@ static inline void sk_enter_memory_pressure(struct sock *sk) if (!sk->sk_prot->enter_memory_pressure) return; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - struct proto *prot = sk->sk_prot; - - for (; 
cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - cg_proto->memory_pressure = 1; - } - sk->sk_prot->enter_memory_pressure(sk); } static inline long sk_prot_mem_limits(const struct sock *sk, int index) { - long *prot = sk->sk_prot->sysctl_mem; - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - prot = sk->sk_cgrp->sysctl_mem; - return prot[index]; -} - -static inline void memcg_memory_allocated_add(struct cg_proto *prot, - unsigned long amt, - int *parent_status) -{ - page_counter_charge(&prot->memory_allocated, amt); - - if (page_counter_read(&prot->memory_allocated) > - prot->memory_allocated.limit) - *parent_status = OVER_LIMIT; -} - -static inline void memcg_memory_allocated_sub(struct cg_proto *prot, - unsigned long amt) -{ - page_counter_uncharge(&prot->memory_allocated, amt); + return sk->sk_prot->sysctl_mem[index]; } static inline long sk_memory_allocated(const struct sock *sk) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return page_counter_read(&sk->sk_cgrp->memory_allocated); - - return atomic_long_read(prot->memory_allocated); + return atomic_long_read(sk->sk_prot->memory_allocated); } static inline long -sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) +sk_memory_allocated_add(struct sock *sk, int amt) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); - /* update the root cgroup regardless */ - atomic_long_add_return(amt, prot->memory_allocated); - return page_counter_read(&sk->sk_cgrp->memory_allocated); - } - - return atomic_long_add_return(amt, prot->memory_allocated); + return atomic_long_add_return(amt, sk->sk_prot->memory_allocated); } static inline void sk_memory_allocated_sub(struct sock *sk, int amt) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - memcg_memory_allocated_sub(sk->sk_cgrp, amt); - - atomic_long_sub(amt, prot->memory_allocated); + atomic_long_sub(amt, sk->sk_prot->memory_allocated); } static inline void sk_sockets_allocated_dec(struct sock *sk) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - percpu_counter_dec(&cg_proto->sockets_allocated); - } - - percpu_counter_dec(prot->sockets_allocated); + percpu_counter_dec(sk->sk_prot->sockets_allocated); } static inline void sk_sockets_allocated_inc(struct sock *sk) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { - struct cg_proto *cg_proto = sk->sk_cgrp; - - for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) - percpu_counter_inc(&cg_proto->sockets_allocated); - } - - percpu_counter_inc(prot->sockets_allocated); + percpu_counter_inc(sk->sk_prot->sockets_allocated); } static inline int sk_sockets_allocated_read_positive(struct sock *sk) { - struct proto *prot = sk->sk_prot; - - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); - - return percpu_counter_read_positive(prot->sockets_allocated); + return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); } static inline int @@ -1798,6 +1685,15 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) sk->sk_route_caps &= ~flags; } +static inline bool sk_check_csum_caps(struct sock *sk) +{ + return (sk->sk_route_caps & NETIF_F_HW_CSUM) || + (sk->sk_family 
== PF_INET && + (sk->sk_route_caps & NETIF_F_IP_CSUM)) || + (sk->sk_family == PF_INET6 && + (sk->sk_route_caps & NETIF_F_IPV6_CSUM)); +} + static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, struct iov_iter *from, char *to, int copy, int offset) @@ -1883,12 +1779,12 @@ static inline bool sk_has_allocations(const struct sock *sk) } /** - * wq_has_sleeper - check if there are any waiting processes + * skwq_has_sleeper - check if there are any waiting processes * @wq: struct socket_wq * * Returns true if socket_wq has waiting processes * - * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory + * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory * barrier call. They were added due to the race found within the tcp code. * * Consider following tcp code paths: @@ -1914,15 +1810,9 @@ static inline bool sk_has_allocations(const struct sock *sk) * data on the socket. * */ -static inline bool wq_has_sleeper(struct socket_wq *wq) +static inline bool skwq_has_sleeper(struct socket_wq *wq) { - /* We need to be sure we are in sync with the - * add_wait_queue modifications to the wait queue. - * - * This memory barrier is paired in the sock_poll_wait. - */ - smp_mb(); - return wq && waitqueue_active(&wq->wait); + return wq && wq_has_sleeper(&wq->wait); } /** diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h new file mode 100644 index 000000000000..7dda3d7adba8 --- /dev/null +++ b/include/net/sock_reuseport.h @@ -0,0 +1,28 @@ +#ifndef _SOCK_REUSEPORT_H +#define _SOCK_REUSEPORT_H + +#include <linux/filter.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/sock.h> + +struct sock_reuseport { + struct rcu_head rcu; + + u16 max_socks; /* length of socks */ + u16 num_socks; /* elements in socks */ + struct bpf_prog __rcu *prog; /* optional BPF sock selector */ + struct sock *socks[0]; /* array of sock pointers */ +}; + +extern int reuseport_alloc(struct sock *sk); +extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2); +extern void reuseport_detach_sock(struct sock *sk); +extern struct sock *reuseport_select_sock(struct sock *sk, + u32 hash, + struct sk_buff *skb, + int hdr_len); +extern struct bpf_prog *reuseport_attach_prog(struct sock *sk, + struct bpf_prog *prog); + +#endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 1d22ce9f352e..d451122e8404 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -47,9 +47,11 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_STP_STATE, SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, }; struct switchdev_attr { + struct net_device *orig_dev; enum switchdev_attr_id id; u32 flags; union { @@ -57,6 +59,7 @@ struct switchdev_attr { u8 stp_state; /* PORT_STP_STATE */ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ u32 ageing_time; /* BRIDGE_AGEING_TIME */ + bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ } u; }; @@ -65,9 +68,11 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_PORT_VLAN, SWITCHDEV_OBJ_ID_IPV4_FIB, SWITCHDEV_OBJ_ID_PORT_FDB, + SWITCHDEV_OBJ_ID_PORT_MDB, }; struct switchdev_obj { + struct net_device *orig_dev; enum switchdev_obj_id id; u32 flags; }; @@ -109,6 +114,16 @@ struct switchdev_obj_port_fdb { #define SWITCHDEV_OBJ_PORT_FDB(obj) \ container_of(obj, struct switchdev_obj_port_fdb, obj) +/* SWITCHDEV_OBJ_ID_PORT_MDB */ +struct switchdev_obj_port_mdb { + struct switchdev_obj obj; + unsigned 
char addr[ETH_ALEN]; + u16 vid; +}; + +#define SWITCHDEV_OBJ_PORT_MDB(obj) \ + container_of(obj, struct switchdev_obj_port_mdb, obj) + void switchdev_trans_item_enqueue(struct switchdev_trans *trans, void *data, void (*destructor)(void const *), struct switchdev_trans_item *tritem); diff --git a/include/net/tcp.h b/include/net/tcp.h index f80e74c5ad18..8ea19977ea53 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -240,9 +240,6 @@ extern int sysctl_tcp_timestamps; extern int sysctl_tcp_window_scaling; extern int sysctl_tcp_sack; extern int sysctl_tcp_fin_timeout; -extern int sysctl_tcp_keepalive_time; -extern int sysctl_tcp_keepalive_probes; -extern int sysctl_tcp_keepalive_intvl; extern int sysctl_tcp_syn_retries; extern int sysctl_tcp_synack_retries; extern int sysctl_tcp_retries1; @@ -292,8 +289,9 @@ extern int tcp_memory_pressure; /* optimized version of sk_under_memory_pressure() for TCP sockets */ static inline bool tcp_under_memory_pressure(const struct sock *sk) { - if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return !!sk->sk_cgrp->memory_pressure; + if (mem_cgroup_sockets_enabled && sk->sk_memcg && + mem_cgroup_under_socket_pressure(sk->sk_memcg)) + return true; return tcp_memory_pressure; } @@ -1170,6 +1168,8 @@ void tcp_set_state(struct sock *sk, int state); void tcp_done(struct sock *sk); +int tcp_abort(struct sock *sk, int err); + static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) { rx_opt->dsack = 0; @@ -1223,17 +1223,23 @@ void tcp_enter_memory_pressure(struct sock *sk); static inline int keepalive_intvl_when(const struct tcp_sock *tp) { - return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl; + struct net *net = sock_net((struct sock *)tp); + + return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; } static inline int keepalive_time_when(const struct tcp_sock *tp) { - return tp->keepalive_time ? : sysctl_tcp_keepalive_time; + struct net *net = sock_net((struct sock *)tp); + + return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; } static inline int keepalive_probes(const struct tcp_sock *tp) { - return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; + struct net *net = sock_net((struct sock *)tp); + + return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; } static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) @@ -1618,6 +1624,18 @@ static inline void tcp_highest_sack_combine(struct sock *sk, tcp_sk(sk)->highest_sack = new; } +/* This helper checks if socket has IP_TRANSPARENT set */ +static inline bool inet_sk_transparent(const struct sock *sk) +{ + switch (sk->sk_state) { + case TCP_TIME_WAIT: + return inet_twsk(sk)->tw_transparent; + case TCP_NEW_SYN_RECV: + return inet_rsk(inet_reqsk(sk))->no_srccheck; + } + return inet_sk(sk)->transparent; +} + /* Determines whether this is a thin stream (which may suffer from * increased latency). Used to trigger latency-reducing mechanisms. 
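With the tcp.h and netns/ipv4.h hunks above, the keepalive knobs move from global sysctls into struct netns_ipv4, and keepalive_time_when()/keepalive_intvl_when()/keepalive_probes() fall back to the per-netns values when the socket has not overridden them. A minimal sketch of the per-namespace defaults a pernet init routine would need to set; the function name is illustrative, while the TCP_KEEPALIVE_* constants already exist in net/tcp.h:

#include <net/net_namespace.h>
#include <net/tcp.h>

static int __net_init example_tcp_ns_init(struct net *net)
{
	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
	return 0;
}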
*/ diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h index 05b94d9453de..01ff7c6efada 100644 --- a/include/net/tcp_memcontrol.h +++ b/include/net/tcp_memcontrol.h @@ -1,7 +1,9 @@ #ifndef _TCP_MEMCG_H #define _TCP_MEMCG_H -struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); +struct cgroup_subsys; +struct mem_cgroup; + int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss); void tcp_destroy_cgroup(struct mem_cgroup *memcg); #endif /* _TCP_MEMCG_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 6d4ed18e1427..2842541e28e7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -191,7 +191,7 @@ static inline void udp_lib_close(struct sock *sk, long timeout) } int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*)(const struct sock *, const struct sock *), + int (*)(const struct sock *, const struct sock *, bool), unsigned int hash2_nulladdr); u32 udp_flow_hashrnd(void); @@ -258,7 +258,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, - struct udp_table *tbl); + struct udp_table *tbl, struct sk_buff *skb); struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, @@ -266,7 +266,8 @@ struct sock *udp6_lib_lookup(struct net *net, struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, - int dif, struct udp_table *tbl); + int dif, struct udp_table *tbl, + struct sk_buff *skb); /* * SNMP statistics for UDP and UDP-Lite diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index cb2f89f20f5c..cca2ad3082c3 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -78,10 +78,10 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, struct udp_tunnel_sock_cfg *sock_cfg); /* Transmit the skb using UDP encapsulation. 
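
The UDP socket lookup helpers in the udp.h hunk above gain a struct sk_buff * parameter, which lets the lookup path hand the packet to SO_REUSEPORT socket selection. A minimal caller sketch under that assumption; the wrapper function is illustrative only:

static struct sock *example_udp4_lookup(struct net *net, struct sk_buff *skb,
					const struct iphdr *iph,
					__be16 sport, __be16 dport, int dif)
{
	/* udp_table is the global UDP hash table declared in net/udp.h. */
	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, dif,
				 &udp_table, skb);
}
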
*/ -int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, - __be16 df, __be16 src_port, __be16 dst_port, - bool xnet, bool nocheck); +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck); #if IS_ENABLED(CONFIG_IPV6) int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, diff --git a/include/net/vxlan.h b/include/net/vxlan.h index e289ada6adf6..0fb86442544b 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -232,7 +232,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, skb->inner_protocol != htons(ETH_P_TEB) || (skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) - return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } diff --git a/include/scsi/sas.h b/include/scsi/sas.h index 0d2607d12387..42a84ef42683 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -344,6 +344,43 @@ struct ssp_response_iu { u8 sense_data[0]; } __attribute__ ((packed)); +struct ssp_command_iu { + u8 lun[8]; + u8 _r_a; + + union { + struct { + u8 attr:3; + u8 prio:4; + u8 efb:1; + }; + u8 efb_prio_attr; + }; + + u8 _r_b; + + u8 _r_c:2; + u8 add_cdb_len:6; + + u8 cdb[16]; + u8 add_cdb[0]; +} __attribute__ ((packed)); + +struct xfer_rdy_iu { + __be32 requested_offset; + __be32 write_data_len; + __be32 _r_a; +} __attribute__ ((packed)); + +struct ssp_tmf_iu { + u8 lun[8]; + u16 _r_a; + u8 tmf; + u8 _r_b; + __be16 tag; + u8 _r_c[14]; +} __attribute__ ((packed)); + /* ---------- SMP ---------- */ struct report_general_resp { @@ -538,6 +575,43 @@ struct ssp_response_iu { u8 sense_data[0]; } __attribute__ ((packed)); +struct ssp_command_iu { + u8 lun[8]; + u8 _r_a; + + union { + struct { + u8 efb:1; + u8 prio:4; + u8 attr:3; + }; + u8 efb_prio_attr; + }; + + u8 _r_b; + + u8 add_cdb_len:6; + u8 _r_c:2; + + u8 cdb[16]; + u8 add_cdb[0]; +} __attribute__ ((packed)); + +struct xfer_rdy_iu { + __be32 requested_offset; + __be32 write_data_len; + __be32 _r_a; +} __attribute__ ((packed)); + +struct ssp_tmf_iu { + u8 lun[8]; + u16 _r_a; + u8 tmf; + u8 _r_b; + __be16 tag; + u8 _r_c[14]; +} __attribute__ ((packed)); + /* ---------- SMP ---------- */ struct report_general_resp { diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h index f8170e90b49d..56710e03101c 100644 --- a/include/scsi/scsi_dbg.h +++ b/include/scsi/scsi_dbg.h @@ -12,8 +12,6 @@ extern size_t __scsi_format_command(char *, size_t, const unsigned char *, size_t); extern void scsi_show_extd_sense(const struct scsi_device *, const char *, unsigned char, unsigned char); -extern void scsi_show_sense_hdr(const struct scsi_device *, const char *, - const struct scsi_sense_hdr *); extern void scsi_print_sense_hdr(const struct scsi_device *, const char *, const struct scsi_sense_hdr *); extern void scsi_print_sense(const struct scsi_cmnd *); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index fe89d7cd67b9..f63a16760ae9 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -109,6 +109,7 @@ struct scsi_device { char type; char scsi_level; char inq_periph_qual; /* PQ from INQUIRY data */ + struct mutex inquiry_mutex; unsigned char inquiry_len; /* valid bytes in 'inquiry' */ unsigned char * inquiry; /* INQUIRY response 
data */ const char * vendor; /* [back_compat] point into 'inquiry' ... */ @@ -117,9 +118,9 @@ struct scsi_device { #define SCSI_VPD_PG_LEN 255 int vpd_pg83_len; - unsigned char *vpd_pg83; + unsigned char __rcu *vpd_pg83; int vpd_pg80_len; - unsigned char *vpd_pg80; + unsigned char __rcu *vpd_pg80; unsigned char current_tag; /* current tag */ struct scsi_target *sdev_target; /* used only for single_lun */ @@ -414,6 +415,8 @@ static inline int scsi_execute_req(struct scsi_device *sdev, } extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); +extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t); +extern int scsi_vpd_tpg_id(struct scsi_device *, int *); #ifdef CONFIG_PM extern int scsi_autopm_get_device(struct scsi_device *); diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 0bd71e2702e3..13c0b2ba1b6c 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -10,6 +10,15 @@ struct scsi_transport_template; struct sas_rphy; struct request; +#if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS) +static inline int is_sas_attached(struct scsi_device *sdev) +{ + return 0; +} +#else +extern int is_sas_attached(struct scsi_device *sdev); +#endif + static inline int sas_protocol_ata(enum sas_protocol proto) { return ((proto & SAS_PROTOCOL_SATA) || @@ -180,6 +189,7 @@ extern int sas_phy_add(struct sas_phy *); extern void sas_phy_delete(struct sas_phy *); extern int scsi_is_sas_phy(const struct device *); +u64 sas_get_address(struct scsi_device *); unsigned int sas_tlr_supported(struct scsi_device *); unsigned int sas_is_tlr_enabled(struct scsi_device *); void sas_disable_tlr(struct scsi_device *); diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h new file mode 100644 index 000000000000..bedbff891423 --- /dev/null +++ b/include/soc/fsl/qe/immap_qe.h @@ -0,0 +1,491 @@ +/* + * QUICC Engine (QE) Internal Memory Map. + * The Internal Memory Map for devices with QE on them. This + * is the superset of all QE devices (8360, etc.). + + * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
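
The vpd_pg80/vpd_pg83 pointers in the scsi_device hunk above become __rcu-annotated, so readers are expected to take rcu_read_lock() and use rcu_dereference() rather than loading the pointer directly. A minimal sketch of that access pattern; the helper name is hypothetical:

static int example_copy_vpd_pg83(struct scsi_device *sdev,
				 unsigned char *buf, int len)
{
	const unsigned char *vpd;
	int ret = -ENOENT;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (vpd) {
		/* Copy at most the cached page length. */
		ret = min(len, sdev->vpd_pg83_len);
		memcpy(buf, vpd, ret);
	}
	rcu_read_unlock();
	return ret;
}
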
+ */ +#ifndef _ASM_POWERPC_IMMAP_QE_H +#define _ASM_POWERPC_IMMAP_QE_H +#ifdef __KERNEL__ + +#include <linux/kernel.h> +#include <asm/io.h> + +#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */ + +/* QE I-RAM */ +struct qe_iram { + __be32 iadd; /* I-RAM Address Register */ + __be32 idata; /* I-RAM Data Register */ + u8 res0[0x04]; + __be32 iready; /* I-RAM Ready Register */ + u8 res1[0x70]; +} __attribute__ ((packed)); + +/* QE Interrupt Controller */ +struct qe_ic_regs { + __be32 qicr; + __be32 qivec; + __be32 qripnr; + __be32 qipnr; + __be32 qipxcc; + __be32 qipycc; + __be32 qipwcc; + __be32 qipzcc; + __be32 qimr; + __be32 qrimr; + __be32 qicnr; + u8 res0[0x4]; + __be32 qiprta; + __be32 qiprtb; + u8 res1[0x4]; + __be32 qricr; + u8 res2[0x20]; + __be32 qhivec; + u8 res3[0x1C]; +} __attribute__ ((packed)); + +/* Communications Processor */ +struct cp_qe { + __be32 cecr; /* QE command register */ + __be32 ceccr; /* QE controller configuration register */ + __be32 cecdr; /* QE command data register */ + u8 res0[0xA]; + __be16 ceter; /* QE timer event register */ + u8 res1[0x2]; + __be16 cetmr; /* QE timers mask register */ + __be32 cetscr; /* QE time-stamp timer control register */ + __be32 cetsr1; /* QE time-stamp register 1 */ + __be32 cetsr2; /* QE time-stamp register 2 */ + u8 res2[0x8]; + __be32 cevter; /* QE virtual tasks event register */ + __be32 cevtmr; /* QE virtual tasks mask register */ + __be16 cercr; /* QE RAM control register */ + u8 res3[0x2]; + u8 res4[0x24]; + __be16 ceexe1; /* QE external request 1 event register */ + u8 res5[0x2]; + __be16 ceexm1; /* QE external request 1 mask register */ + u8 res6[0x2]; + __be16 ceexe2; /* QE external request 2 event register */ + u8 res7[0x2]; + __be16 ceexm2; /* QE external request 2 mask register */ + u8 res8[0x2]; + __be16 ceexe3; /* QE external request 3 event register */ + u8 res9[0x2]; + __be16 ceexm3; /* QE external request 3 mask register */ + u8 res10[0x2]; + __be16 ceexe4; /* QE external request 4 event register */ + u8 res11[0x2]; + __be16 ceexm4; /* QE external request 4 mask register */ + u8 res12[0x3A]; + __be32 ceurnr; /* QE microcode revision number register */ + u8 res13[0x244]; +} __attribute__ ((packed)); + +/* QE Multiplexer */ +struct qe_mux { + __be32 cmxgcr; /* CMX general clock route register */ + __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */ + __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */ + __be32 cmxsi1syr; /* CMX SI1 SYNC route register */ + __be32 cmxucr[4]; /* CMX UCCx clock route registers */ + __be32 cmxupcr; /* CMX UPC clock route register */ + u8 res0[0x1C]; +} __attribute__ ((packed)); + +/* QE Timers */ +struct qe_timers { + u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/ + u8 res0[0x3]; + u8 gtcfr2; /* Timer 3 and timer 4 global config register*/ + u8 res1[0xB]; + __be16 gtmdr1; /* Timer 1 mode register */ + __be16 gtmdr2; /* Timer 2 mode register */ + __be16 gtrfr1; /* Timer 1 reference register */ + __be16 gtrfr2; /* Timer 2 reference register */ + __be16 gtcpr1; /* Timer 1 capture register */ + __be16 gtcpr2; /* Timer 2 capture register */ + __be16 gtcnr1; /* Timer 1 counter */ + __be16 gtcnr2; /* Timer 2 counter */ + __be16 gtmdr3; /* Timer 3 mode register */ + __be16 gtmdr4; /* Timer 4 mode register */ + __be16 gtrfr3; /* Timer 3 reference register */ + __be16 gtrfr4; /* Timer 4 reference register */ + __be16 gtcpr3; /* Timer 3 capture register */ + __be16 gtcpr4; /* Timer 4 capture register */ + __be16 gtcnr3; /* Timer 3 counter */ + __be16 gtcnr4; /* 
Timer 4 counter */ + __be16 gtevr1; /* Timer 1 event register */ + __be16 gtevr2; /* Timer 2 event register */ + __be16 gtevr3; /* Timer 3 event register */ + __be16 gtevr4; /* Timer 4 event register */ + __be16 gtps; /* Timer 1 prescale register */ + u8 res2[0x46]; +} __attribute__ ((packed)); + +/* BRG */ +struct qe_brg { + __be32 brgc[16]; /* BRG configuration registers */ + u8 res0[0x40]; +} __attribute__ ((packed)); + +/* SPI */ +struct spi { + u8 res0[0x20]; + __be32 spmode; /* SPI mode register */ + u8 res1[0x2]; + u8 spie; /* SPI event register */ + u8 res2[0x1]; + u8 res3[0x2]; + u8 spim; /* SPI mask register */ + u8 res4[0x1]; + u8 res5[0x1]; + u8 spcom; /* SPI command register */ + u8 res6[0x2]; + __be32 spitd; /* SPI transmit data register (cpu mode) */ + __be32 spird; /* SPI receive data register (cpu mode) */ + u8 res7[0x8]; +} __attribute__ ((packed)); + +/* SI */ +struct si1 { + __be16 siamr1; /* SI1 TDMA mode register */ + __be16 sibmr1; /* SI1 TDMB mode register */ + __be16 sicmr1; /* SI1 TDMC mode register */ + __be16 sidmr1; /* SI1 TDMD mode register */ + u8 siglmr1_h; /* SI1 global mode register high */ + u8 res0[0x1]; + u8 sicmdr1_h; /* SI1 command register high */ + u8 res2[0x1]; + u8 sistr1_h; /* SI1 status register high */ + u8 res3[0x1]; + __be16 sirsr1_h; /* SI1 RAM shadow address register high */ + u8 sitarc1; /* SI1 RAM counter Tx TDMA */ + u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ + u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ + u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ + u8 sirarc1; /* SI1 RAM counter Rx TDMA */ + u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ + u8 sircrc1; /* SI1 RAM counter Rx TDMC */ + u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ + u8 res4[0x8]; + __be16 siemr1; /* SI1 TDME mode register 16 bits */ + __be16 sifmr1; /* SI1 TDMF mode register 16 bits */ + __be16 sigmr1; /* SI1 TDMG mode register 16 bits */ + __be16 sihmr1; /* SI1 TDMH mode register 16 bits */ + u8 siglmg1_l; /* SI1 global mode register low 8 bits */ + u8 res5[0x1]; + u8 sicmdr1_l; /* SI1 command register low 8 bits */ + u8 res6[0x1]; + u8 sistr1_l; /* SI1 status register low 8 bits */ + u8 res7[0x1]; + __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/ + u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ + u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ + u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ + u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ + u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ + u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ + u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ + u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ + u8 res8[0x8]; + __be32 siml1; /* SI1 multiframe limit register */ + u8 siedm1; /* SI1 extended diagnostic mode register */ + u8 res9[0xBB]; +} __attribute__ ((packed)); + +/* SI Routing Tables */ +struct sir { + u8 tx[0x400]; + u8 rx[0x400]; + u8 res0[0x800]; +} __attribute__ ((packed)); + +/* USB Controller */ +struct qe_usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + __be16 usb_usep[4]; + u8 res2[4]; + __be16 usb_usber; + u8 res3[2]; + __be16 usb_usbmr; + u8 res4[1]; + u8 usb_usbs; + __be16 usb_ussft; + u8 res5[2]; + __be16 usb_usfrn; + u8 res6[0x22]; +} __attribute__ ((packed)); + +/* MCC */ +struct qe_mcc { + __be32 mcce; /* MCC event register */ + __be32 mccm; /* MCC mask register */ + __be32 mccf; /* MCC configuration register */ + __be32 merl; /* MCC emergency request level register */ + u8 res0[0xF0]; +} __attribute__ ((packed)); + +/* QE UCC Slow */ +struct ucc_slow { + __be32 gumr_l; 
/* UCCx general mode register (low) */ + __be32 gumr_h; /* UCCx general mode register (high) */ + __be16 upsmr; /* UCCx protocol-specific mode register */ + u8 res0[0x2]; + __be16 utodr; /* UCCx transmit on demand register */ + __be16 udsr; /* UCCx data synchronization register */ + __be16 ucce; /* UCCx event register */ + u8 res1[0x2]; + __be16 uccm; /* UCCx mask register */ + u8 res2[0x1]; + u8 uccs; /* UCCx status register */ + u8 res3[0x24]; + __be16 utpt; + u8 res4[0x52]; + u8 guemr; /* UCC general extended mode register */ +} __attribute__ ((packed)); + +/* QE UCC Fast */ +struct ucc_fast { + __be32 gumr; /* UCCx general mode register */ + __be32 upsmr; /* UCCx protocol-specific mode register */ + __be16 utodr; /* UCCx transmit on demand register */ + u8 res0[0x2]; + __be16 udsr; /* UCCx data synchronization register */ + u8 res1[0x2]; + __be32 ucce; /* UCCx event register */ + __be32 uccm; /* UCCx mask register */ + u8 uccs; /* UCCx status register */ + u8 res2[0x7]; + __be32 urfb; /* UCC receive FIFO base */ + __be16 urfs; /* UCC receive FIFO size */ + u8 res3[0x2]; + __be16 urfet; /* UCC receive FIFO emergency threshold */ + __be16 urfset; /* UCC receive FIFO special emergency + threshold */ + __be32 utfb; /* UCC transmit FIFO base */ + __be16 utfs; /* UCC transmit FIFO size */ + u8 res4[0x2]; + __be16 utfet; /* UCC transmit FIFO emergency threshold */ + u8 res5[0x2]; + __be16 utftt; /* UCC transmit FIFO transmit threshold */ + u8 res6[0x2]; + __be16 utpt; /* UCC transmit polling timer */ + u8 res7[0x2]; + __be32 urtry; /* UCC retry counter register */ + u8 res8[0x4C]; + u8 guemr; /* UCC general extended mode register */ +} __attribute__ ((packed)); + +struct ucc { + union { + struct ucc_slow slow; + struct ucc_fast fast; + u8 res[0x200]; /* UCC blocks are 512 bytes each */ + }; +} __attribute__ ((packed)); + +/* MultiPHY UTOPIA POS Controllers (UPC) */ +struct upc { + __be32 upgcr; /* UTOPIA/POS general configuration register */ + __be32 uplpa; /* UTOPIA/POS last PHY address */ + __be32 uphec; /* ATM HEC register */ + __be32 upuc; /* UTOPIA/POS UCC configuration */ + __be32 updc1; /* UTOPIA/POS device 1 configuration */ + __be32 updc2; /* UTOPIA/POS device 2 configuration */ + __be32 updc3; /* UTOPIA/POS device 3 configuration */ + __be32 updc4; /* UTOPIA/POS device 4 configuration */ + __be32 upstpa; /* UTOPIA/POS STPA threshold */ + u8 res0[0xC]; + __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */ + __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */ + __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */ + __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */ + __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */ + __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */ + __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */ + __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */ + __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */ + __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */ + __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */ + __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */ + __be32 upde1; /* UTOPIA/POS device 1 event */ + __be32 upde2; /* UTOPIA/POS device 2 event */ + __be32 upde3; /* UTOPIA/POS device 3 event */ + __be32 upde4; /* UTOPIA/POS device 4 event */ + __be16 uprp1; + __be16 uprp2; + __be16 uprp3; + __be16 uprp4; + u8 res1[0x8]; + __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */ + __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */ + __be16 uptirr1_2; /* Device 1 transmit internal 
rate 2 */ + __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */ + __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */ + __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */ + __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */ + __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */ + __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */ + __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */ + __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */ + __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */ + __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */ + __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */ + __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */ + __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */ + __be32 uper1; /* Device 1 port enable register */ + __be32 uper2; /* Device 2 port enable register */ + __be32 uper3; /* Device 3 port enable register */ + __be32 uper4; /* Device 4 port enable register */ + u8 res2[0x150]; +} __attribute__ ((packed)); + +/* SDMA */ +struct sdma { + __be32 sdsr; /* Serial DMA status register */ + __be32 sdmr; /* Serial DMA mode register */ + __be32 sdtr1; /* SDMA system bus threshold register */ + __be32 sdtr2; /* SDMA secondary bus threshold register */ + __be32 sdhy1; /* SDMA system bus hysteresis register */ + __be32 sdhy2; /* SDMA secondary bus hysteresis register */ + __be32 sdta1; /* SDMA system bus address register */ + __be32 sdta2; /* SDMA secondary bus address register */ + __be32 sdtm1; /* SDMA system bus MSNUM register */ + __be32 sdtm2; /* SDMA secondary bus MSNUM register */ + u8 res0[0x10]; + __be32 sdaqr; /* SDMA address bus qualify register */ + __be32 sdaqmr; /* SDMA address bus qualify mask register */ + u8 res1[0x4]; + __be32 sdebcr; /* SDMA CAM entries base register */ + u8 res2[0x38]; +} __attribute__ ((packed)); + +/* Debug Space */ +struct dbg { + __be32 bpdcr; /* Breakpoint debug command register */ + __be32 bpdsr; /* Breakpoint debug status register */ + __be32 bpdmr; /* Breakpoint debug mask register */ + __be32 bprmrr0; /* Breakpoint request mode risc register 0 */ + __be32 bprmrr1; /* Breakpoint request mode risc register 1 */ + u8 res0[0x8]; + __be32 bprmtr0; /* Breakpoint request mode trb register 0 */ + __be32 bprmtr1; /* Breakpoint request mode trb register 1 */ + u8 res1[0x8]; + __be32 bprmir; /* Breakpoint request mode immediate register */ + __be32 bprmsr; /* Breakpoint request mode serial register */ + __be32 bpemr; /* Breakpoint exit mode register */ + u8 res2[0x48]; +} __attribute__ ((packed)); + +/* + * RISC Special Registers (Trap and Breakpoint). These are described in + * the QE Developer's Handbook. 
+ */ +struct rsp { + __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */ + u8 res0[64]; + __be32 ibcr0; + __be32 ibs0; + __be32 ibcnr0; + u8 res1[4]; + __be32 ibcr1; + __be32 ibs1; + __be32 ibcnr1; + __be32 npcr; + __be32 dbcr; + __be32 dbar; + __be32 dbamr; + __be32 dbsr; + __be32 dbcnr; + u8 res2[12]; + __be32 dbdr_h; + __be32 dbdr_l; + __be32 dbdmr_h; + __be32 dbdmr_l; + __be32 bsr; + __be32 bor; + __be32 bior; + u8 res3[4]; + __be32 iatr[4]; + __be32 eccr; /* Exception control configuration register */ + __be32 eicr; + u8 res4[0x100-0xf8]; +} __attribute__ ((packed)); + +struct qe_immap { + struct qe_iram iram; /* I-RAM */ + struct qe_ic_regs ic; /* Interrupt Controller */ + struct cp_qe cp; /* Communications Processor */ + struct qe_mux qmx; /* QE Multiplexer */ + struct qe_timers qet; /* QE Timers */ + struct spi spi[0x2]; /* spi */ + struct qe_mcc mcc; /* mcc */ + struct qe_brg brg; /* brg */ + struct qe_usb_ctlr usb; /* USB */ + struct si1 si1; /* SI */ + u8 res11[0x800]; + struct sir sir; /* SI Routing Tables */ + struct ucc ucc1; /* ucc1 */ + struct ucc ucc3; /* ucc3 */ + struct ucc ucc5; /* ucc5 */ + struct ucc ucc7; /* ucc7 */ + u8 res12[0x600]; + struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/ + struct ucc ucc2; /* ucc2 */ + struct ucc ucc4; /* ucc4 */ + struct ucc ucc6; /* ucc6 */ + struct ucc ucc8; /* ucc8 */ + u8 res13[0x600]; + struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/ + struct sdma sdma; /* SDMA */ + struct dbg dbg; /* 0x104080 - 0x1040FF + Debug Space */ + struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF + RISC Special Registers + (Trap and Breakpoint) */ + u8 res14[0x300]; /* 0x104300 - 0x1045FF */ + u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */ + u8 res16[0x8000]; /* 0x108000 - 0x110000 */ + u8 muram[0xC000]; /* 0x110000 - 0x11C000 + Multi-user RAM */ + u8 res17[0x24000]; /* 0x11C000 - 0x140000 */ + u8 res18[0xC0000]; /* 0x140000 - 0x200000 */ +} __attribute__ ((packed)); + +extern struct qe_immap __iomem *qe_immr; +extern phys_addr_t get_qe_base(void); + +/* + * Returns the offset within the QE address space of the given pointer. + * + * Note that the QE does not support 36-bit physical addresses, so if + * get_qe_base() returns a number above 4GB, the caller will probably fail. + */ +static inline phys_addr_t immrbar_virt_to_phys(void *address) +{ + void *q = (void *)qe_immr; + + /* Is it a MURAM address? */ + if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) + return get_qe_base() + (address - q); + + /* It's an address returned by kmalloc */ + return virt_to_phys(address); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_IMMAP_QE_H */ diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h new file mode 100644 index 000000000000..c7fa36c335c9 --- /dev/null +++ b/include/soc/fsl/qe/qe.h @@ -0,0 +1,790 @@ +/* + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Description: + * QUICC Engine (QE) external definitions and structure. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ASM_POWERPC_QE_H +#define _ASM_POWERPC_QE_H +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <linux/genalloc.h> +#include <linux/spinlock.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <asm/cpm.h> +#include <soc/fsl/qe/immap_qe.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/types.h> + +#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */ +#define QE_NUM_OF_BRGS 16 +#define QE_NUM_OF_PORTS 1024 + +/* Memory partitions +*/ +#define MEM_PART_SYSTEM 0 +#define MEM_PART_SECONDARY 1 +#define MEM_PART_MURAM 2 + +/* Clocks and BRGs */ +enum qe_clock { + QE_CLK_NONE = 0, + QE_BRG1, /* Baud Rate Generator 1 */ + QE_BRG2, /* Baud Rate Generator 2 */ + QE_BRG3, /* Baud Rate Generator 3 */ + QE_BRG4, /* Baud Rate Generator 4 */ + QE_BRG5, /* Baud Rate Generator 5 */ + QE_BRG6, /* Baud Rate Generator 6 */ + QE_BRG7, /* Baud Rate Generator 7 */ + QE_BRG8, /* Baud Rate Generator 8 */ + QE_BRG9, /* Baud Rate Generator 9 */ + QE_BRG10, /* Baud Rate Generator 10 */ + QE_BRG11, /* Baud Rate Generator 11 */ + QE_BRG12, /* Baud Rate Generator 12 */ + QE_BRG13, /* Baud Rate Generator 13 */ + QE_BRG14, /* Baud Rate Generator 14 */ + QE_BRG15, /* Baud Rate Generator 15 */ + QE_BRG16, /* Baud Rate Generator 16 */ + QE_CLK1, /* Clock 1 */ + QE_CLK2, /* Clock 2 */ + QE_CLK3, /* Clock 3 */ + QE_CLK4, /* Clock 4 */ + QE_CLK5, /* Clock 5 */ + QE_CLK6, /* Clock 6 */ + QE_CLK7, /* Clock 7 */ + QE_CLK8, /* Clock 8 */ + QE_CLK9, /* Clock 9 */ + QE_CLK10, /* Clock 10 */ + QE_CLK11, /* Clock 11 */ + QE_CLK12, /* Clock 12 */ + QE_CLK13, /* Clock 13 */ + QE_CLK14, /* Clock 14 */ + QE_CLK15, /* Clock 15 */ + QE_CLK16, /* Clock 16 */ + QE_CLK17, /* Clock 17 */ + QE_CLK18, /* Clock 18 */ + QE_CLK19, /* Clock 19 */ + QE_CLK20, /* Clock 20 */ + QE_CLK21, /* Clock 21 */ + QE_CLK22, /* Clock 22 */ + QE_CLK23, /* Clock 23 */ + QE_CLK24, /* Clock 24 */ + QE_CLK_DUMMY +}; + +static inline bool qe_clock_is_brg(enum qe_clock clk) +{ + return clk >= QE_BRG1 && clk <= QE_BRG16; +} + +extern spinlock_t cmxgcr_lock; + +/* Export QE common operations */ +#ifdef CONFIG_QUICC_ENGINE +extern void qe_reset(void); +#else +static inline void qe_reset(void) {} +#endif + +int cpm_muram_init(void); + +#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) +unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); +int cpm_muram_free(unsigned long offset); +unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); +unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo, + void *data); +void __iomem *cpm_muram_addr(unsigned long offset); +unsigned long cpm_muram_offset(void __iomem *addr); +dma_addr_t cpm_muram_dma(void __iomem *addr); +#else +static inline unsigned long cpm_muram_alloc(unsigned long size, + unsigned long align) +{ + return -ENOSYS; +} + +static inline int cpm_muram_free(unsigned long offset) +{ + return -ENOSYS; +} + +static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset, + unsigned long size) +{ + return -ENOSYS; +} + +static inline void __iomem *cpm_muram_addr(unsigned long offset) +{ + return NULL; +} + +static inline unsigned long cpm_muram_offset(void __iomem *addr) +{ + return -ENOSYS; +} + +static inline dma_addr_t cpm_muram_dma(void __iomem *addr) +{ + return 0; +} +#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */ + +/* QE PIO */ +#define QE_PIO_PINS 32 + +struct qe_pio_regs { + __be32 cpodr; /* Open drain register */ + __be32 cpdata; /* Data register */ + 
__be32 cpdir1; /* Direction register */ + __be32 cpdir2; /* Direction register */ + __be32 cppar1; /* Pin assignment register */ + __be32 cppar2; /* Pin assignment register */ +#ifdef CONFIG_PPC_85xx + u8 pad[8]; +#endif +}; + +#define QE_PIO_DIR_IN 2 +#define QE_PIO_DIR_OUT 1 +extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, + int dir, int open_drain, int assignment, + int has_irq); +#ifdef CONFIG_QUICC_ENGINE +extern int par_io_init(struct device_node *np); +extern int par_io_of_config(struct device_node *np); +extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, + int assignment, int has_irq); +extern int par_io_data_set(u8 port, u8 pin, u8 val); +#else +static inline int par_io_init(struct device_node *np) { return -ENOSYS; } +static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } +static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, + int assignment, int has_irq) { return -ENOSYS; } +static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } +#endif /* CONFIG_QUICC_ENGINE */ + +/* + * Pin multiplexing functions. + */ +struct qe_pin; +#ifdef CONFIG_QE_GPIO +extern struct qe_pin *qe_pin_request(struct device_node *np, int index); +extern void qe_pin_free(struct qe_pin *qe_pin); +extern void qe_pin_set_gpio(struct qe_pin *qe_pin); +extern void qe_pin_set_dedicated(struct qe_pin *pin); +#else +static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) +{ + return ERR_PTR(-ENOSYS); +} +static inline void qe_pin_free(struct qe_pin *qe_pin) {} +static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} +static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} +#endif /* CONFIG_QE_GPIO */ + +#ifdef CONFIG_QUICC_ENGINE +int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); +#else +static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, + u32 cmd_input) +{ + return -ENOSYS; +} +#endif /* CONFIG_QUICC_ENGINE */ + +/* QE internal API */ +enum qe_clock qe_clock_source(const char *source); +unsigned int qe_get_brg_clk(void); +int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); +int qe_get_snum(void); +void qe_put_snum(u8 snum); +unsigned int qe_get_num_of_risc(void); +unsigned int qe_get_num_of_snums(void); + +static inline int qe_alive_during_sleep(void) +{ + /* + * MPC8568E reference manual says: + * + * "...power down sequence waits for all I/O interfaces to become idle. + * In some applications this may happen eventually without actively + * shutting down interfaces, but most likely, software will have to + * take steps to shut down the eTSEC, QUICC Engine Block, and PCI + * interfaces before issuing the command (either the write to the core + * MSR[WE] as described above or writing to POWMGTCSR) to put the + * device into sleep state." + * + * MPC8569E reference manual has a similar paragraph. + */ +#ifdef CONFIG_PPC_85xx + return 0; +#else + return 1; +#endif +} + +/* we actually use cpm_muram implementation, define this for convenience */ +#define qe_muram_init cpm_muram_init +#define qe_muram_alloc cpm_muram_alloc +#define qe_muram_alloc_fixed cpm_muram_alloc_fixed +#define qe_muram_free cpm_muram_free +#define qe_muram_addr cpm_muram_addr +#define qe_muram_offset cpm_muram_offset + +/* Structure that defines QE firmware binary files. + * + * See Documentation/powerpc/qe_firmware.txt for a description of these + * fields. 
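
The qe_muram_* macros above are thin aliases for the cpm_muram_* allocator declared earlier in this header. A minimal sketch of the usual allocate/map pattern, with an illustrative caller that is not part of the patch:

static void __iomem *example_alloc_pram(unsigned long size,
					unsigned long *offset_out)
{
	/* Offsets are returned in MURAM space; failures come back as
	 * error values and are checked with IS_ERR_VALUE(). */
	unsigned long offset = qe_muram_alloc(size, QE_ALIGNMENT_OF_PRAM);

	if (IS_ERR_VALUE(offset))
		return NULL;

	*offset_out = offset;
	return qe_muram_addr(offset);	/* kernel virtual address of the region */
}
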
+ */ +struct qe_firmware { + struct qe_header { + __be32 length; /* Length of the entire structure, in bytes */ + u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */ + u8 version; /* Version of this layout. First ver is '1' */ + } header; + u8 id[62]; /* Null-terminated identifier string */ + u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */ + u8 count; /* Number of microcode[] structures */ + struct { + __be16 model; /* The SOC model */ + u8 major; /* The SOC revision major */ + u8 minor; /* The SOC revision minor */ + } __attribute__ ((packed)) soc; + u8 padding[4]; /* Reserved, for alignment */ + __be64 extended_modes; /* Extended modes */ + __be32 vtraps[8]; /* Virtual trap addresses */ + u8 reserved[4]; /* Reserved, for future expansion */ + struct qe_microcode { + u8 id[32]; /* Null-terminated identifier */ + __be32 traps[16]; /* Trap addresses, 0 == ignore */ + __be32 eccr; /* The value for the ECCR register */ + __be32 iram_offset; /* Offset into I-RAM for the code */ + __be32 count; /* Number of 32-bit words of the code */ + __be32 code_offset; /* Offset of the actual microcode */ + u8 major; /* The microcode version major */ + u8 minor; /* The microcode version minor */ + u8 revision; /* The microcode version revision */ + u8 padding; /* Reserved, for alignment */ + u8 reserved[4]; /* Reserved, for future expansion */ + } __attribute__ ((packed)) microcode[1]; + /* All microcode binaries should be located here */ + /* CRC32 should be located here, after the microcode binaries */ +} __attribute__ ((packed)); + +struct qe_firmware_info { + char id[64]; /* Firmware name */ + u32 vtraps[8]; /* Virtual trap addresses */ + u64 extended_modes; /* Extended modes */ +}; + +#ifdef CONFIG_QUICC_ENGINE +/* Upload a firmware to the QE */ +int qe_upload_firmware(const struct qe_firmware *firmware); +#else +static inline int qe_upload_firmware(const struct qe_firmware *firmware) +{ + return -ENOSYS; +} +#endif /* CONFIG_QUICC_ENGINE */ + +/* Obtain information on the uploaded firmware */ +struct qe_firmware_info *qe_get_firmware_info(void); + +/* QE USB */ +int qe_usb_clock_set(enum qe_clock clk, int rate); + +/* Buffer descriptors */ +struct qe_bd { + __be16 status; + __be16 length; + __be32 buf; +} __attribute__ ((packed)); + +#define BD_STATUS_MASK 0xffff0000 +#define BD_LENGTH_MASK 0x0000ffff + +/* Alignment */ +#define QE_INTR_TABLE_ALIGN 16 /* ??? 
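
qe_upload_firmware() above consumes a blob laid out as struct qe_firmware. A minimal sketch of fetching such a blob and handing it to the QE; the firmware file name and the surrounding function are assumptions, not part of this patch:

#include <linux/firmware.h>

static int example_load_qe_ucode(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "fsl_qe_ucode.bin", dev); /* hypothetical name */
	if (ret)
		return ret;

	ret = qe_upload_firmware((const struct qe_firmware *)fw->data);
	release_firmware(fw);
	return ret;
}
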
*/ +#define QE_ALIGNMENT_OF_BD 8 +#define QE_ALIGNMENT_OF_PRAM 64 + +/* RISC allocation */ +#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ +#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ +#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ +#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ +#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2) +#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2 | \ + QE_RISC_ALLOCATION_RISC3 | \ + QE_RISC_ALLOCATION_RISC4) + +/* QE extended filtering Table Lookup Key Size */ +enum qe_fltr_tbl_lookup_key_size { + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES + = 0x3f, /* LookupKey parsed by the Generate LookupKey + CMD is truncated to 8 bytes */ + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES + = 0x5f, /* LookupKey parsed by the Generate LookupKey + CMD is truncated to 16 bytes */ +}; + +/* QE FLTR extended filtering Largest External Table Lookup Key Size */ +enum qe_fltr_largest_external_tbl_lookup_key_size { + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE + = 0x0,/* not used */ + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES + = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES, /* 8 bytes */ + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES + = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES, /* 16 bytes */ +}; + +/* structure representing QE parameter RAM */ +struct qe_timer_tables { + u16 tm_base; /* QE timer table base adr */ + u16 tm_ptr; /* QE timer table pointer */ + u16 r_tmr; /* QE timer mode register */ + u16 r_tmv; /* QE timer valid register */ + u32 tm_cmd; /* QE timer cmd register */ + u32 tm_cnt; /* QE timer internal cnt */ +} __attribute__ ((packed)); + +#define QE_FLTR_TAD_SIZE 8 + +/* QE extended filtering Termination Action Descriptor (TAD) */ +struct qe_fltr_tad { + u8 serialized[QE_FLTR_TAD_SIZE]; +} __attribute__ ((packed)); + +/* Communication Direction */ +enum comm_dir { + COMM_DIR_NONE = 0, + COMM_DIR_RX = 1, + COMM_DIR_TX = 2, + COMM_DIR_RX_AND_TX = 3 +}; + +/* QE CMXUCR Registers. + * There are two UCCs represented in each of the four CMXUCR registers. + * These values are for the UCC in the LSBs + */ +#define QE_CMXUCR_MII_ENET_MNG 0x00007000 +#define QE_CMXUCR_MII_ENET_MNG_SHIFT 12 +#define QE_CMXUCR_GRANT 0x00008000 +#define QE_CMXUCR_TSA 0x00004000 +#define QE_CMXUCR_BKPT 0x00000100 +#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F + +/* QE CMXGCR Registers. +*/ +#define QE_CMXGCR_MII_ENET_MNG 0x00007000 +#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 +#define QE_CMXGCR_USBCS 0x0000000f +#define QE_CMXGCR_USBCS_CLK3 0x1 +#define QE_CMXGCR_USBCS_CLK5 0x2 +#define QE_CMXGCR_USBCS_CLK7 0x3 +#define QE_CMXGCR_USBCS_CLK9 0x4 +#define QE_CMXGCR_USBCS_CLK13 0x5 +#define QE_CMXGCR_USBCS_CLK17 0x6 +#define QE_CMXGCR_USBCS_CLK19 0x7 +#define QE_CMXGCR_USBCS_CLK21 0x8 +#define QE_CMXGCR_USBCS_BRG9 0x9 +#define QE_CMXGCR_USBCS_BRG10 0xa + +/* QE CECR Commands. 
+*/ +#define QE_CR_FLG 0x00010000 +#define QE_RESET 0x80000000 +#define QE_INIT_TX_RX 0x00000000 +#define QE_INIT_RX 0x00000001 +#define QE_INIT_TX 0x00000002 +#define QE_ENTER_HUNT_MODE 0x00000003 +#define QE_STOP_TX 0x00000004 +#define QE_GRACEFUL_STOP_TX 0x00000005 +#define QE_RESTART_TX 0x00000006 +#define QE_CLOSE_RX_BD 0x00000007 +#define QE_SWITCH_COMMAND 0x00000007 +#define QE_SET_GROUP_ADDRESS 0x00000008 +#define QE_START_IDMA 0x00000009 +#define QE_MCC_STOP_RX 0x00000009 +#define QE_ATM_TRANSMIT 0x0000000a +#define QE_HPAC_CLEAR_ALL 0x0000000b +#define QE_GRACEFUL_STOP_RX 0x0000001a +#define QE_RESTART_RX 0x0000001b +#define QE_HPAC_SET_PRIORITY 0x0000010b +#define QE_HPAC_STOP_TX 0x0000020b +#define QE_HPAC_STOP_RX 0x0000030b +#define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b +#define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b +#define QE_HPAC_START_TX 0x0000060b +#define QE_HPAC_START_RX 0x0000070b +#define QE_USB_STOP_TX 0x0000000a +#define QE_USB_RESTART_TX 0x0000000c +#define QE_QMC_STOP_TX 0x0000000c +#define QE_QMC_STOP_RX 0x0000000d +#define QE_SS7_SU_FIL_RESET 0x0000000e +/* jonathbr added from here down for 83xx */ +#define QE_RESET_BCS 0x0000000a +#define QE_MCC_INIT_TX_RX_16 0x00000003 +#define QE_MCC_STOP_TX 0x00000004 +#define QE_MCC_INIT_TX_1 0x00000005 +#define QE_MCC_INIT_RX_1 0x00000006 +#define QE_MCC_RESET 0x00000007 +#define QE_SET_TIMER 0x00000008 +#define QE_RANDOM_NUMBER 0x0000000c +#define QE_ATM_MULTI_THREAD_INIT 0x00000011 +#define QE_ASSIGN_PAGE 0x00000012 +#define QE_ADD_REMOVE_HASH_ENTRY 0x00000013 +#define QE_START_FLOW_CONTROL 0x00000014 +#define QE_STOP_FLOW_CONTROL 0x00000015 +#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016 + +#define QE_ASSIGN_RISC 0x00000010 +#define QE_CR_MCN_NORMAL_SHIFT 6 +#define QE_CR_MCN_USB_SHIFT 4 +#define QE_CR_MCN_RISC_ASSIGN_SHIFT 8 +#define QE_CR_SNUM_SHIFT 17 + +/* QE CECR Sub Block - sub block of QE command. 
+*/ +#define QE_CR_SUBBLOCK_INVALID 0x00000000 +#define QE_CR_SUBBLOCK_USB 0x03200000 +#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000 +#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000 +#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000 +#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000 +#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000 +#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000 +#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000 +#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000 +#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000 +#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000 +#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000 +#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000 +#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000 +#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000 +#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000 +#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000 +#define QE_CR_SUBBLOCK_MCC1 0x03800000 +#define QE_CR_SUBBLOCK_MCC2 0x03a00000 +#define QE_CR_SUBBLOCK_MCC3 0x03000000 +#define QE_CR_SUBBLOCK_IDMA1 0x02800000 +#define QE_CR_SUBBLOCK_IDMA2 0x02a00000 +#define QE_CR_SUBBLOCK_IDMA3 0x02c00000 +#define QE_CR_SUBBLOCK_IDMA4 0x02e00000 +#define QE_CR_SUBBLOCK_HPAC 0x01e00000 +#define QE_CR_SUBBLOCK_SPI1 0x01400000 +#define QE_CR_SUBBLOCK_SPI2 0x01600000 +#define QE_CR_SUBBLOCK_RAND 0x01c00000 +#define QE_CR_SUBBLOCK_TIMER 0x01e00000 +#define QE_CR_SUBBLOCK_GENERAL 0x03c00000 + +/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */ +#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */ +#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00 +#define QE_CR_PROTOCOL_QMC 0x02 +#define QE_CR_PROTOCOL_UART 0x04 +#define QE_CR_PROTOCOL_ATM_POS 0x0A +#define QE_CR_PROTOCOL_ETHERNET 0x0C +#define QE_CR_PROTOCOL_L2_SWITCH 0x0D + +/* BRG configuration register */ +#define QE_BRGC_ENABLE 0x00010000 +#define QE_BRGC_DIVISOR_SHIFT 1 +#define QE_BRGC_DIVISOR_MAX 0xFFF +#define QE_BRGC_DIV16 1 + +/* QE Timers registers */ +#define QE_GTCFR1_PCAS 0x80 +#define QE_GTCFR1_STP2 0x20 +#define QE_GTCFR1_RST2 0x10 +#define QE_GTCFR1_GM2 0x08 +#define QE_GTCFR1_GM1 0x04 +#define QE_GTCFR1_STP1 0x02 +#define QE_GTCFR1_RST1 0x01 + +/* SDMA registers */ +#define QE_SDSR_BER1 0x02000000 +#define QE_SDSR_BER2 0x01000000 + +#define QE_SDMR_GLB_1_MSK 0x80000000 +#define QE_SDMR_ADR_SEL 0x20000000 +#define QE_SDMR_BER1_MSK 0x02000000 +#define QE_SDMR_BER2_MSK 0x01000000 +#define QE_SDMR_EB1_MSK 0x00800000 +#define QE_SDMR_ER1_MSK 0x00080000 +#define QE_SDMR_ER2_MSK 0x00040000 +#define QE_SDMR_CEN_MASK 0x0000E000 +#define QE_SDMR_SBER_1 0x00000200 +#define QE_SDMR_SBER_2 0x00000200 +#define QE_SDMR_EB1_PR_MASK 0x000000C0 +#define QE_SDMR_ER1_PR 0x00000008 + +#define QE_SDMR_CEN_SHIFT 13 +#define QE_SDMR_EB1_PR_SHIFT 6 + +#define QE_SDTM_MSNUM_SHIFT 24 + +#define QE_SDEBCR_BA_MASK 0x01FFFFFF + +/* Communication Processor */ +#define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */ +#define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */ +#define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */ + +/* I-RAM */ +#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */ +#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */ +#define QE_IRAM_READY 0x80000000 /* Ready */ + +/* UPC */ +#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */ +#define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */ +#define UPGCR_RMS 0x20000000 /* Receive master/slave mode */ +#define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */ +#define UPGCR_DIAG 0x01000000 /* Diagnostic mode */ + +/* UCC GUEMR register */ +#define UCC_GUEMR_MODE_MASK_RX 0x02 +#define 
UCC_GUEMR_MODE_FAST_RX 0x02 +#define UCC_GUEMR_MODE_SLOW_RX 0x00 +#define UCC_GUEMR_MODE_MASK_TX 0x01 +#define UCC_GUEMR_MODE_FAST_TX 0x01 +#define UCC_GUEMR_MODE_SLOW_TX 0x00 +#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX) +#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but + must be set 1 */ + +/* structure representing UCC SLOW parameter RAM */ +struct ucc_slow_pram { + __be16 rbase; /* RX BD base address */ + __be16 tbase; /* TX BD base address */ + u8 rbmr; /* RX bus mode register (same as CPM's RFCR) */ + u8 tbmr; /* TX bus mode register (same as CPM's TFCR) */ + __be16 mrblr; /* Rx buffer length */ + __be32 rstate; /* Rx internal state */ + __be32 rptr; /* Rx internal data pointer */ + __be16 rbptr; /* rb BD Pointer */ + __be16 rcount; /* Rx internal byte count */ + __be32 rtemp; /* Rx temp */ + __be32 tstate; /* Tx internal state */ + __be32 tptr; /* Tx internal data pointer */ + __be16 tbptr; /* Tx BD pointer */ + __be16 tcount; /* Tx byte count */ + __be32 ttemp; /* Tx temp */ + __be32 rcrc; /* temp receive CRC */ + __be32 tcrc; /* temp transmit CRC */ +} __attribute__ ((packed)); + +/* General UCC SLOW Mode Register (GUMRH & GUMRL) */ +#define UCC_SLOW_GUMR_H_SAM_QMC 0x00000000 +#define UCC_SLOW_GUMR_H_SAM_SATM 0x00008000 +#define UCC_SLOW_GUMR_H_REVD 0x00002000 +#define UCC_SLOW_GUMR_H_TRX 0x00001000 +#define UCC_SLOW_GUMR_H_TTX 0x00000800 +#define UCC_SLOW_GUMR_H_CDP 0x00000400 +#define UCC_SLOW_GUMR_H_CTSP 0x00000200 +#define UCC_SLOW_GUMR_H_CDS 0x00000100 +#define UCC_SLOW_GUMR_H_CTSS 0x00000080 +#define UCC_SLOW_GUMR_H_TFL 0x00000040 +#define UCC_SLOW_GUMR_H_RFW 0x00000020 +#define UCC_SLOW_GUMR_H_TXSY 0x00000010 +#define UCC_SLOW_GUMR_H_4SYNC 0x00000004 +#define UCC_SLOW_GUMR_H_8SYNC 0x00000008 +#define UCC_SLOW_GUMR_H_16SYNC 0x0000000c +#define UCC_SLOW_GUMR_H_RTSM 0x00000002 +#define UCC_SLOW_GUMR_H_RSYN 0x00000001 + +#define UCC_SLOW_GUMR_L_TCI 0x10000000 +#define UCC_SLOW_GUMR_L_RINV 0x02000000 +#define UCC_SLOW_GUMR_L_TINV 0x01000000 +#define UCC_SLOW_GUMR_L_TEND 0x00040000 +#define UCC_SLOW_GUMR_L_TDCR_MASK 0x00030000 +#define UCC_SLOW_GUMR_L_TDCR_32 0x00030000 +#define UCC_SLOW_GUMR_L_TDCR_16 0x00020000 +#define UCC_SLOW_GUMR_L_TDCR_8 0x00010000 +#define UCC_SLOW_GUMR_L_TDCR_1 0x00000000 +#define UCC_SLOW_GUMR_L_RDCR_MASK 0x0000c000 +#define UCC_SLOW_GUMR_L_RDCR_32 0x0000c000 +#define UCC_SLOW_GUMR_L_RDCR_16 0x00008000 +#define UCC_SLOW_GUMR_L_RDCR_8 0x00004000 +#define UCC_SLOW_GUMR_L_RDCR_1 0x00000000 +#define UCC_SLOW_GUMR_L_RENC_NRZI 0x00000800 +#define UCC_SLOW_GUMR_L_RENC_NRZ 0x00000000 +#define UCC_SLOW_GUMR_L_TENC_NRZI 0x00000100 +#define UCC_SLOW_GUMR_L_TENC_NRZ 0x00000000 +#define UCC_SLOW_GUMR_L_DIAG_MASK 0x000000c0 +#define UCC_SLOW_GUMR_L_DIAG_LE 0x000000c0 +#define UCC_SLOW_GUMR_L_DIAG_ECHO 0x00000080 +#define UCC_SLOW_GUMR_L_DIAG_LOOP 0x00000040 +#define UCC_SLOW_GUMR_L_DIAG_NORM 0x00000000 +#define UCC_SLOW_GUMR_L_ENR 0x00000020 +#define UCC_SLOW_GUMR_L_ENT 0x00000010 +#define UCC_SLOW_GUMR_L_MODE_MASK 0x0000000F +#define UCC_SLOW_GUMR_L_MODE_BISYNC 0x00000008 +#define UCC_SLOW_GUMR_L_MODE_AHDLC 0x00000006 +#define UCC_SLOW_GUMR_L_MODE_UART 0x00000004 +#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002 + +/* General UCC FAST Mode Register */ +#define UCC_FAST_GUMR_TCI 0x20000000 +#define UCC_FAST_GUMR_TRX 0x10000000 +#define UCC_FAST_GUMR_TTX 0x08000000 +#define UCC_FAST_GUMR_CDP 0x04000000 +#define UCC_FAST_GUMR_CTSP 0x02000000 +#define UCC_FAST_GUMR_CDS 0x01000000 +#define UCC_FAST_GUMR_CTSS 
0x00800000 +#define UCC_FAST_GUMR_TXSY 0x00020000 +#define UCC_FAST_GUMR_RSYN 0x00010000 +#define UCC_FAST_GUMR_RTSM 0x00002000 +#define UCC_FAST_GUMR_REVD 0x00000400 +#define UCC_FAST_GUMR_ENR 0x00000020 +#define UCC_FAST_GUMR_ENT 0x00000010 + +/* UART Slow UCC Event Register (UCCE) */ +#define UCC_UART_UCCE_AB 0x0200 +#define UCC_UART_UCCE_IDLE 0x0100 +#define UCC_UART_UCCE_GRA 0x0080 +#define UCC_UART_UCCE_BRKE 0x0040 +#define UCC_UART_UCCE_BRKS 0x0020 +#define UCC_UART_UCCE_CCR 0x0008 +#define UCC_UART_UCCE_BSY 0x0004 +#define UCC_UART_UCCE_TX 0x0002 +#define UCC_UART_UCCE_RX 0x0001 + +/* HDLC Slow UCC Event Register (UCCE) */ +#define UCC_HDLC_UCCE_GLR 0x1000 +#define UCC_HDLC_UCCE_GLT 0x0800 +#define UCC_HDLC_UCCE_IDLE 0x0100 +#define UCC_HDLC_UCCE_BRKE 0x0040 +#define UCC_HDLC_UCCE_BRKS 0x0020 +#define UCC_HDLC_UCCE_TXE 0x0010 +#define UCC_HDLC_UCCE_RXF 0x0008 +#define UCC_HDLC_UCCE_BSY 0x0004 +#define UCC_HDLC_UCCE_TXB 0x0002 +#define UCC_HDLC_UCCE_RXB 0x0001 + +/* BISYNC Slow UCC Event Register (UCCE) */ +#define UCC_BISYNC_UCCE_GRA 0x0080 +#define UCC_BISYNC_UCCE_TXE 0x0010 +#define UCC_BISYNC_UCCE_RCH 0x0008 +#define UCC_BISYNC_UCCE_BSY 0x0004 +#define UCC_BISYNC_UCCE_TXB 0x0002 +#define UCC_BISYNC_UCCE_RXB 0x0001 + +/* Gigabit Ethernet Fast UCC Event Register (UCCE) */ +#define UCC_GETH_UCCE_MPD 0x80000000 +#define UCC_GETH_UCCE_SCAR 0x40000000 +#define UCC_GETH_UCCE_GRA 0x20000000 +#define UCC_GETH_UCCE_CBPR 0x10000000 +#define UCC_GETH_UCCE_BSY 0x08000000 +#define UCC_GETH_UCCE_RXC 0x04000000 +#define UCC_GETH_UCCE_TXC 0x02000000 +#define UCC_GETH_UCCE_TXE 0x01000000 +#define UCC_GETH_UCCE_TXB7 0x00800000 +#define UCC_GETH_UCCE_TXB6 0x00400000 +#define UCC_GETH_UCCE_TXB5 0x00200000 +#define UCC_GETH_UCCE_TXB4 0x00100000 +#define UCC_GETH_UCCE_TXB3 0x00080000 +#define UCC_GETH_UCCE_TXB2 0x00040000 +#define UCC_GETH_UCCE_TXB1 0x00020000 +#define UCC_GETH_UCCE_TXB0 0x00010000 +#define UCC_GETH_UCCE_RXB7 0x00008000 +#define UCC_GETH_UCCE_RXB6 0x00004000 +#define UCC_GETH_UCCE_RXB5 0x00002000 +#define UCC_GETH_UCCE_RXB4 0x00001000 +#define UCC_GETH_UCCE_RXB3 0x00000800 +#define UCC_GETH_UCCE_RXB2 0x00000400 +#define UCC_GETH_UCCE_RXB1 0x00000200 +#define UCC_GETH_UCCE_RXB0 0x00000100 +#define UCC_GETH_UCCE_RXF7 0x00000080 +#define UCC_GETH_UCCE_RXF6 0x00000040 +#define UCC_GETH_UCCE_RXF5 0x00000020 +#define UCC_GETH_UCCE_RXF4 0x00000010 +#define UCC_GETH_UCCE_RXF3 0x00000008 +#define UCC_GETH_UCCE_RXF2 0x00000004 +#define UCC_GETH_UCCE_RXF1 0x00000002 +#define UCC_GETH_UCCE_RXF0 0x00000001 + +/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */ +#define UCC_UART_UPSMR_FLC 0x8000 +#define UCC_UART_UPSMR_SL 0x4000 +#define UCC_UART_UPSMR_CL_MASK 0x3000 +#define UCC_UART_UPSMR_CL_8 0x3000 +#define UCC_UART_UPSMR_CL_7 0x2000 +#define UCC_UART_UPSMR_CL_6 0x1000 +#define UCC_UART_UPSMR_CL_5 0x0000 +#define UCC_UART_UPSMR_UM_MASK 0x0c00 +#define UCC_UART_UPSMR_UM_NORMAL 0x0000 +#define UCC_UART_UPSMR_UM_MAN_MULTI 0x0400 +#define UCC_UART_UPSMR_UM_AUTO_MULTI 0x0c00 +#define UCC_UART_UPSMR_FRZ 0x0200 +#define UCC_UART_UPSMR_RZS 0x0100 +#define UCC_UART_UPSMR_SYN 0x0080 +#define UCC_UART_UPSMR_DRT 0x0040 +#define UCC_UART_UPSMR_PEN 0x0010 +#define UCC_UART_UPSMR_RPM_MASK 0x000c +#define UCC_UART_UPSMR_RPM_ODD 0x0000 +#define UCC_UART_UPSMR_RPM_LOW 0x0004 +#define UCC_UART_UPSMR_RPM_EVEN 0x0008 +#define UCC_UART_UPSMR_RPM_HIGH 0x000C +#define UCC_UART_UPSMR_TPM_MASK 0x0003 +#define UCC_UART_UPSMR_TPM_ODD 0x0000 +#define UCC_UART_UPSMR_TPM_LOW 0x0001 +#define 
UCC_UART_UPSMR_TPM_EVEN 0x0002 +#define UCC_UART_UPSMR_TPM_HIGH 0x0003 + +/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */ +#define UCC_GETH_UPSMR_FTFE 0x80000000 +#define UCC_GETH_UPSMR_PTPE 0x40000000 +#define UCC_GETH_UPSMR_ECM 0x04000000 +#define UCC_GETH_UPSMR_HSE 0x02000000 +#define UCC_GETH_UPSMR_PRO 0x00400000 +#define UCC_GETH_UPSMR_CAP 0x00200000 +#define UCC_GETH_UPSMR_RSH 0x00100000 +#define UCC_GETH_UPSMR_RPM 0x00080000 +#define UCC_GETH_UPSMR_R10M 0x00040000 +#define UCC_GETH_UPSMR_RLPB 0x00020000 +#define UCC_GETH_UPSMR_TBIM 0x00010000 +#define UCC_GETH_UPSMR_RES1 0x00002000 +#define UCC_GETH_UPSMR_RMM 0x00001000 +#define UCC_GETH_UPSMR_CAM 0x00000400 +#define UCC_GETH_UPSMR_BRO 0x00000200 +#define UCC_GETH_UPSMR_SMM 0x00000080 +#define UCC_GETH_UPSMR_SGMM 0x00000020 + +/* UCC Transmit On Demand Register (UTODR) */ +#define UCC_SLOW_TOD 0x8000 +#define UCC_FAST_TOD 0x8000 + +/* UCC Bus Mode Register masks */ +/* Not to be confused with the Bundle Mode Register */ +#define UCC_BMR_GBL 0x20 +#define UCC_BMR_BO_BE 0x10 +#define UCC_BMR_CETM 0x04 +#define UCC_BMR_DTB 0x02 +#define UCC_BMR_BDB 0x01 + +/* Function code masks */ +#define FC_GBL 0x20 +#define FC_DTB_LCL 0x02 +#define UCC_FAST_FUNCTION_CODE_GBL 0x20 +#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02 +#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01 + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_QE_H */ diff --git a/include/soc/fsl/qe/qe_ic.h b/include/soc/fsl/qe/qe_ic.h new file mode 100644 index 000000000000..1e155ca6d33c --- /dev/null +++ b/include/soc/fsl/qe/qe_ic.h @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Description: + * QE IC external definitions and structure. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ASM_POWERPC_QE_IC_H +#define _ASM_POWERPC_QE_IC_H + +#include <linux/irq.h> + +struct device_node; +struct qe_ic; + +#define NUM_OF_QE_IC_GROUPS 6 + +/* Flags when we init the QE IC */ +#define QE_IC_SPREADMODE_GRP_W 0x00000001 +#define QE_IC_SPREADMODE_GRP_X 0x00000002 +#define QE_IC_SPREADMODE_GRP_Y 0x00000004 +#define QE_IC_SPREADMODE_GRP_Z 0x00000008 +#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010 +#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020 + +#define QE_IC_LOW_SIGNAL 0x00000100 +#define QE_IC_HIGH_SIGNAL 0x00000200 + +#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000 +#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000 +#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000 +#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000 +#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000 +#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000 +#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000 +#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000 +#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000 +#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000 +#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000 +#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000 +#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12) + +/* QE interrupt sources groups */ +enum qe_ic_grp_id { + QE_IC_GRP_W = 0, /* QE interrupt controller group W */ + QE_IC_GRP_X, /* QE interrupt controller group X */ + QE_IC_GRP_Y, /* QE interrupt controller group Y */ + QE_IC_GRP_Z, /* QE interrupt controller group Z */ + QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ + QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ +}; + +#ifdef CONFIG_QUICC_ENGINE +void qe_ic_init(struct device_node *node, unsigned int flags, + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)); +unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); +unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); +#else +static inline void qe_ic_init(struct device_node *node, unsigned int flags, + void (*low_handler)(struct irq_desc *desc), + void (*high_handler)(struct irq_desc *desc)) +{} +static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +{ return 0; } +static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +{ return 0; } +#endif /* CONFIG_QUICC_ENGINE */ + +void qe_ic_set_highest_priority(unsigned int virq, int high); +int qe_ic_set_priority(unsigned int virq, unsigned int priority); +int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); + +static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); +} + +static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); +} + +static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = 
qe_ic_get_high_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq; + struct irq_chip *chip = irq_desc_get_chip(desc); + + cascade_irq = qe_ic_get_high_irq(qe_ic); + if (cascade_irq == NO_IRQ) + cascade_irq = qe_ic_get_low_irq(qe_ic); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +#endif /* _ASM_POWERPC_QE_IC_H */ diff --git a/include/soc/fsl/qe/ucc.h b/include/soc/fsl/qe/ucc.h new file mode 100644 index 000000000000..894f14cbb044 --- /dev/null +++ b/include/soc/fsl/qe/ucc.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Description: + * Internal header file for UCC unit routines. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_H__ +#define __UCC_H__ + +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> + +#define STATISTICS + +#define UCC_MAX_NUM 8 + +/* Slow or fast type for UCCs. +*/ +enum ucc_speed_type { + UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX, + UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX +}; + +/* ucc_set_type + * Sets UCC to slow or fast mode. + * + * ucc_num - (In) number of UCC (0-7). + * speed - (In) slow or fast mode for UCC. + */ +int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed); + +int ucc_set_qe_mux_mii_mng(unsigned int ucc_num); + +int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, + enum comm_dir mode); + +int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask); + +/* QE MUX clock routing for UCC +*/ +static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set) +{ + return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT); +} + +static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set) +{ + return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA); +} + +static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set) +{ + return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT); +} + +#endif /* __UCC_H__ */ diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h new file mode 100644 index 000000000000..df8ea7958c63 --- /dev/null +++ b/include/soc/fsl/qe/ucc_fast.h @@ -0,0 +1,244 @@ +/* + * Internal header file for UCC FAST unit routines. + * + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
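
The ucc.h helpers above set a UCC's speed class and route QE MUX clocks to it. A minimal sketch of bringing up clocking for a fast UCC with them; the UCC number and clock choices are purely illustrative:

static int example_ucc_clock_setup(unsigned int ucc_num)
{
	int ret;

	ret = ucc_set_type(ucc_num, UCC_SPEED_TYPE_FAST);
	if (ret)
		return ret;

	/* Route CLK17 to this UCC's receiver and CLK16 to its transmitter. */
	ret = ucc_set_qe_mux_rxtx(ucc_num, QE_CLK17, COMM_DIR_RX);
	if (ret)
		return ret;

	return ucc_set_qe_mux_rxtx(ucc_num, QE_CLK16, COMM_DIR_TX);
}
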
+ */ +#ifndef __UCC_FAST_H__ +#define __UCC_FAST_H__ + +#include <linux/kernel.h> + +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> + +#include <soc/fsl/qe/ucc.h> + +/* Receive BD's status */ +#define R_E 0x80000000 /* buffer empty */ +#define R_W 0x20000000 /* wrap bit */ +#define R_I 0x10000000 /* interrupt on reception */ +#define R_L 0x08000000 /* last */ +#define R_F 0x04000000 /* first */ + +/* transmit BD's status */ +#define T_R 0x80000000 /* ready bit */ +#define T_W 0x20000000 /* wrap bit */ +#define T_I 0x10000000 /* interrupt on completion */ +#define T_L 0x08000000 /* last */ + +/* Rx Data buffer must be 4 bytes aligned in most cases */ +#define UCC_FAST_RX_ALIGN 4 +#define UCC_FAST_MRBLR_ALIGNMENT 4 +#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8 + +/* Sizes */ +#define UCC_FAST_URFS_MIN_VAL 0x88 +#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR 8 + +/* ucc_fast_channel_protocol_mode - UCC FAST mode */ +enum ucc_fast_channel_protocol_mode { + UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000, + UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001, + UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002, + UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003, + UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004, + UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005, + UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006, + UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007, + UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008, + UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009, + UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A, + UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B, + UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C, + UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D, + UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E, + UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F +}; + +/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */ +enum ucc_fast_transparent_txrx { + UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000, + UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000 +}; + +/* UCC fast diagnostic mode */ +enum ucc_fast_diag_mode { + UCC_FAST_DIAGNOSTIC_NORMAL = 0x0, + UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000, + UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000, + UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000 +}; + +/* UCC fast Sync length (transparent mode only) */ +enum ucc_fast_sync_len { + UCC_FAST_SYNC_LEN_NOT_USED = 0x0, + UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000, + UCC_FAST_SYNC_LEN_8_BIT = 0x00008000, + UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000 +}; + +/* UCC fast RTS mode */ +enum ucc_fast_ready_to_send { + UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000, + UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000 +}; + +/* UCC fast receiver decoding mode */ +enum ucc_fast_rx_decoding_method { + UCC_FAST_RX_ENCODING_NRZ = 0x00000000, + UCC_FAST_RX_ENCODING_NRZI = 0x00000800, + UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000, + UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800 +}; + +/* UCC fast transmitter encoding mode */ +enum ucc_fast_tx_encoding_method { + UCC_FAST_TX_ENCODING_NRZ = 0x00000000, + UCC_FAST_TX_ENCODING_NRZI = 0x00000100, + UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200, + UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300 +}; + +/* UCC fast CRC length */ +enum ucc_fast_transparent_tcrc { + UCC_FAST_16_BIT_CRC = 0x00000000, + UCC_FAST_CRC_RESERVED0 = 0x00000040, + UCC_FAST_32_BIT_CRC = 0x00000080, + UCC_FAST_CRC_RESERVED1 = 0x000000C0 +}; + +/* Fast UCC initialization structure */ +struct ucc_fast_info { + int ucc_num; + enum qe_clock rx_clock; + enum qe_clock tx_clock; + u32 regs; + 
int irq; + u32 uccm_mask; + int bd_mem_part; + int brkpt_support; + int grant_support; + int tsa; + int cdp; + int cds; + int ctsp; + int ctss; + int tci; + int txsy; + int rtsm; + int revd; + int rsyn; + u16 max_rx_buf_length; + u16 urfs; + u16 urfet; + u16 urfset; + u16 utfs; + u16 utfet; + u16 utftt; + u16 ufpt; + enum ucc_fast_channel_protocol_mode mode; + enum ucc_fast_transparent_txrx ttx_trx; + enum ucc_fast_tx_encoding_method tenc; + enum ucc_fast_rx_decoding_method renc; + enum ucc_fast_transparent_tcrc tcrc; + enum ucc_fast_sync_len synl; +}; + +struct ucc_fast_private { + struct ucc_fast_info *uf_info; + struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */ + u32 __iomem *p_ucce; /* a pointer to the event register in memory. */ + u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ +#ifdef CONFIG_UGETH_TX_ON_DEMAND + u16 __iomem *p_utodr; /* pointer to the transmit on demand register */ +#endif + int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ + int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ + int stopped_tx; /* Whether channel has been stopped for Tx + (STOP_TX, etc.) */ + int stopped_rx; /* Whether channel has been stopped for Rx */ + u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx + virtual fifo */ + u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx + virtual fifo */ +#ifdef STATISTICS + u32 tx_frames; /* Transmitted frames counter. */ + u32 rx_frames; /* Received frames counter (only frames + passed to application). */ + u32 tx_discarded; /* Discarded tx frames counter (frames that + were discarded by the driver due to errors). + */ + u32 rx_discarded; /* Discarded rx frames counter (frames that + were discarded by the driver due to errors). + */ +#endif /* STATISTICS */ + u16 mrblr; /* maximum receive buffer length */ +}; + +/* ucc_fast_init + * Initializes Fast UCC according to user provided parameters. + * + * uf_info - (In) pointer to the fast UCC info structure. + * uccf_ret - (Out) pointer to the fast UCC structure. + */ +int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret); + +/* ucc_fast_free + * Frees all resources for fast UCC. + * + * uccf - (In) pointer to the fast UCC structure. + */ +void ucc_fast_free(struct ucc_fast_private * uccf); + +/* ucc_fast_enable + * Enables a fast UCC port. + * This routine enables Tx and/or Rx through the General UCC Mode Register. + * + * uccf - (In) pointer to the fast UCC structure. + * mode - (In) TX, RX, or both. + */ +void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode); + +/* ucc_fast_disable + * Disables a fast UCC port. + * This routine disables Tx and/or Rx through the General UCC Mode Register. + * + * uccf - (In) pointer to the fast UCC structure. + * mode - (In) TX, RX, or both. + */ +void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode); + +/* ucc_fast_irq + * Handles interrupts on fast UCC. + * Called from the general interrupt routine to handle interrupts on fast UCC. + * + * uccf - (In) pointer to the fast UCC structure. + */ +void ucc_fast_irq(struct ucc_fast_private * uccf); + +/* ucc_fast_transmit_on_demand + * Immediately forces a poll of the transmitter for data to be sent. + * Typically, the hardware performs a periodic poll for data that the + * transmit routine has set up to be transmitted. In cases where + * this polling cycle is not soon enough, this optional routine can + * be invoked to force a poll right away, instead. 
Proper use for + * each transmission for which this functionality is desired is to + * call the transmit routine and then this routine right after. + * + * uccf - (In) pointer to the fast UCC structure. + */ +void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf); + +u32 ucc_fast_get_qe_cr_subblock(int uccf_num); + +void ucc_fast_dump_regs(struct ucc_fast_private * uccf); + +#endif /* __UCC_FAST_H__ */ diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h new file mode 100644 index 000000000000..6c0573a0825c --- /dev/null +++ b/include/soc/fsl/qe/ucc_slow.h @@ -0,0 +1,277 @@ +/* + * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Description: + * Internal header file for UCC SLOW unit routines. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_SLOW_H__ +#define __UCC_SLOW_H__ + +#include <linux/kernel.h> + +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> + +#include <soc/fsl/qe/ucc.h> + +/* transmit BD's status */ +#define T_R 0x80000000 /* ready bit */ +#define T_PAD 0x40000000 /* add pads to short frames */ +#define T_W 0x20000000 /* wrap bit */ +#define T_I 0x10000000 /* interrupt on completion */ +#define T_L 0x08000000 /* last */ + +#define T_A 0x04000000 /* Address - the data transmitted as address + chars */ +#define T_TC 0x04000000 /* transmit CRC */ +#define T_CM 0x02000000 /* continuous mode */ +#define T_DEF 0x02000000 /* collision on previous attempt to transmit */ +#define T_P 0x01000000 /* Preamble - send Preamble sequence before + data */ +#define T_HB 0x01000000 /* heartbeat */ +#define T_NS 0x00800000 /* No Stop */ +#define T_LC 0x00800000 /* late collision */ +#define T_RL 0x00400000 /* retransmission limit */ +#define T_UN 0x00020000 /* underrun */ +#define T_CT 0x00010000 /* CTS lost */ +#define T_CSL 0x00010000 /* carrier sense lost */ +#define T_RC 0x003c0000 /* retry count */ + +/* Receive BD's status */ +#define R_E 0x80000000 /* buffer empty */ +#define R_W 0x20000000 /* wrap bit */ +#define R_I 0x10000000 /* interrupt on reception */ +#define R_L 0x08000000 /* last */ +#define R_C 0x08000000 /* the last byte in this buffer is a cntl + char */ +#define R_F 0x04000000 /* first */ +#define R_A 0x04000000 /* the first byte in this buffer is address + byte */ +#define R_CM 0x02000000 /* continuous mode */ +#define R_ID 0x01000000 /* buffer close on reception of idles */ +#define R_M 0x01000000 /* Frame received because of promiscuous + mode */ +#define R_AM 0x00800000 /* Address match */ +#define R_DE 0x00800000 /* Address match */ +#define R_LG 0x00200000 /* Break received */ +#define R_BR 0x00200000 /* Frame length violation */ +#define R_NO 0x00100000 /* Rx Non Octet Aligned Packet */ +#define R_FR 0x00100000 /* Framing Error (no stop bit) character + received */ +#define R_PR 0x00080000 /* Parity Error character received */ +#define R_AB 0x00080000 /* Frame Aborted */ +#define R_SH 0x00080000 /* frame is too short */ +#define R_CR 0x00040000 /* CRC Error */ +#define R_OV 0x00020000 /* Overrun */ +#define R_CD 0x00010000 /* CD lost */ +#define R_CL 0x00010000 /* this frame is closed because of a + collision */ + +/* Rx Data buffer must be 4 bytes aligned in most cases.*/ +#define 
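
The fast UCC API above is typically used as sketched below: ucc_fast_init() allocates the private state, ucc_fast_enable() turns on Tx/Rx, and ucc_fast_transmit_on_demand() is called right after the driver's transmit routine when an immediate poll is wanted. The protocol mode, the COMM_DIR_RX_AND_TX value from qe.h and all names here are illustrative assumptions, not part of the patch:

	#include <soc/fsl/qe/qe.h>
	#include <soc/fsl/qe/ucc_fast.h>

	static struct ucc_fast_private *uccf;

	static int example_ucc_fast_bring_up(struct ucc_fast_info *uf_info)
	{
		int ret;

		uf_info->mode = UCC_FAST_PROTOCOL_MODE_ETHERNET;

		ret = ucc_fast_init(uf_info, &uccf);
		if (ret)
			return ret;

		ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);
		return 0;
	}

	static void example_ucc_fast_kick_tx(void)
	{
		/* call right after the transmit routine has queued a frame */
		ucc_fast_transmit_on_demand(uccf);
	}
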
UCC_SLOW_RX_ALIGN 4 +#define UCC_SLOW_MRBLR_ALIGNMENT 4 +#define UCC_SLOW_PRAM_SIZE 0x100 +#define ALIGNMENT_OF_UCC_SLOW_PRAM 64 + +/* UCC Slow Channel Protocol Mode */ +enum ucc_slow_channel_protocol_mode { + UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002, + UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004, + UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008, +}; + +/* UCC Slow Transparent Transmit CRC (TCRC) */ +enum ucc_slow_transparent_tcrc { + /* 16-bit CCITT CRC (HDLC). (X16 + X12 + X5 + 1) */ + UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000, + /* CRC16 (BISYNC). (X16 + X15 + X2 + 1) */ + UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000, + /* 32-bit CCITT CRC (Ethernet and HDLC) */ + UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000, +}; + +/* UCC Slow oversampling rate for transmitter (TDCR) */ +enum ucc_slow_tx_oversampling_rate { + /* 1x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000, + /* 8x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000, + /* 16x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000, + /* 32x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000, +}; + +/* UCC Slow Oversampling rate for receiver (RDCR) +*/ +enum ucc_slow_rx_oversampling_rate { + /* 1x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000, + /* 8x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000, + /* 16x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000, + /* 32x clock mode */ + UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000, +}; + +/* UCC Slow Transmitter encoding method (TENC) +*/ +enum ucc_slow_tx_encoding_method { + UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000, + UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100 +}; + +/* UCC Slow Receiver decoding method (RENC) +*/ +enum ucc_slow_rx_decoding_method { + UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000, + UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800 +}; + +/* UCC Slow Diagnostic mode (DIAG) +*/ +enum ucc_slow_diag_mode { + UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000, + UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040, + UCC_SLOW_DIAG_MODE_ECHO = 0x00000080, + UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0 +}; + +struct ucc_slow_info { + int ucc_num; + int protocol; /* QE_CR_PROTOCOL_xxx */ + enum qe_clock rx_clock; + enum qe_clock tx_clock; + phys_addr_t regs; + int irq; + u16 uccm_mask; + int data_mem_part; + int init_tx; + int init_rx; + u32 tx_bd_ring_len; + u32 rx_bd_ring_len; + int rx_interrupts; + int brkpt_support; + int grant_support; + int tsa; + int cdp; + int cds; + int ctsp; + int ctss; + int rinv; + int tinv; + int rtsm; + int rfw; + int tci; + int tend; + int tfl; + int txsy; + u16 max_rx_buf_length; + enum ucc_slow_transparent_tcrc tcrc; + enum ucc_slow_channel_protocol_mode mode; + enum ucc_slow_diag_mode diag; + enum ucc_slow_tx_oversampling_rate tdcr; + enum ucc_slow_rx_oversampling_rate rdcr; + enum ucc_slow_tx_encoding_method tenc; + enum ucc_slow_rx_decoding_method renc; +}; + +struct ucc_slow_private { + struct ucc_slow_info *us_info; + struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */ + struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */ + u32 us_pram_offset; + int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ + int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ + int stopped_tx; /* Whether channel has been stopped for Tx + (STOP_TX, etc.) 
*/ + int stopped_rx; /* Whether channel has been stopped for Rx */ + struct list_head confQ; /* frames passed to chip waiting for tx */ + u32 first_tx_bd_mask; /* mask is used in Tx routine to save status + and length for first BD in a frame */ + u32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */ + u32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */ + struct qe_bd *confBd; /* next BD for confirm after Tx */ + struct qe_bd *tx_bd; /* next BD for new Tx request */ + struct qe_bd *rx_bd; /* next BD to collect after Rx */ + void *p_rx_frame; /* accumulating receive frame */ + u16 *p_ucce; /* a pointer to the event register in memory. + */ + u16 *p_uccm; /* a pointer to the mask register in memory */ + u16 saved_uccm; /* a saved mask for the RX Interrupt bits */ +#ifdef STATISTICS + u32 tx_frames; /* Transmitted frames counters */ + u32 rx_frames; /* Received frames counters (only frames + passed to application) */ + u32 rx_discarded; /* Discarded frames counters (frames that + were discarded by the driver due to + errors) */ +#endif /* STATISTICS */ +}; + +/* ucc_slow_init + * Initializes Slow UCC according to provided parameters. + * + * us_info - (In) pointer to the slow UCC info structure. + * uccs_ret - (Out) pointer to the slow UCC structure. + */ +int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret); + +/* ucc_slow_free + * Frees all resources for slow UCC. + * + * uccs - (In) pointer to the slow UCC structure. + */ +void ucc_slow_free(struct ucc_slow_private * uccs); + +/* ucc_slow_enable + * Enables a fast UCC port. + * This routine enables Tx and/or Rx through the General UCC Mode Register. + * + * uccs - (In) pointer to the slow UCC structure. + * mode - (In) TX, RX, or both. + */ +void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode); + +/* ucc_slow_disable + * Disables a fast UCC port. + * This routine disables Tx and/or Rx through the General UCC Mode Register. + * + * uccs - (In) pointer to the slow UCC structure. + * mode - (In) TX, RX, or both. + */ +void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode); + +/* ucc_slow_graceful_stop_tx + * Smoothly stops transmission on a specified slow UCC. + * + * uccs - (In) pointer to the slow UCC structure. + */ +void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs); + +/* ucc_slow_stop_tx + * Stops transmission on a specified slow UCC. + * + * uccs - (In) pointer to the slow UCC structure. + */ +void ucc_slow_stop_tx(struct ucc_slow_private * uccs); + +/* ucc_slow_restart_tx + * Restarts transmitting on a specified slow UCC. + * + * uccs - (In) pointer to the slow UCC structure. 
+ */ +void ucc_slow_restart_tx(struct ucc_slow_private *uccs); + +u32 ucc_slow_get_qe_cr_subblock(int uccs_num); + +#endif /* __UCC_SLOW_H__ */ diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index 74bc85473b58..15aa5f07c955 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h @@ -417,11 +417,13 @@ #define AC97_RATES_MIC_ADC 4 #define AC97_RATES_SPDIF 5 +#define AC97_NUM_GPIOS 16 /* * */ struct snd_ac97; +struct snd_ac97_gpio_priv; struct snd_pcm_chmap; struct snd_ac97_build_ops { @@ -529,6 +531,7 @@ struct snd_ac97 { struct delayed_work power_work; #endif struct device dev; + struct snd_ac97_gpio_priv *gpio_priv; struct snd_pcm_chmap *chmaps[2]; /* channel-maps (optional) */ }; diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index fa1d05512c09..c0abcdc11470 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -152,13 +152,18 @@ struct snd_compr { unsigned int direction; struct mutex lock; int device; +#ifdef CONFIG_SND_VERBOSE_PROCFS + char id[64]; + struct snd_info_entry *proc_root; + struct snd_info_entry *proc_info_entry; +#endif }; /* compress device register APIs */ int snd_compress_register(struct snd_compr *device); int snd_compress_deregister(struct snd_compr *device); int snd_compress_new(struct snd_card *card, int device, - int type, struct snd_compr *compr); + int type, const char *id, struct snd_compr *compr); /* dsp driver callback apis * For playback: driver should call snd_compress_fragment_elapsed() to let the diff --git a/include/sound/core.h b/include/sound/core.h index cdfecafff0f4..31079ea5e484 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -99,6 +99,7 @@ struct snd_card { char driver[16]; /* driver name */ char shortname[32]; /* short name of this soundcard */ char longname[80]; /* name of this soundcard */ + char irq_descr[32]; /* Interrupt description */ char mixername[80]; /* mixer name */ char components[128]; /* card components delimited with space */ diff --git a/include/sound/da7218.h b/include/sound/da7218.h new file mode 100644 index 000000000000..0dbb818ac116 --- /dev/null +++ b/include/sound/da7218.h @@ -0,0 +1,109 @@ +/* + * da7218.h - DA7218 ASoC Codec Driver Platform Data + * + * Copyright (c) 2015 Dialog Semiconductor + * + * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
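
For the snd_compress_new() signature change in the compress_driver.h hunk above, callers now supply an id string that names the stream's procfs entries. A hypothetical call site (device number and name are placeholders; the direction constant comes from the compress-offload UAPI):

	#include <sound/compress_driver.h>

	static int example_register_compr(struct snd_card *card,
					  struct snd_compr *compr)
	{
		/* the new "id" argument names the stream's proc entries */
		return snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK,
					"example-compr", compr);
	}
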
+ */ + +#ifndef _DA7218_PDATA_H +#define _DA7218_PDATA_H + +/* Mic Bias */ +enum da7218_micbias_voltage { + DA7218_MICBIAS_1_2V = -1, + DA7218_MICBIAS_1_6V, + DA7218_MICBIAS_1_8V, + DA7218_MICBIAS_2_0V, + DA7218_MICBIAS_2_2V, + DA7218_MICBIAS_2_4V, + DA7218_MICBIAS_2_6V, + DA7218_MICBIAS_2_8V, + DA7218_MICBIAS_3_0V, +}; + +enum da7218_mic_amp_in_sel { + DA7218_MIC_AMP_IN_SEL_DIFF = 0, + DA7218_MIC_AMP_IN_SEL_SE_P, + DA7218_MIC_AMP_IN_SEL_SE_N, +}; + +/* DMIC */ +enum da7218_dmic_data_sel { + DA7218_DMIC_DATA_LRISE_RFALL = 0, + DA7218_DMIC_DATA_LFALL_RRISE, +}; + +enum da7218_dmic_samplephase { + DA7218_DMIC_SAMPLE_ON_CLKEDGE = 0, + DA7218_DMIC_SAMPLE_BETWEEN_CLKEDGE, +}; + +enum da7218_dmic_clk_rate { + DA7218_DMIC_CLK_3_0MHZ = 0, + DA7218_DMIC_CLK_1_5MHZ, +}; + +/* Headphone Detect */ +enum da7218_hpldet_jack_rate { + DA7218_HPLDET_JACK_RATE_5US = 0, + DA7218_HPLDET_JACK_RATE_10US, + DA7218_HPLDET_JACK_RATE_20US, + DA7218_HPLDET_JACK_RATE_40US, + DA7218_HPLDET_JACK_RATE_80US, + DA7218_HPLDET_JACK_RATE_160US, + DA7218_HPLDET_JACK_RATE_320US, + DA7218_HPLDET_JACK_RATE_640US, +}; + +enum da7218_hpldet_jack_debounce { + DA7218_HPLDET_JACK_DEBOUNCE_OFF = 0, + DA7218_HPLDET_JACK_DEBOUNCE_2, + DA7218_HPLDET_JACK_DEBOUNCE_3, + DA7218_HPLDET_JACK_DEBOUNCE_4, +}; + +enum da7218_hpldet_jack_thr { + DA7218_HPLDET_JACK_THR_84PCT = 0, + DA7218_HPLDET_JACK_THR_88PCT, + DA7218_HPLDET_JACK_THR_92PCT, + DA7218_HPLDET_JACK_THR_96PCT, +}; + +struct da7218_hpldet_pdata { + enum da7218_hpldet_jack_rate jack_rate; + enum da7218_hpldet_jack_debounce jack_debounce; + enum da7218_hpldet_jack_thr jack_thr; + bool comp_inv; + bool hyst; + bool discharge; +}; + +struct da7218_pdata { + /* Mic */ + enum da7218_micbias_voltage micbias1_lvl; + enum da7218_micbias_voltage micbias2_lvl; + enum da7218_mic_amp_in_sel mic1_amp_in_sel; + enum da7218_mic_amp_in_sel mic2_amp_in_sel; + + /* DMIC */ + enum da7218_dmic_data_sel dmic1_data_sel; + enum da7218_dmic_data_sel dmic2_data_sel; + enum da7218_dmic_samplephase dmic1_samplephase; + enum da7218_dmic_samplephase dmic2_samplephase; + enum da7218_dmic_clk_rate dmic1_clk_rate; + enum da7218_dmic_clk_rate dmic2_clk_rate; + + /* HP Diff Supply - DA7217 only */ + bool hp_diff_single_supply; + + /* HP Detect - DA7218 only */ + struct da7218_hpldet_pdata *hpldet_pdata; +}; + +#endif /* _DA7218_PDATA_H */ diff --git a/include/sound/da7219.h b/include/sound/da7219.h index 3f39e135312d..02876acdc840 100644 --- a/include/sound/da7219.h +++ b/include/sound/da7219.h @@ -14,17 +14,10 @@ #ifndef __DA7219_PDATA_H #define __DA7219_PDATA_H -/* LDO */ -enum da7219_ldo_lvl_sel { - DA7219_LDO_LVL_SEL_1_05V = 0, - DA7219_LDO_LVL_SEL_1_10V, - DA7219_LDO_LVL_SEL_1_20V, - DA7219_LDO_LVL_SEL_1_40V, -}; - /* Mic Bias */ enum da7219_micbias_voltage { - DA7219_MICBIAS_1_8V = 1, + DA7219_MICBIAS_1_6V = 0, + DA7219_MICBIAS_1_8V, DA7219_MICBIAS_2_0V, DA7219_MICBIAS_2_2V, DA7219_MICBIAS_2_4V, @@ -41,9 +34,6 @@ enum da7219_mic_amp_in_sel { struct da7219_aad_pdata; struct da7219_pdata { - /* Internal LDO */ - enum da7219_ldo_lvl_sel ldo_lvl_sel; - /* Mic */ enum da7219_micbias_voltage micbias_lvl; enum da7219_mic_amp_in_sel mic_amp_in_sel; diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h index 8966ba7c9629..5681855396c4 100644 --- a/include/sound/designware_i2s.h +++ b/include/sound/designware_i2s.h @@ -45,6 +45,12 @@ struct i2s_platform_data { u32 snd_fmts; u32 snd_rates; + #define DW_I2S_QUIRK_COMP_REG_OFFSET (1 << 0) + #define DW_I2S_QUIRK_COMP_PARAM1 (1 << 1) + unsigned int 
quirks; + unsigned int i2s_reg_comp1; + unsigned int i2s_reg_comp2; + void *play_dma_data; void *capture_dma_data; bool (*filter)(struct dma_chan *chan, void *slave); diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index 930b41e5acf4..fa341fcb5829 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h @@ -10,6 +10,9 @@ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); int snd_hdac_display_power(struct hdac_bus *bus, bool enable); int snd_hdac_get_display_clk(struct hdac_bus *bus); +int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); +int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, + bool *audio_enabled, char *buffer, int max_bytes); int snd_hdac_i915_init(struct hdac_bus *bus); int snd_hdac_i915_exit(struct hdac_bus *bus); int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *); @@ -26,6 +29,17 @@ static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) { return 0; } +static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, + int rate) +{ + return 0; +} +static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, + bool *audio_enabled, char *buffer, + int max_bytes) +{ + return -ENODEV; +} static inline int snd_hdac_i915_init(struct hdac_bus *bus) { return -ENODEV; diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 94dc6a9772e0..ff1aecf325e8 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -233,6 +233,15 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_MLCTL_SPA (1<<16) #define AZX_MLCTL_CPA 23 + +/* registers for DMA Resume Capability Structure */ +#define AZX_DRSM_CAP_ID 0x5 +#define AZX_REG_DRSM_CTL 0x4 +/* Base used to calculate the iterating register offset */ +#define AZX_DRSM_BASE 0x08 +/* Interval used to calculate the iterating register offset */ +#define AZX_DRSM_INTERVAL 0x08 + /* * helpers to read the stream position */ diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index a4cadd9c297a..07fa59237feb 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -12,6 +12,7 @@ * @spbcap: SPIB capabilities pointer * @mlcap: MultiLink capabilities pointer * @gtscap: gts capabilities pointer + * @drsmcap: dma resume capabilities pointer * @hlink_list: link list of HDA links */ struct hdac_ext_bus { @@ -23,6 +24,7 @@ struct hdac_ext_bus { void __iomem *spbcap; void __iomem *mlcap; void __iomem *gtscap; + void __iomem *drsmcap; struct list_head hlink_list; }; @@ -72,6 +74,9 @@ enum hdac_ext_stream_type { * @pplc_addr: processing pipe link stream pointer * @spib_addr: software position in buffers stream pointer * @fifo_addr: software position Max fifos stream pointer + * @dpibr_addr: DMA position in buffer resume pointer + * @dpib: DMA position in buffer + * @lpib: Linear position in buffer * @decoupled: stream host and link is decoupled * @link_locked: link is locked * @link_prepared: link is prepared @@ -86,6 +91,10 @@ struct hdac_ext_stream { void __iomem *spib_addr; void __iomem *fifo_addr; + void __iomem *dpibr_addr; + + u32 dpib; + u32 lpib; bool decoupled:1; bool link_locked:1; bool link_prepared; @@ -116,6 +125,11 @@ int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus, struct hdac_ext_stream *stream, u32 value); int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus, struct hdac_ext_stream *stream); +void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus, + bool enable, int 
index); +int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus, + struct hdac_ext_stream *stream, u32 value); +int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value); void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream); void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream); @@ -133,6 +147,7 @@ struct hdac_ext_link { int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link); int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link); +int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus); int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus); void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link, int stream); @@ -186,9 +201,15 @@ struct hdac_ext_device { /* codec ops */ struct hdac_ext_codec_ops ops; + struct snd_card *card; + void *scodec; void *private_data; }; +struct hdac_ext_dma_params { + u32 format; + u8 stream_tag; +}; #define to_ehdac_device(dev) (container_of((dev), \ struct hdac_ext_device, hdac)) /* diff --git a/include/sound/i2c.h b/include/sound/i2c.h index d125ff8c85e8..835254de2039 100644 --- a/include/sound/i2c.h +++ b/include/sound/i2c.h @@ -66,7 +66,7 @@ struct snd_i2c_bus { struct snd_i2c_bit_ops *bit; void *ops; } hw_ops; /* lowlevel operations */ - struct snd_i2c_ops *ops; /* midlevel operations */ + const struct snd_i2c_ops *ops; /* midlevel operations */ unsigned long private_value; void *private_data; diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index f6cbef78db62..fdabbb4ddba9 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -130,7 +130,7 @@ struct snd_rawmidi { int ossreg; #endif - struct snd_rawmidi_global_ops *ops; + const struct snd_rawmidi_global_ops *ops; struct snd_rawmidi_str streams[2]; diff --git a/include/sound/rt5659.h b/include/sound/rt5659.h new file mode 100644 index 000000000000..656c4d58948d --- /dev/null +++ b/include/sound/rt5659.h @@ -0,0 +1,49 @@ +/* + * linux/sound/rt5659.h -- Platform data for RT5659 + * + * Copyright 2013 Realtek Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_SND_RT5659_H +#define __LINUX_SND_RT5659_H + +enum rt5659_dmic1_data_pin { + RT5659_DMIC1_NULL, + RT5659_DMIC1_DATA_IN2N, + RT5659_DMIC1_DATA_GPIO5, + RT5659_DMIC1_DATA_GPIO9, + RT5659_DMIC1_DATA_GPIO11, +}; + +enum rt5659_dmic2_data_pin { + RT5659_DMIC2_NULL, + RT5659_DMIC2_DATA_IN2P, + RT5659_DMIC2_DATA_GPIO6, + RT5659_DMIC2_DATA_GPIO10, + RT5659_DMIC2_DATA_GPIO12, +}; + +enum rt5659_jd_src { + RT5659_JD_NULL, + RT5659_JD3, +}; + +struct rt5659_platform_data { + bool in1_diff; + bool in3_diff; + bool in4_diff; + + int ldo1_en; /* GPIO for LDO1_EN */ + int reset; /* GPIO for RESET */ + + enum rt5659_dmic1_data_pin dmic1_data_pin; + enum rt5659_dmic2_data_pin dmic2_data_pin; + enum rt5659_jd_src jd_src; +}; + +#endif + diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 212eaaf172ed..964b7de1a1cc 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -222,6 +222,7 @@ struct snd_soc_dai_driver { const char *name; unsigned int id; unsigned int base; + struct snd_soc_dobj dobj; /* DAI driver callbacks */ int (*probe)(struct snd_soc_dai *dai); diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 95a937eafb79..97069466c38d 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -49,6 +49,9 @@ struct device; #define SND_SOC_DAPM_SIGGEN(wname) \ { .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \ .num_kcontrols = 0, .reg = SND_SOC_NOPM } +#define SND_SOC_DAPM_SINK(wname) \ +{ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_INPUT(wname) \ { .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \ .num_kcontrols = 0, .reg = SND_SOC_NOPM } @@ -485,6 +488,7 @@ enum snd_soc_dapm_type { snd_soc_dapm_aif_in, /* audio interface input */ snd_soc_dapm_aif_out, /* audio interface output */ snd_soc_dapm_siggen, /* signal generator */ + snd_soc_dapm_sink, snd_soc_dapm_dai_in, /* link to DAI structure */ snd_soc_dapm_dai_out, snd_soc_dapm_dai_link, /* link between two DAI structures */ diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h index 086cd7ff6ddc..5b68e3f5aa85 100644 --- a/include/sound/soc-topology.h +++ b/include/sound/soc-topology.h @@ -92,8 +92,10 @@ struct snd_soc_tplg_kcontrol_ops { /* Bytes ext operations, for TLV byte controls */ struct snd_soc_tplg_bytes_ext_ops { u32 id; - int (*get)(unsigned int __user *bytes, unsigned int size); - int (*put)(const unsigned int __user *bytes, unsigned int size); + int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes, + unsigned int size); + int (*put)(struct snd_kcontrol *kcontrol, + const unsigned int __user *bytes, unsigned int size); }; /* diff --git a/include/sound/soc.h b/include/sound/soc.h index a8b4b9c8b1d2..7afb72ceac56 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -110,6 +110,14 @@ .put = snd_soc_put_volsw, \ .private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \ max, invert, 0) } +#define SOC_DOUBLE_STS(xname, reg, shift_left, shift_right, max, invert) \ +{ \ + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ + .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \ + .access = SNDRV_CTL_ELEM_ACCESS_READ | \ + SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ + .private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \ + max, invert, 0) } #define SOC_DOUBLE_R(xname, reg_left, reg_right, xshift, xmax, xinvert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ .info = 
snd_soc_info_volsw, \ @@ -293,6 +301,9 @@ {.base = xbase, .num_regs = xregs, \ .mask = xmask }) } +/* + * SND_SOC_BYTES_EXT is deprecated, please USE SND_SOC_BYTES_TLV instead + */ #define SND_SOC_BYTES_EXT(xname, xcount, xhandler_get, xhandler_put) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .info = snd_soc_bytes_info_ext, \ @@ -787,6 +798,7 @@ struct snd_soc_component { unsigned int registered_as_component:1; struct list_head list; + struct list_head list_aux; /* for auxiliary component of the card */ struct snd_soc_dai_driver *dai_drv; int num_dai; @@ -830,6 +842,9 @@ struct snd_soc_component { int (*probe)(struct snd_soc_component *); void (*remove)(struct snd_soc_component *); + /* machine specific init */ + int (*init)(struct snd_soc_component *component); + #ifdef CONFIG_DEBUG_FS void (*init_debugfs)(struct snd_soc_component *component); const char *debugfs_prefix; @@ -1037,6 +1052,9 @@ struct snd_soc_dai_link { /* pmdown_time is ignored at stop */ unsigned int ignore_pmdown_time:1; + + struct list_head list; /* DAI link list of the soc card */ + struct snd_soc_dobj dobj; /* For topology */ }; struct snd_soc_codec_conf { @@ -1101,12 +1119,20 @@ struct snd_soc_card { struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level); + int (*add_dai_link)(struct snd_soc_card *, + struct snd_soc_dai_link *link); + void (*remove_dai_link)(struct snd_soc_card *, + struct snd_soc_dai_link *link); + long pmdown_time; /* CPU <--> Codec DAI links */ - struct snd_soc_dai_link *dai_link; - int num_links; - struct snd_soc_pcm_runtime *rtd; + struct snd_soc_dai_link *dai_link; /* predefined links only */ + int num_links; /* predefined links only */ + struct list_head dai_link_list; /* all links */ + int num_dai_links; + + struct list_head rtd_list; int num_rtd; /* optional codec specific configuration */ @@ -1119,8 +1145,7 @@ struct snd_soc_card { */ struct snd_soc_aux_dev *aux_dev; int num_aux_devs; - struct snd_soc_pcm_runtime *rtd_aux; - int num_aux_rtd; + struct list_head aux_comp_list; const struct snd_kcontrol_new *controls; int num_controls; @@ -1201,6 +1226,9 @@ struct snd_soc_pcm_runtime { struct dentry *debugfs_dpcm_root; struct dentry *debugfs_dpcm_state; #endif + + unsigned int num; /* 0-based and monotonic increasing */ + struct list_head list; /* rtd list of the soc card */ }; /* mixer control */ @@ -1225,8 +1253,10 @@ struct soc_bytes_ext { struct snd_soc_dobj dobj; /* used for TLV byte control */ - int (*get)(unsigned int __user *bytes, unsigned int size); - int (*put)(const unsigned int __user *bytes, unsigned int size); + int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes, + unsigned int size); + int (*put)(struct snd_kcontrol *kcontrol, const unsigned int __user *bytes, + unsigned int size); }; /* multi register control */ @@ -1523,6 +1553,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card) INIT_LIST_HEAD(&card->widgets); INIT_LIST_HEAD(&card->paths); INIT_LIST_HEAD(&card->dapm_list); + INIT_LIST_HEAD(&card->aux_comp_list); } static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc) @@ -1644,6 +1675,14 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev, struct device_node *of_node, struct snd_soc_dai_link *dai_link); +int snd_soc_add_dai_link(struct snd_soc_card *card, + struct snd_soc_dai_link *dai_link); +void snd_soc_remove_dai_link(struct snd_soc_card *card, + struct snd_soc_dai_link *dai_link); + +int snd_soc_register_dai(struct snd_soc_component *component, + struct snd_soc_dai_driver 
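
The reworked bytes-ext callbacks above (in both snd_soc_tplg_bytes_ext_ops and struct soc_bytes_ext) now receive the snd_kcontrol, so a handler can reach its control data directly. A sketch of a get() handler under the new prototype; the private_value layout and placeholder state are assumptions for illustration:

	#include <linux/uaccess.h>
	#include <sound/soc.h>

	static int example_bytes_get(struct snd_kcontrol *kcontrol,
				     unsigned int __user *bytes, unsigned int size)
	{
		struct soc_bytes_ext *be = (void *)kcontrol->private_value;
		u8 state[16] = { 0 };	/* placeholder for real driver state */

		if (size > sizeof(state) || size > be->max)
			return -EINVAL;

		return copy_to_user(bytes, state, size) ? -EFAULT : 0;
	}
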
*dai_drv); + #include <sound/soc-dai.h> #ifdef CONFIG_DEBUG_FS @@ -1655,7 +1694,7 @@ extern const struct dev_pm_ops snd_soc_pm_ops; /* Helper functions */ static inline void snd_soc_dapm_mutex_lock(struct snd_soc_dapm_context *dapm) { - mutex_lock(&dapm->card->dapm_mutex); + mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); } static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm) diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 2d8639ea64d5..6e3945f64102 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -40,6 +40,11 @@ assign, print, reg, unreg) \ DEFINE_TRACE_FN(name, reg, unreg) +#undef TRACE_EVENT_FN_COND +#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \ + assign, print, reg, unreg) \ + DEFINE_TRACE_FN(name, reg, unreg) + #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ DEFINE_TRACE(name) @@ -93,6 +98,7 @@ #undef TRACE_EVENT #undef TRACE_EVENT_FN +#undef TRACE_EVENT_FN_COND #undef TRACE_EVENT_CONDITION #undef DECLARE_EVENT_CLASS #undef DEFINE_EVENT diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 00b4a6308249..a1b488809f06 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1265,6 +1265,44 @@ TRACE_EVENT(f2fs_destroy_extent_tree, __entry->node_cnt) ); +DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, + + TP_PROTO(struct super_block *sb, int type, int count), + + TP_ARGS(sb, type, count), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, type) + __field(int, count) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->type = type; + __entry->count = count; + ), + + TP_printk("dev = (%d,%d), %s, dirty count = %d", + show_dev(__entry), + show_file_type(__entry->type), + __entry->count) +); + +DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter, + + TP_PROTO(struct super_block *sb, int type, int count), + + TP_ARGS(sb, type, count) +); + +DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit, + + TP_PROTO(struct super_block *sb, int type, int count), + + TP_ARGS(sb, type, count) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h new file mode 100644 index 000000000000..4cf6bac4686d --- /dev/null +++ b/include/trace/events/fib6.h @@ -0,0 +1,76 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fib6 + +#if !defined(_TRACE_FIB6_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FIB6_H + +#include <linux/in6.h> +#include <net/flow.h> +#include <net/ip6_fib.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(fib6_table_lookup, + + TP_PROTO(const struct net *net, const struct rt6_info *rt, + u32 tb_id, const struct flowi6 *flp), + + TP_ARGS(net, rt, tb_id, flp), + + TP_STRUCT__entry( + __field( u32, tb_id ) + + __field( int, oif ) + __field( int, iif ) + __field( __u8, tos ) + __field( __u8, scope ) + __field( __u8, flags ) + __array( __u8, src, 16 ) + __array( __u8, dst, 16 ) + + __dynamic_array( char, name, IFNAMSIZ ) + __array( __u8, gw, 16 ) + ), + + TP_fast_assign( + struct in6_addr *in6; + + __entry->tb_id = tb_id; + __entry->oif = flp->flowi6_oif; + __entry->iif = flp->flowi6_iif; + __entry->tos = flp->flowi6_tos; + __entry->scope = flp->flowi6_scope; + __entry->flags = flp->flowi6_flags; + + in6 = (struct in6_addr *)__entry->src; + *in6 = flp->saddr; + + in6 = (struct in6_addr *)__entry->dst; + *in6 = flp->daddr; + + if (rt->rt6i_idev) { + __assign_str(name, 
rt->rt6i_idev->dev->name); + } else { + __assign_str(name, ""); + } + if (rt == net->ipv6.ip6_null_entry) { + struct in6_addr in6_zero = {}; + + in6 = (struct in6_addr *)__entry->gw; + *in6 = in6_zero; + + } else if (rt) { + in6 = (struct in6_addr *)__entry->gw; + *in6 = rt->rt6i_gateway; + } + ), + + TP_printk("table %3u oif %d iif %d src %pI6c dst %pI6c tos %d scope %d flags %x ==> dev %s gw %pI6c", + __entry->tb_id, __entry->oif, __entry->iif, + __entry->src, __entry->dst, __entry->tos, __entry->scope, + __entry->flags, __get_str(name), __entry->gw) +); + +#endif /* _TRACE_FIB6_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index c72f2dc01d0b..63a7680347cb 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -34,6 +34,83 @@ { F_WRLCK, "F_WRLCK" }, \ { F_UNLCK, "F_UNLCK" }) +TRACE_EVENT(locks_get_lock_context, + TP_PROTO(struct inode *inode, int type, struct file_lock_context *ctx), + + TP_ARGS(inode, type, ctx), + + TP_STRUCT__entry( + __field(unsigned long, i_ino) + __field(dev_t, s_dev) + __field(unsigned char, type) + __field(struct file_lock_context *, ctx) + ), + + TP_fast_assign( + __entry->s_dev = inode->i_sb->s_dev; + __entry->i_ino = inode->i_ino; + __entry->type = type; + __entry->ctx = ctx; + ), + + TP_printk("dev=0x%x:0x%x ino=0x%lx type=%s ctx=%p", + MAJOR(__entry->s_dev), MINOR(__entry->s_dev), + __entry->i_ino, show_fl_type(__entry->type), __entry->ctx) +); + +DECLARE_EVENT_CLASS(filelock_lock, + TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), + + TP_ARGS(inode, fl, ret), + + TP_STRUCT__entry( + __field(struct file_lock *, fl) + __field(unsigned long, i_ino) + __field(dev_t, s_dev) + __field(struct file_lock *, fl_next) + __field(fl_owner_t, fl_owner) + __field(unsigned int, fl_pid) + __field(unsigned int, fl_flags) + __field(unsigned char, fl_type) + __field(loff_t, fl_start) + __field(loff_t, fl_end) + __field(int, ret) + ), + + TP_fast_assign( + __entry->fl = fl ? fl : NULL; + __entry->s_dev = inode->i_sb->s_dev; + __entry->i_ino = inode->i_ino; + __entry->fl_next = fl ? fl->fl_next : NULL; + __entry->fl_owner = fl ? fl->fl_owner : NULL; + __entry->fl_pid = fl ? fl->fl_pid : 0; + __entry->fl_flags = fl ? fl->fl_flags : 0; + __entry->fl_type = fl ? fl->fl_type : 0; + __entry->fl_start = fl ? fl->fl_start : 0; + __entry->fl_end = fl ? 
fl->fl_end : 0; + __entry->ret = ret; + ), + + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d", + __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), + __entry->i_ino, __entry->fl_next, __entry->fl_owner, + __entry->fl_pid, show_fl_flags(__entry->fl_flags), + show_fl_type(__entry->fl_type), + __entry->fl_start, __entry->fl_end, __entry->ret) +); + +DEFINE_EVENT(filelock_lock, posix_lock_inode, + TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), + TP_ARGS(inode, fl, ret)); + +DEFINE_EVENT(filelock_lock, fcntl_setlk, + TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), + TP_ARGS(inode, fl, ret)); + +DEFINE_EVENT(filelock_lock, locks_remove_posix, + TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), + TP_ARGS(inode, fl, ret)); + DECLARE_EVENT_CLASS(filelock_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h new file mode 100644 index 000000000000..0f803d2783e3 --- /dev/null +++ b/include/trace/events/huge_memory.h @@ -0,0 +1,137 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM huge_memory + +#if !defined(__HUGE_MEMORY_H) || defined(TRACE_HEADER_MULTI_READ) +#define __HUGE_MEMORY_H + +#include <linux/tracepoint.h> + +#include <trace/events/gfpflags.h> + +#define SCAN_STATUS \ + EM( SCAN_FAIL, "failed") \ + EM( SCAN_SUCCEED, "succeeded") \ + EM( SCAN_PMD_NULL, "pmd_null") \ + EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ + EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ + EM( SCAN_PAGE_RO, "no_writable_page") \ + EM( SCAN_NO_REFERENCED_PAGE, "no_referenced_page") \ + EM( SCAN_PAGE_NULL, "page_null") \ + EM( SCAN_SCAN_ABORT, "scan_aborted") \ + EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ + EM( SCAN_PAGE_LRU, "page_not_in_lru") \ + EM( SCAN_PAGE_LOCK, "page_locked") \ + EM( SCAN_PAGE_ANON, "page_not_anon") \ + EM( SCAN_PAGE_COMPOUND, "page_compound") \ + EM( SCAN_ANY_PROCESS, "no_process_for_page") \ + EM( SCAN_VMA_NULL, "vma_null") \ + EM( SCAN_VMA_CHECK, "vma_check_failed") \ + EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \ + EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ + EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ + EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ + EMe( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") + +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +SCAN_STATUS + +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +TRACE_EVENT(mm_khugepaged_scan_pmd, + + TP_PROTO(struct mm_struct *mm, unsigned long pfn, bool writable, + bool referenced, int none_or_zero, int status), + + TP_ARGS(mm, pfn, writable, referenced, none_or_zero, status), + + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, pfn) + __field(bool, writable) + __field(bool, referenced) + __field(int, none_or_zero) + __field(int, status) + ), + + TP_fast_assign( + __entry->mm = mm; + __entry->pfn = pfn; + __entry->writable = writable; + __entry->referenced = referenced; + __entry->none_or_zero = none_or_zero; + __entry->status = status; + ), + + TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s", + __entry->mm, + __entry->pfn, + __entry->writable, + __entry->referenced, + __entry->none_or_zero, + __print_symbolic(__entry->status, SCAN_STATUS)) +); + +TRACE_EVENT(mm_collapse_huge_page, + + TP_PROTO(struct mm_struct 
*mm, int isolated, int status), + + TP_ARGS(mm, isolated, status), + + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(int, isolated) + __field(int, status) + ), + + TP_fast_assign( + __entry->mm = mm; + __entry->isolated = isolated; + __entry->status = status; + ), + + TP_printk("mm=%p, isolated=%d, status=%s", + __entry->mm, + __entry->isolated, + __print_symbolic(__entry->status, SCAN_STATUS)) +); + +TRACE_EVENT(mm_collapse_huge_page_isolate, + + TP_PROTO(unsigned long pfn, int none_or_zero, + bool referenced, bool writable, int status), + + TP_ARGS(pfn, none_or_zero, referenced, writable, status), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(int, none_or_zero) + __field(bool, referenced) + __field(bool, writable) + __field(int, status) + ), + + TP_fast_assign( + __entry->pfn = pfn; + __entry->none_or_zero = none_or_zero; + __entry->referenced = referenced; + __entry->writable = writable; + __entry->status = status; + ), + + TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, writable=%d, status=%s", + __entry->pfn, + __entry->none_or_zero, + __entry->referenced, + __entry->writable, + __print_symbolic(__entry->status, SCAN_STATUS)) +); + +#endif /* __HUGE_MEMORY_H */ +#include <trace/define_trace.h> diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h new file mode 100644 index 000000000000..6fb644029c80 --- /dev/null +++ b/include/trace/events/page_isolation.h @@ -0,0 +1,38 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM page_isolation + +#if !defined(_TRACE_PAGE_ISOLATION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_ISOLATION_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(test_pages_isolated, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long fin_pfn), + + TP_ARGS(start_pfn, end_pfn, fin_pfn), + + TP_STRUCT__entry( + __field(unsigned long, start_pfn) + __field(unsigned long, end_pfn) + __field(unsigned long, fin_pfn) + ), + + TP_fast_assign( + __entry->start_pfn = start_pfn; + __entry->end_pfn = end_pfn; + __entry->fin_pfn = fin_pfn; + ), + + TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s", + __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn, + __entry->end_pfn == __entry->fin_pfn ? "success" : "fail") +); + +#endif /* _TRACE_PAGE_ISOLATION_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h index 22afa26e34b2..ee7754c6e4a1 100644 --- a/include/trace/events/v4l2.h +++ b/include/trace/events/v4l2.h @@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(vb2_v4l2_event_class, __field(int, minor) __field(u32, flags) __field(u32, field) - __field(s64, timestamp) + __field(u64, timestamp) __field(u32, timecode_type) __field(u32, timecode_flags) __field(u8, timecode_frames) @@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(vb2_v4l2_event_class, __entry->minor = owner ? 
owner->vdev->minor : -1; __entry->flags = vbuf->flags; __entry->field = vbuf->field; - __entry->timestamp = timeval_to_ns(&vbuf->timestamp); + __entry->timestamp = vb->timestamp; __entry->timecode_type = vbuf->timecode.type; __entry->timecode_flags = vbuf->timecode.flags; __entry->timecode_frames = vbuf->timecode.frames; diff --git a/include/trace/events/vb2.h b/include/trace/events/vb2.h index bfeceeba3744..c1a22416ed05 100644 --- a/include/trace/events/vb2.h +++ b/include/trace/events/vb2.h @@ -18,6 +18,7 @@ DECLARE_EVENT_CLASS(vb2_event_class, __field(u32, index) __field(u32, type) __field(u32, bytesused) + __field(u64, timestamp) ), TP_fast_assign( @@ -28,14 +29,16 @@ DECLARE_EVENT_CLASS(vb2_event_class, __entry->index = vb->index; __entry->type = vb->type; __entry->bytesused = vb->planes[0].bytesused; + __entry->timestamp = vb->timestamp; ), TP_printk("owner = %p, queued = %u, owned_by_drv = %d, index = %u, " - "type = %u, bytesused = %u", __entry->owner, + "type = %u, bytesused = %u, timestamp = %llu", __entry->owner, __entry->queued_count, __entry->owned_by_drv_count, __entry->index, __entry->type, - __entry->bytesused + __entry->bytesused, + __entry->timestamp ) ) diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index f66476b96264..31763dd8db1c 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -330,10 +330,9 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate, TRACE_EVENT(mm_vmscan_writepage, - TP_PROTO(struct page *page, - int reclaim_flags), + TP_PROTO(struct page *page), - TP_ARGS(page, reclaim_flags), + TP_ARGS(page), TP_STRUCT__entry( __field(unsigned long, pfn) @@ -342,7 +341,7 @@ TRACE_EVENT(mm_vmscan_writepage, TP_fast_assign( __entry->pfn = page_to_pfn(page); - __entry->reclaim_flags = reclaim_flags; + __entry->reclaim_flags = trace_reclaim_flags(page); ), TP_printk("page=%p pfn=%lu flags=%s", @@ -353,11 +352,11 @@ TRACE_EVENT(mm_vmscan_writepage, TRACE_EVENT(mm_vmscan_lru_shrink_inactive, - TP_PROTO(int nid, int zid, - unsigned long nr_scanned, unsigned long nr_reclaimed, - int priority, int reclaim_flags), + TP_PROTO(struct zone *zone, + unsigned long nr_scanned, unsigned long nr_reclaimed, + int priority, int file), - TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags), + TP_ARGS(zone, nr_scanned, nr_reclaimed, priority, file), TP_STRUCT__entry( __field(int, nid) @@ -369,12 +368,12 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, ), TP_fast_assign( - __entry->nid = nid; - __entry->zid = zid; + __entry->nid = zone_to_nid(zone); + __entry->zid = zone_idx(zone); __entry->nr_scanned = nr_scanned; __entry->nr_reclaimed = nr_reclaimed; __entry->priority = priority; - __entry->reclaim_flags = reclaim_flags; + __entry->reclaim_flags = trace_shrink_flags(file); ), TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index de996cf61053..170c93bbdbb7 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -123,6 +123,12 @@ TRACE_MAKE_SYSTEM_STR(); TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ +#undef TRACE_EVENT_FN_COND +#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \ + assign, print, reg, unreg) \ + TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ + PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ + #undef TRACE_EVENT_FLAGS #define TRACE_EVENT_FLAGS(name, value) \ 
__TRACE_EVENT_FLAGS(name, value) diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index a74dd84bbb6d..58274382a616 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -41,6 +41,7 @@ #define MADV_DONTNEED 4 /* don't need these pages */ /* common parameters: try to keep these consistent across architectures */ +#define MADV_FREE 8 /* free pages only if memory pressure */ #define MADV_REMOVE 9 /* remove these pages & resources */ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 5c15c2a5c123..fb8a41668382 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -87,4 +87,7 @@ #define SO_ATTACH_BPF 50 #define SO_DETACH_BPF SO_DETACH_FILTER +#define SO_ATTACH_REUSEPORT_CBPF 51 +#define SO_ATTACH_REUSEPORT_EBPF 52 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 1324b0292ec2..2622b33fb2ec 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -715,9 +715,11 @@ __SYSCALL(__NR_userfaultfd, sys_userfaultfd) __SYSCALL(__NR_membarrier, sys_membarrier) #define __NR_mlock2 284 __SYSCALL(__NR_mlock2, sys_mlock2) +#define __NR_copy_file_range 285 +__SYSCALL(__NR_copy_file_range, sys_copy_file_range) #undef __NR_syscalls -#define __NR_syscalls 285 +#define __NR_syscalls 286 /* * All syscalls below here should go away really, diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 38d437096c35..9355dd8eff3b 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -3,6 +3,7 @@ header-y += drm.h header-y += drm_fourcc.h header-y += drm_mode.h header-y += drm_sarea.h +header-y += amdgpu_drm.h header-y += exynos_drm.h header-y += i810_drm.h header-y += i915_drm.h @@ -17,4 +18,5 @@ header-y += tegra_drm.h header-y += via_drm.h header-y += vmwgfx_drm.h header-y += msm_drm.h +header-y += vc4_drm.h header-y += virtgpu_drm.h diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index e52933a73580..453a76af123c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -76,19 +76,19 @@ struct drm_amdgpu_gem_create_in { /** the requested memory size */ - uint64_t bo_size; + __u64 bo_size; /** physical start_addr alignment in bytes for some HW requirements */ - uint64_t alignment; + __u64 alignment; /** the requested memory domains */ - uint64_t domains; + __u64 domains; /** allocation flags */ - uint64_t domain_flags; + __u64 domain_flags; }; struct drm_amdgpu_gem_create_out { /** returned GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; }; union drm_amdgpu_gem_create { @@ -105,28 +105,28 @@ union drm_amdgpu_gem_create { struct drm_amdgpu_bo_list_in { /** Type of operation */ - uint32_t operation; + __u32 operation; /** Handle of list or 0 if we want to create one */ - uint32_t list_handle; + __u32 list_handle; /** Number of BOs in list */ - uint32_t bo_number; + __u32 bo_number; /** Size of each element describing BO */ - uint32_t bo_info_size; + __u32 bo_info_size; /** Pointer to array describing BOs */ - uint64_t bo_info_ptr; + __u64 bo_info_ptr; }; struct drm_amdgpu_bo_list_entry { /** Handle of BO */ - uint32_t bo_handle; + __u32 bo_handle; /** New (if specified) BO priority to be used during migration */ - uint32_t 
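
The new __NR_copy_file_range entry wired up above can be exercised from userspace even before a libc wrapper exists by issuing the raw syscall; a minimal sketch, guarded for availability (names are illustrative):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static ssize_t example_copy_file_range(int fd_in, int fd_out, size_t len)
	{
		/* NULL offsets: use and advance the file positions of both fds */
	#ifdef __NR_copy_file_range
		return syscall(__NR_copy_file_range, fd_in, NULL, fd_out, NULL,
			       len, 0);
	#else
		errno = ENOSYS;
		return -1;
	#endif
	}
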
bo_priority; + __u32 bo_priority; }; struct drm_amdgpu_bo_list_out { /** Handle of resource list */ - uint32_t list_handle; - uint32_t _pad; + __u32 list_handle; + __u32 _pad; }; union drm_amdgpu_bo_list { @@ -150,26 +150,26 @@ union drm_amdgpu_bo_list { struct drm_amdgpu_ctx_in { /** AMDGPU_CTX_OP_* */ - uint32_t op; + __u32 op; /** For future use, no flags defined so far */ - uint32_t flags; - uint32_t ctx_id; - uint32_t _pad; + __u32 flags; + __u32 ctx_id; + __u32 _pad; }; union drm_amdgpu_ctx_out { struct { - uint32_t ctx_id; - uint32_t _pad; + __u32 ctx_id; + __u32 _pad; } alloc; struct { /** For future use, no flags defined so far */ - uint64_t flags; + __u64 flags; /** Number of resets caused by this context so far. */ - uint32_t hangs; + __u32 hangs; /** Reset status since the last call of the ioctl. */ - uint32_t reset_status; + __u32 reset_status; } state; }; @@ -189,12 +189,12 @@ union drm_amdgpu_ctx { #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) struct drm_amdgpu_gem_userptr { - uint64_t addr; - uint64_t size; + __u64 addr; + __u64 size; /* AMDGPU_GEM_USERPTR_* */ - uint32_t flags; + __u32 flags; /* Resulting GEM handle */ - uint32_t handle; + __u32 handle; }; /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ @@ -226,28 +226,28 @@ struct drm_amdgpu_gem_userptr { /** The same structure is shared for input/output */ struct drm_amdgpu_gem_metadata { /** GEM Object handle */ - uint32_t handle; + __u32 handle; /** Do we want get or set metadata */ - uint32_t op; + __u32 op; struct { /** For future use, no flags defined so far */ - uint64_t flags; + __u64 flags; /** family specific tiling info */ - uint64_t tiling_info; - uint32_t data_size_bytes; - uint32_t data[64]; + __u64 tiling_info; + __u32 data_size_bytes; + __u32 data[64]; } data; }; struct drm_amdgpu_gem_mmap_in { /** the GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; }; struct drm_amdgpu_gem_mmap_out { /** mmap offset from the vma offset manager */ - uint64_t addr_ptr; + __u64 addr_ptr; }; union drm_amdgpu_gem_mmap { @@ -257,18 +257,18 @@ union drm_amdgpu_gem_mmap { struct drm_amdgpu_gem_wait_idle_in { /** GEM object handle */ - uint32_t handle; + __u32 handle; /** For future use, no flags defined so far */ - uint32_t flags; + __u32 flags; /** Absolute timeout to wait */ - uint64_t timeout; + __u64 timeout; }; struct drm_amdgpu_gem_wait_idle_out { /** BO status: 0 - BO is idle, 1 - BO is busy */ - uint32_t status; + __u32 status; /** Returned current memory domain */ - uint32_t domain; + __u32 domain; }; union drm_amdgpu_gem_wait_idle { @@ -278,18 +278,18 @@ union drm_amdgpu_gem_wait_idle { struct drm_amdgpu_wait_cs_in { /** Command submission handle */ - uint64_t handle; + __u64 handle; /** Absolute timeout to wait */ - uint64_t timeout; - uint32_t ip_type; - uint32_t ip_instance; - uint32_t ring; - uint32_t ctx_id; + __u64 timeout; + __u32 ip_type; + __u32 ip_instance; + __u32 ring; + __u32 ctx_id; }; struct drm_amdgpu_wait_cs_out { /** CS status: 0 - CS completed, 1 - CS still busy */ - uint64_t status; + __u64 status; }; union drm_amdgpu_wait_cs { @@ -303,11 +303,11 @@ union drm_amdgpu_wait_cs { /* Sets or returns a value associated with a buffer. 
*/ struct drm_amdgpu_gem_op { /** GEM object handle */ - uint32_t handle; + __u32 handle; /** AMDGPU_GEM_OP_* */ - uint32_t op; + __u32 op; /** Input or return value */ - uint64_t value; + __u64 value; }; #define AMDGPU_VA_OP_MAP 1 @@ -326,18 +326,18 @@ struct drm_amdgpu_gem_op { struct drm_amdgpu_gem_va { /** GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; /** AMDGPU_VA_OP_* */ - uint32_t operation; + __u32 operation; /** AMDGPU_VM_PAGE_* */ - uint32_t flags; + __u32 flags; /** va address to assign . Must be correctly aligned.*/ - uint64_t va_address; + __u64 va_address; /** Specify offset inside of BO to assign. Must be correctly aligned.*/ - uint64_t offset_in_bo; + __u64 offset_in_bo; /** Specify mapping size. Must be correctly aligned. */ - uint64_t map_size; + __u64 map_size; }; #define AMDGPU_HW_IP_GFX 0 @@ -354,24 +354,24 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 struct drm_amdgpu_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - uint64_t chunk_data; + __u32 chunk_id; + __u32 length_dw; + __u64 chunk_data; }; struct drm_amdgpu_cs_in { /** Rendering context id */ - uint32_t ctx_id; + __u32 ctx_id; /** Handle of resource list associated with CS */ - uint32_t bo_list_handle; - uint32_t num_chunks; - uint32_t _pad; - /** this points to uint64_t * which point to cs chunks */ - uint64_t chunks; + __u32 bo_list_handle; + __u32 num_chunks; + __u32 _pad; + /** this points to __u64 * which point to cs chunks */ + __u64 chunks; }; struct drm_amdgpu_cs_out { - uint64_t handle; + __u64 handle; }; union drm_amdgpu_cs { @@ -388,32 +388,32 @@ union drm_amdgpu_cs { #define AMDGPU_IB_FLAG_PREAMBLE (1<<1) struct drm_amdgpu_cs_chunk_ib { - uint32_t _pad; + __u32 _pad; /** AMDGPU_IB_FLAG_* */ - uint32_t flags; + __u32 flags; /** Virtual address to begin IB execution */ - uint64_t va_start; + __u64 va_start; /** Size of submission */ - uint32_t ib_bytes; + __u32 ib_bytes; /** HW IP to submit to */ - uint32_t ip_type; + __u32 ip_type; /** HW IP index of the same type to submit to */ - uint32_t ip_instance; + __u32 ip_instance; /** Ring index to submit to */ - uint32_t ring; + __u32 ring; }; struct drm_amdgpu_cs_chunk_dep { - uint32_t ip_type; - uint32_t ip_instance; - uint32_t ring; - uint32_t ctx_id; - uint64_t handle; + __u32 ip_type; + __u32 ip_instance; + __u32 ring; + __u32 ctx_id; + __u64 handle; }; struct drm_amdgpu_cs_chunk_fence { - uint32_t handle; - uint32_t offset; + __u32 handle; + __u32 offset; }; struct drm_amdgpu_cs_chunk_data { @@ -486,83 +486,83 @@ struct drm_amdgpu_cs_chunk_data { /* Input structure for the INFO ioctl */ struct drm_amdgpu_info { /* Where the return value will be stored */ - uint64_t return_pointer; + __u64 return_pointer; /* The size of the return value. Just like "size" in "snprintf", * it limits how many bytes the kernel can write. */ - uint32_t return_size; + __u32 return_size; /* The query request id. */ - uint32_t query; + __u32 query; union { struct { - uint32_t id; - uint32_t _pad; + __u32 id; + __u32 _pad; } mode_crtc; struct { /** AMDGPU_HW_IP_* */ - uint32_t type; + __u32 type; /** * Index of the IP if there are more IPs of the same * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 
*/ - uint32_t ip_instance; + __u32 ip_instance; } query_hw_ip; struct { - uint32_t dword_offset; + __u32 dword_offset; /** number of registers to read */ - uint32_t count; - uint32_t instance; + __u32 count; + __u32 instance; /** For future use, no flags defined so far */ - uint32_t flags; + __u32 flags; } read_mmr_reg; struct { /** AMDGPU_INFO_FW_* */ - uint32_t fw_type; + __u32 fw_type; /** * Index of the IP if there are more IPs of * the same type. */ - uint32_t ip_instance; + __u32 ip_instance; /** * Index of the engine. Whether this is used depends * on the firmware type. (e.g. MEC, SDMA) */ - uint32_t index; - uint32_t _pad; + __u32 index; + __u32 _pad; } query_fw; }; }; struct drm_amdgpu_info_gds { /** GDS GFX partition size */ - uint32_t gds_gfx_partition_size; + __u32 gds_gfx_partition_size; /** GDS compute partition size */ - uint32_t compute_partition_size; + __u32 compute_partition_size; /** total GDS memory size */ - uint32_t gds_total_size; + __u32 gds_total_size; /** GWS size per GFX partition */ - uint32_t gws_per_gfx_partition; + __u32 gws_per_gfx_partition; /** GSW size per compute partition */ - uint32_t gws_per_compute_partition; + __u32 gws_per_compute_partition; /** OA size per GFX partition */ - uint32_t oa_per_gfx_partition; + __u32 oa_per_gfx_partition; /** OA size per compute partition */ - uint32_t oa_per_compute_partition; - uint32_t _pad; + __u32 oa_per_compute_partition; + __u32 _pad; }; struct drm_amdgpu_info_vram_gtt { - uint64_t vram_size; - uint64_t vram_cpu_accessible_size; - uint64_t gtt_size; + __u64 vram_size; + __u64 vram_cpu_accessible_size; + __u64 gtt_size; }; struct drm_amdgpu_info_firmware { - uint32_t ver; - uint32_t feature; + __u32 ver; + __u32 feature; }; #define AMDGPU_VRAM_TYPE_UNKNOWN 0 @@ -576,61 +576,61 @@ struct drm_amdgpu_info_firmware { struct drm_amdgpu_info_device { /** PCI Device ID */ - uint32_t device_id; + __u32 device_id; /** Internal chip revision: A0, A1, etc.) */ - uint32_t chip_rev; - uint32_t external_rev; + __u32 chip_rev; + __u32 external_rev; /** Revision id in PCI Config space */ - uint32_t pci_rev; - uint32_t family; - uint32_t num_shader_engines; - uint32_t num_shader_arrays_per_engine; + __u32 pci_rev; + __u32 family; + __u32 num_shader_engines; + __u32 num_shader_arrays_per_engine; /* in KHz */ - uint32_t gpu_counter_freq; - uint64_t max_engine_clock; - uint64_t max_memory_clock; + __u32 gpu_counter_freq; + __u64 max_engine_clock; + __u64 max_memory_clock; /* cu information */ - uint32_t cu_active_number; - uint32_t cu_ao_mask; - uint32_t cu_bitmap[4][4]; + __u32 cu_active_number; + __u32 cu_ao_mask; + __u32 cu_bitmap[4][4]; /** Render backend pipe mask. One render backend is CB+DB. */ - uint32_t enabled_rb_pipes_mask; - uint32_t num_rb_pipes; - uint32_t num_hw_gfx_contexts; - uint32_t _pad; - uint64_t ids_flags; + __u32 enabled_rb_pipes_mask; + __u32 num_rb_pipes; + __u32 num_hw_gfx_contexts; + __u32 _pad; + __u64 ids_flags; /** Starting virtual address for UMDs. */ - uint64_t virtual_address_offset; + __u64 virtual_address_offset; /** The maximum virtual address */ - uint64_t virtual_address_max; + __u64 virtual_address_max; /** Required alignment of virtual addresses. 
*/ - uint32_t virtual_address_alignment; + __u32 virtual_address_alignment; /** Page table entry - fragment size */ - uint32_t pte_fragment_size; - uint32_t gart_page_size; + __u32 pte_fragment_size; + __u32 gart_page_size; /** constant engine ram size*/ - uint32_t ce_ram_size; + __u32 ce_ram_size; /** video memory type info*/ - uint32_t vram_type; + __u32 vram_type; /** video memory bit width*/ - uint32_t vram_bit_width; + __u32 vram_bit_width; /* vce harvesting instance */ - uint32_t vce_harvest_config; + __u32 vce_harvest_config; }; struct drm_amdgpu_info_hw_ip { /** Version of h/w IP */ - uint32_t hw_ip_version_major; - uint32_t hw_ip_version_minor; + __u32 hw_ip_version_major; + __u32 hw_ip_version_minor; /** Capabilities */ - uint64_t capabilities_flags; + __u64 capabilities_flags; /** command buffer address start alignment*/ - uint32_t ib_start_alignment; + __u32 ib_start_alignment; /** command buffer size alignment*/ - uint32_t ib_size_alignment; + __u32 ib_size_alignment; /** Bitmask of available rings. Bit 0 means ring 0, etc. */ - uint32_t available_rings; - uint32_t _pad; + __u32 available_rings; + __u32 _pad; }; /* diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h index 8dec3fdc99c7..6de7f0196ca0 100644 --- a/include/uapi/drm/armada_drm.h +++ b/include/uapi/drm/armada_drm.h @@ -9,6 +9,8 @@ #ifndef DRM_ARMADA_IOCTL_H #define DRM_ARMADA_IOCTL_H +#include "drm.h" + #define DRM_ARMADA_GEM_CREATE 0x00 #define DRM_ARMADA_GEM_MMAP 0x02 #define DRM_ARMADA_GEM_PWRITE 0x03 diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3801584a0c53..b4e92eb12044 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -54,6 +54,7 @@ typedef int32_t __s32; typedef uint32_t __u32; typedef int64_t __s64; typedef uint64_t __u64; +typedef size_t __kernel_size_t; typedef unsigned long drm_handle_t; #endif @@ -129,11 +130,11 @@ struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ - size_t name_len; /**< Length of name buffer */ + __kernel_size_t name_len; /**< Length of name buffer */ char __user *name; /**< Name of driver */ - size_t date_len; /**< Length of date buffer */ + __kernel_size_t date_len; /**< Length of date buffer */ char __user *date; /**< User-space buffer to hold date */ - size_t desc_len; /**< Length of desc buffer */ + __kernel_size_t desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ }; @@ -143,7 +144,7 @@ struct drm_version { * \sa drmGetBusid() and drmSetBusId(). 
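Swapping size_t for __kernel_size_t in struct drm_version above only pins the field width so that 32-bit and 64-bit userspace see the same layout; the usual two-pass calling pattern is unaffected. A sketch of that pattern, assuming the standard DRM_IOCTL_VERSION request from this header:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int print_driver_name(int drm_fd)
{
        struct drm_version ver = {0};

        /* First pass: NULL buffers, the kernel only reports the lengths. */
        if (ioctl(drm_fd, DRM_IOCTL_VERSION, &ver))
                return -1;

        ver.name = malloc(ver.name_len + 1);
        if (!ver.name)
                return -1;

        /* Second pass: the kernel copies up to name_len bytes into name. */
        if (ioctl(drm_fd, DRM_IOCTL_VERSION, &ver)) {
                free(ver.name);
                return -1;
        }
        ver.name[ver.name_len] = '\0';
        printf("driver: %s (%d.%d.%d)\n", ver.name,
               ver.version_major, ver.version_minor, ver.version_patchlevel);
        free(ver.name);
        return 0;
}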
*/ struct drm_unique { - size_t unique_len; /**< Length of unique */ + __kernel_size_t unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ }; diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 0b69a7753558..4d8da699a623 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -24,7 +24,7 @@ #ifndef DRM_FOURCC_H #define DRM_FOURCC_H -#include <linux/types.h> +#include "drm.h" #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ ((__u32)(c) << 16) | ((__u32)(d) << 24)) @@ -225,7 +225,7 @@ * - multiple of 128 pixels for the width * - multiple of 32 pixels for the height * - * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html + * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html */ #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 6c11ca401de8..50adb46204c2 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -27,7 +27,7 @@ #ifndef _DRM_MODE_H #define _DRM_MODE_H -#include <linux/types.h> +#include "drm.h" #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 @@ -526,14 +526,14 @@ struct drm_mode_crtc_page_flip { /* create a dumb scanout buffer */ struct drm_mode_create_dumb { - uint32_t height; - uint32_t width; - uint32_t bpp; - uint32_t flags; + __u32 height; + __u32 width; + __u32 bpp; + __u32 flags; /* handle, pitch, size will be returned */ - uint32_t handle; - uint32_t pitch; - uint64_t size; + __u32 handle; + __u32 pitch; + __u64 size; }; /* set up for mmap of a dumb scanout buffer */ @@ -550,7 +550,7 @@ struct drm_mode_map_dumb { }; struct drm_mode_destroy_dumb { - uint32_t handle; + __u32 handle; }; /* page-flip flags are valid, plus: */ diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h index 413a5642d49f..1d1a858a203d 100644 --- a/include/uapi/drm/drm_sarea.h +++ b/include/uapi/drm/drm_sarea.h @@ -32,7 +32,7 @@ #ifndef _DRM_SAREA_H_ #define _DRM_SAREA_H_ -#include <drm/drm.h> +#include "drm.h" /* SAREA area needs to be at least a page */ #if defined(__alpha__) diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h new file mode 100644 index 000000000000..4cc989ad6851 --- /dev/null +++ b/include/uapi/drm/etnaviv_drm.h @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ETNAVIV_DRM_H__ +#define __ETNAVIV_DRM_H__ + +#include "drm.h" + +/* Please note that modifications to all structs defined here are + * subject to backwards-compatibility constraints: + * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit + * user/kernel compatibility + * 2) Keep fields aligned to their size + * 3) Because of how drm_ioctl() works, we can add new fields at + * the end of an ioctl if some care is taken: drm_ioctl() will + * zero out the new fields at the tail of the ioctl, so a zero + * value should have a backwards compatible meaning. And for + * output params, userspace won't see the newly added output + * fields.. so that has to be somehow ok. + */ + +/* timeouts are specified in clock-monotonic absolute times (to simplify + * restarting interrupted ioctls). The following struct is logically the + * same as 'struct timespec' but 32/64b ABI safe. + */ +struct drm_etnaviv_timespec { + __s64 tv_sec; /* seconds */ + __s64 tv_nsec; /* nanoseconds */ +}; + +#define ETNAVIV_PARAM_GPU_MODEL 0x01 +#define ETNAVIV_PARAM_GPU_REVISION 0x02 +#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03 +#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04 +#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05 +#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06 +#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07 + +#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10 +#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11 +#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12 +#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13 +#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14 +#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15 +#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16 +#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17 +#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18 +#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 + +#define ETNA_MAX_PIPES 4 + +struct drm_etnaviv_param { + __u32 pipe; /* in */ + __u32 param; /* in, ETNAVIV_PARAM_x */ + __u64 value; /* out (get_param) or in (set_param) */ +}; + +/* + * GEM buffers: + */ + +#define ETNA_BO_CACHE_MASK 0x000f0000 +/* cache modes */ +#define ETNA_BO_CACHED 0x00010000 +#define ETNA_BO_WC 0x00020000 +#define ETNA_BO_UNCACHED 0x00040000 +/* map flags */ +#define ETNA_BO_FORCE_MMU 0x00100000 + +struct drm_etnaviv_gem_new { + __u64 size; /* in */ + __u32 flags; /* in, mask of ETNA_BO_x */ + __u32 handle; /* out */ +}; + +struct drm_etnaviv_gem_info { + __u32 handle; /* in */ + __u32 pad; + __u64 offset; /* out, offset to pass to mmap() */ +}; + +#define ETNA_PREP_READ 0x01 +#define ETNA_PREP_WRITE 0x02 +#define ETNA_PREP_NOSYNC 0x04 + +struct drm_etnaviv_gem_cpu_prep { + __u32 handle; /* in */ + __u32 op; /* in, mask of ETNA_PREP_x */ + struct drm_etnaviv_timespec timeout; /* in */ +}; + +struct drm_etnaviv_gem_cpu_fini { + __u32 handle; /* in */ + __u32 flags; /* in, placeholder for now, no defined values */ +}; + +/* + * Cmdstream Submission: + */ + +/* The value written into the cmdstream is logically: + * relocbuf->gpuaddr + reloc_offset + * + * NOTE that reloc's must be sorted by order of increasing submit_offset, + * otherwise EINVAL. + */ +struct drm_etnaviv_gem_submit_reloc { + __u32 submit_offset; /* in, offset from submit_bo */ + __u32 reloc_idx; /* in, index of reloc_bo buffer */ + __u64 reloc_offset; /* in, offset from start of reloc_bo */ + __u32 flags; /* in, placeholder for now, no defined values */ +}; + +/* Each buffer referenced elsewhere in the cmdstream submit (ie. the + * cmdstream buffer(s) themselves or reloc entries) has one (and only + * one) entry in the submit->bos[] table. 
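The drm_etnaviv_param structure and the ETNAVIV_PARAM_* identifiers above are the feature-discovery side of this new interface. A minimal sketch of querying the GPU model on pipe 0, using the DRM_IOCTL_ETNAVIV_GET_PARAM request defined at the end of this header:

#include <sys/ioctl.h>
#include <drm/etnaviv_drm.h>    /* installed copy of this new header */

static int etnaviv_gpu_model(int drm_fd, __u64 *model)
{
        struct drm_etnaviv_param req = {
                .pipe  = 0,                       /* first GPU pipe */
                .param = ETNAVIV_PARAM_GPU_MODEL,
        };

        if (ioctl(drm_fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req))
                return -1;

        *model = req.value;     /* "out" value for get_param */
        return 0;
}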
+ * + * As a optimization, the current buffer (gpu virtual address) can be + * passed back through the 'presumed' field. If on a subsequent reloc, + * userspace passes back a 'presumed' address that is still valid, + * then patching the cmdstream for this entry is skipped. This can + * avoid kernel needing to map/access the cmdstream bo in the common + * case. + */ +#define ETNA_SUBMIT_BO_READ 0x0001 +#define ETNA_SUBMIT_BO_WRITE 0x0002 +struct drm_etnaviv_gem_submit_bo { + __u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */ + __u32 handle; /* in, GEM handle */ + __u64 presumed; /* in/out, presumed buffer address */ +}; + +/* Each cmdstream submit consists of a table of buffers involved, and + * one or more cmdstream buffers. This allows for conditional execution + * (context-restore), and IB buffers needed for per tile/bin draw cmds. + */ +#define ETNA_PIPE_3D 0x00 +#define ETNA_PIPE_2D 0x01 +#define ETNA_PIPE_VG 0x02 +struct drm_etnaviv_gem_submit { + __u32 fence; /* out */ + __u32 pipe; /* in */ + __u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */ + __u32 nr_bos; /* in, number of submit_bo's */ + __u32 nr_relocs; /* in, number of submit_reloc's */ + __u32 stream_size; /* in, cmdstream size */ + __u64 bos; /* in, ptr to array of submit_bo's */ + __u64 relocs; /* in, ptr to array of submit_reloc's */ + __u64 stream; /* in, ptr to cmdstream */ +}; + +/* The normal way to synchronize with the GPU is just to CPU_PREP on + * a buffer if you need to access it from the CPU (other cmdstream + * submission from same or other contexts, PAGE_FLIP ioctl, etc, all + * handle the required synchronization under the hood). This ioctl + * mainly just exists as a way to implement the gallium pipe_fence + * APIs without requiring a dummy bo to synchronize on. 
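As the comment says, CPU_PREP/CPU_FINI is the normal way to synchronize CPU access with the GPU, and WAIT_FENCE exists mainly for fence-style APIs. A rough sketch of that CPU round trip, using the absolute CLOCK_MONOTONIC timeout convention described at the top of the header and the ioctl request macros defined at its end:

#include <time.h>
#include <sys/ioctl.h>
#include <drm/etnaviv_drm.h>

/* Sketch: wait up to roughly one second for the GPU to finish with a BO,
 * access it on the CPU, then signal that CPU access is done again. */
static int cpu_read_bo(int drm_fd, __u32 handle)
{
        struct timespec now;
        struct drm_etnaviv_gem_cpu_prep prep = {
                .handle = handle,
                .op     = ETNA_PREP_READ,
        };
        struct drm_etnaviv_gem_cpu_fini fini = { .handle = handle };

        /* Timeouts are absolute CLOCK_MONOTONIC times, as noted above. */
        clock_gettime(CLOCK_MONOTONIC, &now);
        prep.timeout.tv_sec  = now.tv_sec + 1;
        prep.timeout.tv_nsec = now.tv_nsec;

        if (ioctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep))
                return -1;

        /* ... read the buffer contents through an existing CPU mapping ... */

        return ioctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
}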
+ */ +#define ETNA_WAIT_NONBLOCK 0x01 +struct drm_etnaviv_wait_fence { + __u32 pipe; /* in */ + __u32 fence; /* in */ + __u32 flags; /* in, mask of ETNA_WAIT_x */ + __u32 pad; + struct drm_etnaviv_timespec timeout; /* in */ +}; + +#define ETNA_USERPTR_READ 0x01 +#define ETNA_USERPTR_WRITE 0x02 +struct drm_etnaviv_gem_userptr { + __u64 user_ptr; /* in, page aligned user pointer */ + __u64 user_size; /* in, page aligned user size */ + __u32 flags; /* in, flags */ + __u32 handle; /* out, non-zero handle */ +}; + +struct drm_etnaviv_gem_wait { + __u32 pipe; /* in */ + __u32 handle; /* in, bo to be waited for */ + __u32 flags; /* in, mask of ETNA_WAIT_x */ + __u32 pad; + struct drm_etnaviv_timespec timeout; /* in */ +}; + +#define DRM_ETNAVIV_GET_PARAM 0x00 +/* placeholder: +#define DRM_ETNAVIV_SET_PARAM 0x01 + */ +#define DRM_ETNAVIV_GEM_NEW 0x02 +#define DRM_ETNAVIV_GEM_INFO 0x03 +#define DRM_ETNAVIV_GEM_CPU_PREP 0x04 +#define DRM_ETNAVIV_GEM_CPU_FINI 0x05 +#define DRM_ETNAVIV_GEM_SUBMIT 0x06 +#define DRM_ETNAVIV_WAIT_FENCE 0x07 +#define DRM_ETNAVIV_GEM_USERPTR 0x08 +#define DRM_ETNAVIV_GEM_WAIT 0x09 +#define DRM_ETNAVIV_NUM_IOCTLS 0x0a + +#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param) +#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new) +#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info) +#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep) +#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini) +#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit) +#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence) +#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) +#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) + +#endif /* __ETNAVIV_DRM_H__ */ diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index 5575ed1598bd..312c67d744ae 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -15,7 +15,7 @@ #ifndef _UAPI_EXYNOS_DRM_H_ #define _UAPI_EXYNOS_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /** * User-desired buffer creation information structure. @@ -27,7 +27,7 @@ * - this handle will be set by gem module of kernel side. */ struct drm_exynos_gem_create { - uint64_t size; + __u64 size; unsigned int flags; unsigned int handle; }; @@ -44,7 +44,7 @@ struct drm_exynos_gem_create { struct drm_exynos_gem_info { unsigned int handle; unsigned int flags; - uint64_t size; + __u64 size; }; /** @@ -58,7 +58,7 @@ struct drm_exynos_gem_info { struct drm_exynos_vidi_connection { unsigned int connection; unsigned int extensions; - uint64_t edid; + __u64 edid; }; /* memory type definitions. */ diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h index 34736efd5824..bdb028723ded 100644 --- a/include/uapi/drm/i810_drm.h +++ b/include/uapi/drm/i810_drm.h @@ -1,7 +1,7 @@ #ifndef _I810_DRM_H_ #define _I810_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* WARNING: These defines must be the same as what the Xserver uses. 
* if you change them, you must change the defines in the Xserver. diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 484a9fb20479..acf21026c78a 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -27,7 +27,7 @@ #ifndef _UAPI_I915_DRM_H_ #define _UAPI_I915_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. @@ -356,6 +356,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_EU_TOTAL 34 #define I915_PARAM_HAS_GPU_RESET 35 #define I915_PARAM_HAS_RESOURCE_STREAMER 36 +#define I915_PARAM_HAS_EXEC_SOFTPIN 37 typedef struct drm_i915_getparam { __s32 param; @@ -682,8 +683,12 @@ struct drm_i915_gem_exec_object2 { __u64 alignment; /** - * Returned value of the updated offset of the object, for future - * presumed_offset writes. + * When the EXEC_OBJECT_PINNED flag is specified this is populated by + * the user with the GTT offset at which this object will be pinned. + * When the I915_EXEC_NO_RELOC flag is specified this must contain the + * presumed_offset of the object. + * During execbuffer2 the kernel populates it with the value of the + * current GTT offset of the object, for future presumed_offset writes. */ __u64 offset; @@ -691,7 +696,8 @@ struct drm_i915_gem_exec_object2 { #define EXEC_OBJECT_NEEDS_GTT (1<<1) #define EXEC_OBJECT_WRITE (1<<2) #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) -#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1) +#define EXEC_OBJECT_PINNED (1<<4) +#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1) __u64 flags; __u64 rsvd1; @@ -1079,6 +1085,12 @@ struct drm_i915_gem_context_destroy { }; struct drm_i915_reg_read { + /* + * Register offset. 
+ * For 64bit wide registers where the upper 32bits don't immediately + * follow the lower 32bits, the offset of the lower 32bits must + * be specified + */ __u64 offset; __u64 val; /* Return value */ }; @@ -1125,8 +1137,9 @@ struct drm_i915_gem_context_param { __u32 ctx_id; __u32 size; __u64 param; -#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 -#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 +#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 __u64 value; }; diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h index 2375bfd6e5e9..fca817009e13 100644 --- a/include/uapi/drm/mga_drm.h +++ b/include/uapi/drm/mga_drm.h @@ -35,7 +35,7 @@ #ifndef __MGA_DRM_H__ #define __MGA_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (mga_sarea.h) diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 75a232b9a970..81e6e0d1d360 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -18,8 +18,7 @@ #ifndef __MSM_DRM_H__ #define __MSM_DRM_H__ -#include <stddef.h> -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints: @@ -122,7 +121,7 @@ struct drm_msm_gem_cpu_fini { struct drm_msm_gem_submit_reloc { __u32 submit_offset; /* in, offset from submit_bo */ __u32 or; /* in, value OR'd with result */ - __s32 shift; /* in, amount of left shift (can be negative) */ + __s32 shift; /* in, amount of left shift (can be negative) */ __u32 reloc_idx; /* in, index of reloc_bo buffer */ __u64 reloc_offset; /* in, offset from start of reloc_bo */ }; diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index fd594cc73cc0..500d82aecbe4 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -27,6 +27,8 @@ #define DRM_NOUVEAU_EVENT_NVIF 0x80000000 +#include <drm/drm.h> + #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) @@ -41,34 +43,34 @@ #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 struct drm_nouveau_gem_info { - uint32_t handle; - uint32_t domain; - uint64_t size; - uint64_t offset; - uint64_t map_handle; - uint32_t tile_mode; - uint32_t tile_flags; + __u32 handle; + __u32 domain; + __u64 size; + __u64 offset; + __u64 map_handle; + __u32 tile_mode; + __u32 tile_flags; }; struct drm_nouveau_gem_new { struct drm_nouveau_gem_info info; - uint32_t channel_hint; - uint32_t align; + __u32 channel_hint; + __u32 align; }; #define NOUVEAU_GEM_MAX_BUFFERS 1024 struct drm_nouveau_gem_pushbuf_bo_presumed { - uint32_t valid; - uint32_t domain; - uint64_t offset; + __u32 valid; + __u32 domain; + __u64 offset; }; struct drm_nouveau_gem_pushbuf_bo { - uint64_t user_priv; - uint32_t handle; - uint32_t read_domains; - uint32_t write_domains; - uint32_t valid_domains; + __u64 user_priv; + __u32 handle; + __u32 read_domains; + __u32 write_domains; + __u32 valid_domains; struct drm_nouveau_gem_pushbuf_bo_presumed presumed; }; @@ -77,46 +79,46 @@ struct drm_nouveau_gem_pushbuf_bo { #define NOUVEAU_GEM_RELOC_OR (1 << 2) #define NOUVEAU_GEM_MAX_RELOCS 1024 struct drm_nouveau_gem_pushbuf_reloc { - uint32_t reloc_bo_index; - uint32_t reloc_bo_offset; - uint32_t bo_index; - uint32_t flags; - uint32_t data; - uint32_t vor; - uint32_t tor; + __u32 reloc_bo_index; + __u32 reloc_bo_offset; + __u32 bo_index; + __u32 
flags; + __u32 data; + __u32 vor; + __u32 tor; }; #define NOUVEAU_GEM_MAX_PUSH 512 struct drm_nouveau_gem_pushbuf_push { - uint32_t bo_index; - uint32_t pad; - uint64_t offset; - uint64_t length; + __u32 bo_index; + __u32 pad; + __u64 offset; + __u64 length; }; struct drm_nouveau_gem_pushbuf { - uint32_t channel; - uint32_t nr_buffers; - uint64_t buffers; - uint32_t nr_relocs; - uint32_t nr_push; - uint64_t relocs; - uint64_t push; - uint32_t suffix0; - uint32_t suffix1; - uint64_t vram_available; - uint64_t gart_available; + __u32 channel; + __u32 nr_buffers; + __u64 buffers; + __u32 nr_relocs; + __u32 nr_push; + __u64 relocs; + __u64 push; + __u32 suffix0; + __u32 suffix1; + __u64 vram_available; + __u64 gart_available; }; #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 struct drm_nouveau_gem_cpu_prep { - uint32_t handle; - uint32_t flags; + __u32 handle; + __u32 flags; }; struct drm_nouveau_gem_cpu_fini { - uint32_t handle; + __u32 handle; }; #define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */ diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 1d0b1172664e..38a3bd847e15 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -20,7 +20,7 @@ #ifndef __OMAP_DRM_H__ #define __OMAP_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. @@ -101,9 +101,6 @@ struct drm_omap_gem_info { #define DRM_OMAP_GET_PARAM 0x00 #define DRM_OMAP_SET_PARAM 0x01 -/* placeholder for plugin-api -#define DRM_OMAP_GET_BASE 0x02 -*/ #define DRM_OMAP_GEM_NEW 0x03 #define DRM_OMAP_GEM_CPU_PREP 0x04 #define DRM_OMAP_GEM_CPU_FINI 0x05 @@ -112,9 +109,6 @@ struct drm_omap_gem_info { #define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param) #define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param) -/* placeholder for plugin-api -#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base) -*/ #define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new) #define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep) #define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index ebebd36c4117..4d1e32640463 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -24,13 +24,12 @@ #ifndef QXL_DRM_H #define QXL_DRM_H -#include <stddef.h> -#include "drm/drm.h" +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. 
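The backwards-compatibility note above is the reason this patch switches the remaining uintNN_t fields to __uNN and keeps user pointers as plain __u64 values: every ioctl struct must have a single layout regardless of whether userspace is 32-bit or 64-bit. Userspace therefore widens its pointers through uintptr_t before storing them, roughly as in this illustrative sketch for the nouveau pushbuf structure above (the helper itself is made up for the example):

#include <stdint.h>
#include <drm/nouveau_drm.h>

/* Illustrative only: user pointers are widened through uintptr_t into the
 * __u64 fields so the struct has one layout for 32-bit and 64-bit userspace. */
static void fill_pushbuf(struct drm_nouveau_gem_pushbuf *req, __u32 channel,
                         struct drm_nouveau_gem_pushbuf_bo *bos, __u32 nr_bos,
                         struct drm_nouveau_gem_pushbuf_push *push, __u32 nr_push)
{
        req->channel    = channel;
        req->nr_buffers = nr_bos;
        req->buffers    = (__u64)(uintptr_t)bos;
        req->nr_push    = nr_push;
        req->push       = (__u64)(uintptr_t)push;
        req->nr_relocs  = 0;
        req->relocs     = 0;
}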
* - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel * compatibility Keep fields aligned to their size */ @@ -48,14 +47,14 @@ #define DRM_QXL_ALLOC_SURF 0x06 struct drm_qxl_alloc { - uint32_t size; - uint32_t handle; /* 0 is an invalid handle */ + __u32 size; + __u32 handle; /* 0 is an invalid handle */ }; struct drm_qxl_map { - uint64_t offset; /* use for mmap system call */ - uint32_t handle; - uint32_t pad; + __u64 offset; /* use for mmap system call */ + __u32 handle; + __u32 pad; }; /* @@ -68,59 +67,59 @@ struct drm_qxl_map { #define QXL_RELOC_TYPE_SURF 2 struct drm_qxl_reloc { - uint64_t src_offset; /* offset into src_handle or src buffer */ - uint64_t dst_offset; /* offset in dest handle */ - uint32_t src_handle; /* dest handle to compute address from */ - uint32_t dst_handle; /* 0 if to command buffer */ - uint32_t reloc_type; - uint32_t pad; + __u64 src_offset; /* offset into src_handle or src buffer */ + __u64 dst_offset; /* offset in dest handle */ + __u32 src_handle; /* dest handle to compute address from */ + __u32 dst_handle; /* 0 if to command buffer */ + __u32 reloc_type; + __u32 pad; }; struct drm_qxl_command { - uint64_t __user command; /* void* */ - uint64_t __user relocs; /* struct drm_qxl_reloc* */ - uint32_t type; - uint32_t command_size; - uint32_t relocs_num; - uint32_t pad; + __u64 __user command; /* void* */ + __u64 __user relocs; /* struct drm_qxl_reloc* */ + __u32 type; + __u32 command_size; + __u32 relocs_num; + __u32 pad; }; /* XXX: call it drm_qxl_commands? */ struct drm_qxl_execbuffer { - uint32_t flags; /* for future use */ - uint32_t commands_num; - uint64_t __user commands; /* struct drm_qxl_command* */ + __u32 flags; /* for future use */ + __u32 commands_num; + __u64 __user commands; /* struct drm_qxl_command* */ }; struct drm_qxl_update_area { - uint32_t handle; - uint32_t top; - uint32_t left; - uint32_t bottom; - uint32_t right; - uint32_t pad; + __u32 handle; + __u32 top; + __u32 left; + __u32 bottom; + __u32 right; + __u32 pad; }; #define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ #define QXL_PARAM_MAX_RELOCS 2 struct drm_qxl_getparam { - uint64_t param; - uint64_t value; + __u64 param; + __u64 value; }; /* these are one bit values */ struct drm_qxl_clientcap { - uint32_t index; - uint32_t pad; + __u32 index; + __u32 pad; }; struct drm_qxl_alloc_surf { - uint32_t format; - uint32_t width; - uint32_t height; - int32_t stride; - uint32_t handle; - uint32_t pad; + __u32 format; + __u32 width; + __u32 height; + __s32 stride; + __u32 handle; + __u32 pad; }; #define DRM_IOCTL_QXL_ALLOC \ diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h index 76b0aa3e8210..7a44c6500a7e 100644 --- a/include/uapi/drm/r128_drm.h +++ b/include/uapi/drm/r128_drm.h @@ -33,7 +33,7 @@ #ifndef __R128_DRM_H__ #define __R128_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (r128_sarea.h) diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 01aa2a8e3f8d..ccb9bcd82685 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -793,9 +793,9 @@ typedef struct drm_radeon_surface_free { #define RADEON_GEM_DOMAIN_VRAM 0x4 struct drm_radeon_gem_info { - uint64_t gart_size; - uint64_t vram_size; - uint64_t vram_visible; + __u64 gart_size; + __u64 vram_size; + __u64 vram_visible; }; #define RADEON_GEM_NO_BACKING_STORE 
(1 << 0) @@ -807,11 +807,11 @@ struct drm_radeon_gem_info { #define RADEON_GEM_NO_CPU_ACCESS (1 << 4) struct drm_radeon_gem_create { - uint64_t size; - uint64_t alignment; - uint32_t handle; - uint32_t initial_domain; - uint32_t flags; + __u64 size; + __u64 alignment; + __u32 handle; + __u32 initial_domain; + __u32 flags; }; /* @@ -825,10 +825,10 @@ struct drm_radeon_gem_create { #define RADEON_GEM_USERPTR_REGISTER (1 << 3) struct drm_radeon_gem_userptr { - uint64_t addr; - uint64_t size; - uint32_t flags; - uint32_t handle; + __u64 addr; + __u64 size; + __u32 flags; + __u32 handle; }; #define RADEON_TILING_MACRO 0x1 @@ -850,72 +850,72 @@ struct drm_radeon_gem_userptr { #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf struct drm_radeon_gem_set_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_get_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_mmap { - uint32_t handle; - uint32_t pad; - uint64_t offset; - uint64_t size; - uint64_t addr_ptr; + __u32 handle; + __u32 pad; + __u64 offset; + __u64 size; + __u64 addr_ptr; }; struct drm_radeon_gem_set_domain { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; + __u32 handle; + __u32 read_domains; + __u32 write_domain; }; struct drm_radeon_gem_wait_idle { - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; }; struct drm_radeon_gem_busy { - uint32_t handle; - uint32_t domain; + __u32 handle; + __u32 domain; }; struct drm_radeon_gem_pread { /** Handle for the object being read. */ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to read from */ - uint64_t offset; + __u64 offset; /** Length of data to read */ - uint64_t size; + __u64 size; /** Pointer to write the data into. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; struct drm_radeon_gem_pwrite { /** Handle for the object being written to. */ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to write to */ - uint64_t offset; + __u64 offset; /** Length of data to write */ - uint64_t size; + __u64 size; /** Pointer to read the data from. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; /* Sets or returns a value associated with a buffer. 
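struct drm_radeon_gem_create above is used for both input and output: userspace fills in size, alignment and the initial domain, and the kernel writes the resulting GEM handle back into the same struct. A hedged sketch of an allocation call; DRM_IOCTL_RADEON_GEM_CREATE and RADEON_GEM_DOMAIN_GTT come from parts of radeon_drm.h not shown in these hunks:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Sketch: allocate a GTT buffer; the kernel writes the GEM handle back.
 * The ioctl request macro and domain flag are assumed from the full header. */
static int radeon_bo_alloc(int drm_fd, __u64 size, __u32 *handle)
{
        struct drm_radeon_gem_create req;

        memset(&req, 0, sizeof(req));
        req.size = size;
        req.alignment = 4096;
        req.initial_domain = RADEON_GEM_DOMAIN_GTT;

        if (ioctl(drm_fd, DRM_IOCTL_RADEON_GEM_CREATE, &req))
                return -1;

        *handle = req.handle;
        return 0;
}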
*/ struct drm_radeon_gem_op { - uint32_t handle; /* buffer */ - uint32_t op; /* RADEON_GEM_OP_* */ - uint64_t value; /* input or return value */ + __u32 handle; /* buffer */ + __u32 op; /* RADEON_GEM_OP_* */ + __u64 value; /* input or return value */ }; #define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 @@ -935,11 +935,11 @@ struct drm_radeon_gem_op { #define RADEON_VM_PAGE_SNOOPED (1 << 4) struct drm_radeon_gem_va { - uint32_t handle; - uint32_t operation; - uint32_t vm_id; - uint32_t flags; - uint64_t offset; + __u32 handle; + __u32 operation; + __u32 vm_id; + __u32 flags; + __u64 offset; }; #define RADEON_CHUNK_ID_RELOCS 0x01 @@ -961,29 +961,29 @@ struct drm_radeon_gem_va { /* 0 = normal, + = higher priority, - = lower priority */ struct drm_radeon_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - uint64_t chunk_data; + __u32 chunk_id; + __u32 length_dw; + __u64 chunk_data; }; /* drm_radeon_cs_reloc.flags */ #define RADEON_RELOC_PRIO_MASK (0xf << 0) struct drm_radeon_cs_reloc { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; - uint32_t flags; + __u32 handle; + __u32 read_domains; + __u32 write_domain; + __u32 flags; }; struct drm_radeon_cs { - uint32_t num_chunks; - uint32_t cs_id; - /* this points to uint64_t * which point to cs chunks */ - uint64_t chunks; + __u32 num_chunks; + __u32 cs_id; + /* this points to __u64 * which point to cs chunks */ + __u64 chunks; /* updates to the limits after this CS ioctl */ - uint64_t gart_limit; - uint64_t vram_limit; + __u64 gart_limit; + __u64 vram_limit; }; #define RADEON_INFO_DEVICE_ID 0x00 @@ -1042,9 +1042,9 @@ struct drm_radeon_cs { #define RADEON_INFO_GPU_RESET_COUNTER 0x26 struct drm_radeon_info { - uint32_t request; - uint32_t pad; - uint64_t value; + __u32 request; + __u32 pad; + __u64 value; }; /* Those correspond to the tile index to use, this is to explicitly state diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h index 9dc9dc1a7753..574147489c60 100644 --- a/include/uapi/drm/savage_drm.h +++ b/include/uapi/drm/savage_drm.h @@ -26,7 +26,7 @@ #ifndef __SAVAGE_DRM_H__ #define __SAVAGE_DRM_H__ -#include <drm/drm.h> +#include "drm.h" #ifndef __SAVAGE_SAREA_DEFINES__ #define __SAVAGE_SAREA_DEFINES__ diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h index 5391780c2b05..27d0b054aed0 100644 --- a/include/uapi/drm/tegra_drm.h +++ b/include/uapi/drm/tegra_drm.h @@ -23,7 +23,7 @@ #ifndef _UAPI_TEGRA_DRM_H_ #define _UAPI_TEGRA_DRM_H_ -#include <drm/drm.h> +#include "drm.h" #define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h new file mode 100644 index 000000000000..eeb37e394f13 --- /dev/null +++ b/include/uapi/drm/vc4_drm.h @@ -0,0 +1,279 @@ +/* + * Copyright © 2014-2015 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _UAPI_VC4_DRM_H_ +#define _UAPI_VC4_DRM_H_ + +#include "drm.h" + +#define DRM_VC4_SUBMIT_CL 0x00 +#define DRM_VC4_WAIT_SEQNO 0x01 +#define DRM_VC4_WAIT_BO 0x02 +#define DRM_VC4_CREATE_BO 0x03 +#define DRM_VC4_MMAP_BO 0x04 +#define DRM_VC4_CREATE_SHADER_BO 0x05 +#define DRM_VC4_GET_HANG_STATE 0x06 + +#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) +#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) +#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo) +#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) +#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) +#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) +#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) + +struct drm_vc4_submit_rcl_surface { + __u32 hindex; /* Handle index, or ~0 if not present. */ + __u32 offset; /* Offset to start of buffer. */ + /* + * Bits for either render config (color_write) or load/store packet. + * Bits should all be 0 for MSAA load/stores. + */ + __u16 bits; + +#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0) + __u16 flags; +}; + +/** + * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D + * engine. + * + * Drivers typically use GPU BOs to store batchbuffers / command lists and + * their associated state. However, because the VC4 lacks an MMU, we have to + * do validation of memory accesses by the GPU commands. If we were to store + * our commands in BOs, we'd need to do uncached readback from them to do the + * validation process, which is too expensive. Instead, userspace accumulates + * commands and associated state in plain memory, then the kernel copies the + * data to its own address space, and then validates and stores it in a GPU + * BO. + */ +struct drm_vc4_submit_cl { + /* Pointer to the binner command list. + * + * This is the first set of commands executed, which runs the + * coordinate shader to determine where primitives land on the screen, + * then writes out the state updates and draw calls necessary per tile + * to the tile allocation BO. + */ + __u64 bin_cl; + + /* Pointer to the shader records. + * + * Shader records are the structures read by the hardware that contain + * pointers to uniforms, shaders, and vertex attributes. The + * reference to the shader record has enough information to determine + * how many pointers are necessary (fixed number for shaders/uniforms, + * and an attribute count), so those BO indices into bo_handles are + * just stored as __u32s before each shader record passed in. + */ + __u64 shader_rec; + + /* Pointer to uniform data and texture handles for the textures + * referenced by the shader. 
+ * + * For each shader state record, there is a set of uniform data in the + * order referenced by the record (FS, VS, then CS). Each set of + * uniform data has a __u32 index into bo_handles per texture + * sample operation, in the order the QPU_W_TMUn_S writes appear in + * the program. Following the texture BO handle indices is the actual + * uniform data. + * + * The individual uniform state blocks don't have sizes passed in, + * because the kernel has to determine the sizes anyway during shader + * code validation. + */ + __u64 uniforms; + __u64 bo_handles; + + /* Size in bytes of the binner command list. */ + __u32 bin_cl_size; + /* Size in bytes of the set of shader records. */ + __u32 shader_rec_size; + /* Number of shader records. + * + * This could just be computed from the contents of shader_records and + * the address bits of references to them from the bin CL, but it + * keeps the kernel from having to resize some allocations it makes. + */ + __u32 shader_rec_count; + /* Size in bytes of the uniform state. */ + __u32 uniforms_size; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + /* RCL setup: */ + __u16 width; + __u16 height; + __u8 min_x_tile; + __u8 min_y_tile; + __u8 max_x_tile; + __u8 max_y_tile; + struct drm_vc4_submit_rcl_surface color_read; + struct drm_vc4_submit_rcl_surface color_write; + struct drm_vc4_submit_rcl_surface zs_read; + struct drm_vc4_submit_rcl_surface zs_write; + struct drm_vc4_submit_rcl_surface msaa_color_write; + struct drm_vc4_submit_rcl_surface msaa_zs_write; + __u32 clear_color[2]; + __u32 clear_z; + __u8 clear_s; + + __u32 pad:24; + +#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) + __u32 flags; + + /* Returned value of the seqno of this render job (for the + * wait ioctl). + */ + __u64 seqno; +}; + +/** + * struct drm_vc4_wait_seqno - ioctl argument for waiting for + * DRM_VC4_SUBMIT_CL completion using its returned seqno. + * + * timeout_ns is the timeout in nanoseconds, where "0" means "don't + * block, just return the status." + */ +struct drm_vc4_wait_seqno { + __u64 seqno; + __u64 timeout_ns; +}; + +/** + * struct drm_vc4_wait_bo - ioctl argument for waiting for + * completion of the last DRM_VC4_SUBMIT_CL on a BO. + * + * This is useful for cases where multiple processes might be + * rendering to a BO and you want to wait for all rendering to be + * completed. + */ +struct drm_vc4_wait_bo { + __u32 handle; + __u32 pad; + __u64 timeout_ns; +}; + +/** + * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_vc4_create_bo { + __u32 size; + __u32 flags; + /** Returned GEM handle for the BO. */ + __u32 handle; + __u32 pad; +}; + +/** + * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs. + * + * This doesn't actually perform an mmap. Instead, it returns the + * offset you need to use in an mmap on the DRM device node. This + * means that tools like valgrind end up knowing about the mapped + * memory. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_vc4_mmap_bo { + /** Handle for the object being mapped. */ + __u32 handle; + __u32 flags; + /** offset into the drm node to use for subsequent mmap call. */ + __u64 offset; +}; + +/** + * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4 + * shader BOs. 
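The wait_seqno ioctl above pairs with the seqno value that submit_cl returns, and a zero timeout_ns means "report status without blocking". A small polling sketch, using the DRM_IOCTL_VC4_WAIT_SEQNO request defined near the top of this header (the exact errno for a still-running job is not spelled out here, so the sketch treats any failure as "not finished yet"):

#include <sys/ioctl.h>
#include <drm/vc4_drm.h>    /* installed copy of this new header */

/* Sketch: poll whether the job identified by a submit_cl seqno has finished.
 * timeout_ns == 0 means "don't block, just return the status". */
static int vc4_job_finished(int drm_fd, __u64 seqno)
{
        struct drm_vc4_wait_seqno wait = {
                .seqno      = seqno,
                .timeout_ns = 0,
        };

        if (ioctl(drm_fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait) == 0)
                return 1;       /* completed */
        return 0;               /* still running, or an error occurred */
}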
+ * + * Since allowing a shader to be overwritten while it's also being + * executed from would allow privlege escalation, shaders must be + * created using this ioctl, and they can't be mmapped later. + */ +struct drm_vc4_create_shader_bo { + /* Size of the data argument. */ + __u32 size; + /* Flags, currently must be 0. */ + __u32 flags; + + /* Pointer to the data. */ + __u64 data; + + /** Returned GEM handle for the BO. */ + __u32 handle; + /* Pad, must be 0. */ + __u32 pad; +}; + +struct drm_vc4_get_hang_state_bo { + __u32 handle; + __u32 paddr; + __u32 size; + __u32 pad; +}; + +/** + * struct drm_vc4_hang_state - ioctl argument for collecting state + * from a GPU hang for analysis. +*/ +struct drm_vc4_get_hang_state { + /** Pointer to array of struct drm_vc4_get_hang_state_bo. */ + __u64 bo; + /** + * On input, the size of the bo array. Output is the number + * of bos to be returned. + */ + __u32 bo_count; + + __u32 start_bin, start_render; + + __u32 ct0ca, ct0ea; + __u32 ct1ca, ct1ea; + __u32 ct0cs, ct1cs; + __u32 ct0ra0, ct1ra0; + + __u32 bpca, bpcs; + __u32 bpoa, bpos; + + __u32 vpmbase; + + __u32 dbge; + __u32 fdbgo; + __u32 fdbgb; + __u32 fdbgr; + __u32 fdbgs; + __u32 errstat; + + /* Pad that we may save more registers into in the future. */ + __u32 pad[16]; +}; + +#endif /* _UAPI_VC4_DRM_H_ */ diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h index 45bc80c3714b..fa21ed185520 100644 --- a/include/uapi/drm/via_drm.h +++ b/include/uapi/drm/via_drm.h @@ -24,7 +24,7 @@ #ifndef _VIA_DRM_H_ #define _VIA_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. @@ -33,9 +33,6 @@ #ifndef _VIA_DEFINES_ #define _VIA_DEFINES_ -#ifndef __KERNEL__ -#include "via_drmclient.h" -#endif #define VIA_NR_SAREA_CLIPRECTS 8 #define VIA_NR_XVMC_PORTS 10 diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index fc9e2d6e5e2f..c74f1f90cb37 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -24,13 +24,12 @@ #ifndef VIRTGPU_DRM_H #define VIRTGPU_DRM_H -#include <stddef.h> -#include "drm/drm.h" +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. * - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel * compatibility Keep fields aligned to their size */ @@ -45,88 +44,88 @@ #define DRM_VIRTGPU_GET_CAPS 0x09 struct drm_virtgpu_map { - uint64_t offset; /* use for mmap system call */ - uint32_t handle; - uint32_t pad; + __u64 offset; /* use for mmap system call */ + __u32 handle; + __u32 pad; }; struct drm_virtgpu_execbuffer { - uint32_t flags; /* for future use */ - uint32_t size; - uint64_t command; /* void* */ - uint64_t bo_handles; - uint32_t num_bo_handles; - uint32_t pad; + __u32 flags; /* for future use */ + __u32 size; + __u64 command; /* void* */ + __u64 bo_handles; + __u32 num_bo_handles; + __u32 pad; }; #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ struct drm_virtgpu_getparam { - uint64_t param; - uint64_t value; + __u64 param; + __u64 value; }; /* NO_BO flags? NO resource flag? 
*/ /* resource flag for y_0_top */ struct drm_virtgpu_resource_create { - uint32_t target; - uint32_t format; - uint32_t bind; - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t array_size; - uint32_t last_level; - uint32_t nr_samples; - uint32_t flags; - uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ - uint32_t res_handle; /* returned by kernel */ - uint32_t size; /* validate transfer in the host */ - uint32_t stride; /* validate transfer in the host */ + __u32 target; + __u32 format; + __u32 bind; + __u32 width; + __u32 height; + __u32 depth; + __u32 array_size; + __u32 last_level; + __u32 nr_samples; + __u32 flags; + __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ + __u32 res_handle; /* returned by kernel */ + __u32 size; /* validate transfer in the host */ + __u32 stride; /* validate transfer in the host */ }; struct drm_virtgpu_resource_info { - uint32_t bo_handle; - uint32_t res_handle; - uint32_t size; - uint32_t stride; + __u32 bo_handle; + __u32 res_handle; + __u32 size; + __u32 stride; }; struct drm_virtgpu_3d_box { - uint32_t x; - uint32_t y; - uint32_t z; - uint32_t w; - uint32_t h; - uint32_t d; + __u32 x; + __u32 y; + __u32 z; + __u32 w; + __u32 h; + __u32 d; }; struct drm_virtgpu_3d_transfer_to_host { - uint32_t bo_handle; + __u32 bo_handle; struct drm_virtgpu_3d_box box; - uint32_t level; - uint32_t offset; + __u32 level; + __u32 offset; }; struct drm_virtgpu_3d_transfer_from_host { - uint32_t bo_handle; + __u32 bo_handle; struct drm_virtgpu_3d_box box; - uint32_t level; - uint32_t offset; + __u32 level; + __u32 offset; }; #define VIRTGPU_WAIT_NOWAIT 1 /* like it */ struct drm_virtgpu_3d_wait { - uint32_t handle; /* 0 is an invalid handle */ - uint32_t flags; + __u32 handle; /* 0 is an invalid handle */ + __u32 flags; }; struct drm_virtgpu_get_caps { - uint32_t cap_set_id; - uint32_t cap_set_ver; - uint64_t addr; - uint32_t size; - uint32_t pad; + __u32 cap_set_id; + __u32 cap_set_ver; + __u64 addr; + __u32 size; + __u32 pad; }; #define DRM_IOCTL_VIRTGPU_MAP \ diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 05b204954d16..5b68b4d10884 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -28,9 +28,7 @@ #ifndef __VMWGFX_DRM_H__ #define __VMWGFX_DRM_H__ -#ifndef __KERNEL__ -#include <drm/drm.h> -#endif +#include "drm.h" #define DRM_VMW_MAX_SURFACE_FACES 6 #define DRM_VMW_MAX_MIP_LEVELS 24 @@ -111,9 +109,9 @@ enum drm_vmw_handle_type { */ struct drm_vmw_getparam_arg { - uint64_t value; - uint32_t param; - uint32_t pad64; + __u64 value; + __u32 param; + __u32 pad64; }; /*************************************************************************/ @@ -134,8 +132,8 @@ struct drm_vmw_getparam_arg { */ struct drm_vmw_context_arg { - int32_t cid; - uint32_t pad64; + __s32 cid; + __u32 pad64; }; /*************************************************************************/ @@ -165,7 +163,7 @@ struct drm_vmw_context_arg { * @mip_levels: Number of mip levels for each face. * An unused face should have 0 encoded. * @size_addr: Address of a user-space array of sruct drm_vmw_size - * cast to an uint64_t for 32-64 bit compatibility. + * cast to an __u64 for 32-64 bit compatibility. * The size of the array should equal the total number of mipmap levels. * @shareable: Boolean whether other clients (as identified by file descriptors) * may reference this surface. 
@@ -177,12 +175,12 @@ struct drm_vmw_context_arg { */ struct drm_vmw_surface_create_req { - uint32_t flags; - uint32_t format; - uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; - uint64_t size_addr; - int32_t shareable; - int32_t scanout; + __u32 flags; + __u32 format; + __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES]; + __u64 size_addr; + __s32 shareable; + __s32 scanout; }; /** @@ -197,7 +195,7 @@ struct drm_vmw_surface_create_req { */ struct drm_vmw_surface_arg { - int32_t sid; + __s32 sid; enum drm_vmw_handle_type handle_type; }; @@ -213,10 +211,10 @@ struct drm_vmw_surface_arg { */ struct drm_vmw_size { - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t pad64; + __u32 width; + __u32 height; + __u32 depth; + __u32 pad64; }; /** @@ -284,13 +282,13 @@ union drm_vmw_surface_reference_arg { /** * struct drm_vmw_execbuf_arg * - * @commands: User-space address of a command buffer cast to an uint64_t. + * @commands: User-space address of a command buffer cast to an __u64. * @command-size: Size in bytes of the command buffer. * @throttle-us: Sleep until software is less than @throttle_us * microseconds ahead of hardware. The driver may round this value * to the nearest kernel tick. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an - * uint64_t. + * __u64. * @version: Allows expanding the execbuf ioctl parameters without breaking * backwards compatibility, since user-space will always tell the kernel * which version it uses. @@ -302,14 +300,14 @@ union drm_vmw_surface_reference_arg { #define DRM_VMW_EXECBUF_VERSION 2 struct drm_vmw_execbuf_arg { - uint64_t commands; - uint32_t command_size; - uint32_t throttle_us; - uint64_t fence_rep; - uint32_t version; - uint32_t flags; - uint32_t context_handle; - uint32_t pad64; + __u64 commands; + __u32 command_size; + __u32 throttle_us; + __u64 fence_rep; + __u32 version; + __u32 flags; + __u32 context_handle; + __u32 pad64; }; /** @@ -338,12 +336,12 @@ struct drm_vmw_execbuf_arg { */ struct drm_vmw_fence_rep { - uint32_t handle; - uint32_t mask; - uint32_t seqno; - uint32_t passed_seqno; - uint32_t pad64; - int32_t error; + __u32 handle; + __u32 mask; + __u32 seqno; + __u32 passed_seqno; + __u32 pad64; + __s32 error; }; /*************************************************************************/ @@ -373,8 +371,8 @@ struct drm_vmw_fence_rep { */ struct drm_vmw_alloc_dmabuf_req { - uint32_t size; - uint32_t pad64; + __u32 size; + __u32 pad64; }; /** @@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req { */ struct drm_vmw_dmabuf_rep { - uint64_t map_handle; - uint32_t handle; - uint32_t cur_gmr_id; - uint32_t cur_gmr_offset; - uint32_t pad64; + __u64 map_handle; + __u32 handle; + __u32 cur_gmr_id; + __u32 cur_gmr_offset; + __u32 pad64; }; /** @@ -428,8 +426,8 @@ union drm_vmw_alloc_dmabuf_arg { */ struct drm_vmw_unref_dmabuf_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -452,10 +450,10 @@ struct drm_vmw_unref_dmabuf_arg { */ struct drm_vmw_rect { - int32_t x; - int32_t y; - uint32_t w; - uint32_t h; + __s32 x; + __s32 y; + __u32 w; + __u32 h; }; /** @@ -477,21 +475,21 @@ struct drm_vmw_rect { */ struct drm_vmw_control_stream_arg { - uint32_t stream_id; - uint32_t enabled; + __u32 stream_id; + __u32 enabled; - uint32_t flags; - uint32_t color_key; + __u32 flags; + __u32 color_key; - uint32_t handle; - uint32_t offset; - int32_t format; - uint32_t size; - uint32_t width; - uint32_t height; - uint32_t pitch[3]; + 
__u32 handle; + __u32 offset; + __s32 format; + __u32 size; + __u32 width; + __u32 height; + __u32 pitch[3]; - uint32_t pad64; + __u32 pad64; struct drm_vmw_rect src; struct drm_vmw_rect dst; }; @@ -519,12 +517,12 @@ struct drm_vmw_control_stream_arg { */ struct drm_vmw_cursor_bypass_arg { - uint32_t flags; - uint32_t crtc_id; - int32_t xpos; - int32_t ypos; - int32_t xhot; - int32_t yhot; + __u32 flags; + __u32 crtc_id; + __s32 xpos; + __s32 ypos; + __s32 xhot; + __s32 yhot; }; /*************************************************************************/ @@ -542,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg { */ struct drm_vmw_stream_arg { - uint32_t stream_id; - uint32_t pad64; + __u32 stream_id; + __u32 pad64; }; /*************************************************************************/ @@ -565,7 +563,7 @@ struct drm_vmw_stream_arg { /** * struct drm_vmw_get_3d_cap_arg * - * @buffer: Pointer to a buffer for capability data, cast to an uint64_t + * @buffer: Pointer to a buffer for capability data, cast to an __u64 * @size: Max size to copy * * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL @@ -573,9 +571,9 @@ struct drm_vmw_stream_arg { */ struct drm_vmw_get_3d_cap_arg { - uint64_t buffer; - uint32_t max_size; - uint32_t pad64; + __u64 buffer; + __u32 max_size; + __u32 pad64; }; /*************************************************************************/ @@ -624,14 +622,14 @@ struct drm_vmw_get_3d_cap_arg { */ struct drm_vmw_fence_wait_arg { - uint32_t handle; - int32_t cookie_valid; - uint64_t kernel_cookie; - uint64_t timeout_us; - int32_t lazy; - int32_t flags; - int32_t wait_options; - int32_t pad64; + __u32 handle; + __s32 cookie_valid; + __u64 kernel_cookie; + __u64 timeout_us; + __s32 lazy; + __s32 flags; + __s32 wait_options; + __s32 pad64; }; /*************************************************************************/ @@ -655,12 +653,12 @@ struct drm_vmw_fence_wait_arg { */ struct drm_vmw_fence_signaled_arg { - uint32_t handle; - uint32_t flags; - int32_t signaled; - uint32_t passed_seqno; - uint32_t signaled_flags; - uint32_t pad64; + __u32 handle; + __u32 flags; + __s32 signaled; + __u32 passed_seqno; + __u32 signaled_flags; + __u32 pad64; }; /*************************************************************************/ @@ -681,8 +679,8 @@ struct drm_vmw_fence_signaled_arg { */ struct drm_vmw_fence_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; @@ -703,9 +701,9 @@ struct drm_vmw_fence_arg { struct drm_vmw_event_fence { struct drm_event base; - uint64_t user_data; - uint32_t tv_sec; - uint32_t tv_usec; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; }; /* @@ -717,17 +715,17 @@ struct drm_vmw_event_fence { /** * struct drm_vmw_fence_event_arg * - * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if + * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if * the fence is not supposed to be referenced by user-space. * @user_info: Info to be delivered with the event. * @handle: Attach the event to this fence only. * @flags: A set of flags as defined above. */ struct drm_vmw_fence_event_arg { - uint64_t fence_rep; - uint64_t user_data; - uint32_t handle; - uint32_t flags; + __u64 fence_rep; + __u64 user_data; + __u32 handle; + __u32 flags; }; @@ -747,7 +745,7 @@ struct drm_vmw_fence_event_arg { * @sid: Surface id to present from. * @dest_x: X placement coordinate for surface. * @dest_y: Y placement coordinate for surface. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 
+ * @clips_ptr: Pointer to an array of clip rects cast to an __u64. * @num_clips: Number of cliprects given relative to the framebuffer origin, * in the same coordinate space as the frame buffer. * @pad64: Unused 64-bit padding. @@ -756,13 +754,13 @@ struct drm_vmw_fence_event_arg { */ struct drm_vmw_present_arg { - uint32_t fb_id; - uint32_t sid; - int32_t dest_x; - int32_t dest_y; - uint64_t clips_ptr; - uint32_t num_clips; - uint32_t pad64; + __u32 fb_id; + __u32 sid; + __s32 dest_x; + __s32 dest_y; + __u64 clips_ptr; + __u32 num_clips; + __u32 pad64; }; @@ -780,16 +778,16 @@ struct drm_vmw_present_arg { * struct drm_vmw_present_arg * @fb_id: fb_id to present / read back from. * @num_clips: Number of cliprects. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. - * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. + * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64. * If this member is NULL, then the ioctl should not return a fence. */ struct drm_vmw_present_readback_arg { - uint32_t fb_id; - uint32_t num_clips; - uint64_t clips_ptr; - uint64_t fence_rep; + __u32 fb_id; + __u32 num_clips; + __u64 clips_ptr; + __u64 fence_rep; }; /*************************************************************************/ @@ -805,14 +803,14 @@ struct drm_vmw_present_readback_arg { * struct drm_vmw_update_layout_arg * * @num_outputs: number of active connectors - * @rects: pointer to array of drm_vmw_rect cast to an uint64_t + * @rects: pointer to array of drm_vmw_rect cast to an __u64 * * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. */ struct drm_vmw_update_layout_arg { - uint32_t num_outputs; - uint32_t pad64; - uint64_t rects; + __u32 num_outputs; + __u32 pad64; + __u64 rects; }; @@ -849,10 +847,10 @@ enum drm_vmw_shader_type { */ struct drm_vmw_shader_create_arg { enum drm_vmw_shader_type shader_type; - uint32_t size; - uint32_t buffer_handle; - uint32_t shader_handle; - uint64_t offset; + __u32 size; + __u32 buffer_handle; + __u32 shader_handle; + __u64 offset; }; /*************************************************************************/ @@ -871,8 +869,8 @@ struct drm_vmw_shader_create_arg { * Input argument to the DRM_VMW_UNREF_SHADER ioctl. */ struct drm_vmw_shader_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -918,14 +916,14 @@ enum drm_vmw_surface_flags { * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. */ struct drm_vmw_gb_surface_create_req { - uint32_t svga3d_flags; - uint32_t format; - uint32_t mip_levels; + __u32 svga3d_flags; + __u32 format; + __u32 mip_levels; enum drm_vmw_surface_flags drm_surface_flags; - uint32_t multisample_count; - uint32_t autogen_filter; - uint32_t buffer_handle; - uint32_t array_size; + __u32 multisample_count; + __u32 autogen_filter; + __u32 buffer_handle; + __u32 array_size; struct drm_vmw_size base_size; }; @@ -944,11 +942,11 @@ struct drm_vmw_gb_surface_create_req { * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. 
*/ struct drm_vmw_gb_surface_create_rep { - uint32_t handle; - uint32_t backup_size; - uint32_t buffer_handle; - uint32_t buffer_size; - uint64_t buffer_map_handle; + __u32 handle; + __u32 backup_size; + __u32 buffer_handle; + __u32 buffer_size; + __u64 buffer_map_handle; }; /** @@ -1061,8 +1059,8 @@ enum drm_vmw_synccpu_op { struct drm_vmw_synccpu_arg { enum drm_vmw_synccpu_op op; enum drm_vmw_synccpu_flags flags; - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h index 4e828cf487bc..f5251045181a 100644 --- a/include/uapi/linux/agpgart.h +++ b/include/uapi/linux/agpgart.h @@ -52,6 +52,7 @@ #ifndef __KERNEL__ #include <linux/types.h> +#include <stdlib.h> struct agp_version { __u16 major; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9ea2d22fa2cb..aa6f8571de13 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -269,9 +269,29 @@ enum bpf_func_id { * Return: 0 on success */ BPF_FUNC_perf_event_output, + BPF_FUNC_skb_load_bytes, __BPF_FUNC_MAX_ID, }; +/* All flags used by eBPF helper functions, placed here. */ + +/* BPF_FUNC_skb_store_bytes flags. */ +#define BPF_F_RECOMPUTE_CSUM (1ULL << 0) + +/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. + * First 4 bits are for passing the header field size. + */ +#define BPF_F_HDR_FIELD_MASK 0xfULL + +/* BPF_FUNC_l4_csum_replace flags. */ +#define BPF_F_PSEUDO_HDR (1ULL << 4) + +/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ +#define BPF_F_INGRESS (1ULL << 0) + +/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ +#define BPF_F_TUNINFO_IPV6 (1ULL << 0) + /* user accessible mirror of in-kernel sk_buff. 
* new fields can only be added to the end of this structure */ @@ -295,7 +315,12 @@ struct __sk_buff { struct bpf_tunnel_key { __u32 tunnel_id; - __u32 remote_ipv4; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h index d3d14a59d2d5..49392564f9d6 100644 --- a/include/uapi/linux/dvb/video.h +++ b/include/uapi/linux/dvb/video.h @@ -26,7 +26,6 @@ #include <linux/types.h> #ifndef __KERNEL__ -#include <stdint.h> #include <time.h> #endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index cd1629170103..57fa39005e79 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -542,6 +542,7 @@ struct ethtool_pauseparam { * now deprecated * @ETH_SS_FEATURES: Device feature names * @ETH_SS_RSS_HASH_FUNCS: RSS hush function names + * @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -551,6 +552,7 @@ enum ethtool_stringset { ETH_SS_FEATURES, ETH_SS_RSS_HASH_FUNCS, ETH_SS_TUNABLES, + ETH_SS_PHY_STATS, }; /** @@ -1225,6 +1227,7 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ #define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */ #define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ +#define ETHTOOL_GPHYSTATS 0x0000004a /* get PHY-specific statistics */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index f15d980249b5..b8a5b3b86ad6 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -39,12 +39,48 @@ #define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ #define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + struct fstrim_range { __u64 start; __u64 len; __u64 minlen; }; +/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ +#define FILE_DEDUPE_RANGE_SAME 0 +#define FILE_DEDUPE_RANGE_DIFFERS 1 + +/* from struct btrfs_ioctl_file_extent_same_info */ +struct file_dedupe_range_info { + __s64 dest_fd; /* in - destination file */ + __u64 dest_offset; /* in - start of extent in destination */ + __u64 bytes_deduped; /* out - total # of bytes we were able + * to dedupe from this file. 
*/ + /* status of this dedupe operation: + * < 0 for error + * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds + * == FILE_DEDUPE_RANGE_DIFFERS if data differs + */ + __s32 status; /* out - see above description */ + __u32 reserved; /* must be zero */ +}; + +/* from struct btrfs_ioctl_file_extent_same_args */ +struct file_dedupe_range { + __u64 src_offset; /* in - start of extent in source */ + __u64 src_length; /* in - length of extent */ + __u16 dest_count; /* in - total elements in info array */ + __u16 reserved1; /* must be zero */ + __u32 reserved2; /* must be zero */ + struct file_dedupe_range_info info[0]; +}; + /* And dynamically-tunable limits and defaults: */ struct files_stat_struct { unsigned long nr_files; /* read only */ @@ -152,6 +188,8 @@ struct inodes_stat_t { #define BLKSECDISCARD _IO(0x12,125) #define BLKROTATIONAL _IO(0x12,126) #define BLKZEROOUT _IO(0x12,127) +#define BLKDAXSET _IO(0x12,128) +#define BLKDAXGET _IO(0x12,129) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ @@ -159,6 +197,9 @@ struct inodes_stat_t { #define FIFREEZE _IOWR('X', 119, int) /* Freeze */ #define FITHAW _IOWR('X', 120, int) /* Thaw */ #define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */ +#define FICLONE _IOW(0x94, 9, int) +#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) +#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range) #define FS_IOC_GETFLAGS _IOR('f', 1, long) #define FS_IOC_SETFLAGS _IOW('f', 2, long) diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h index 1a763eaae0bb..7c4be7711c81 100644 --- a/include/uapi/linux/gfs2_ondisk.h +++ b/include/uapi/linux/gfs2_ondisk.h @@ -297,6 +297,8 @@ struct gfs2_dinode { #define GFS2_FNAMESIZE 255 #define GFS2_DIRENT_SIZE(name_len) ((sizeof(struct gfs2_dirent) + (name_len) + 7) & ~7) +#define GFS2_MIN_DIRENT_SIZE (GFS2_DIRENT_SIZE(1)) + struct gfs2_dirent { struct gfs2_inum de_inum; @@ -304,11 +306,12 @@ struct gfs2_dirent { __be16 de_rec_len; __be16 de_name_len; __be16 de_type; + __be16 de_rahead; union { - __u8 __pad[14]; + __u8 __pad[12]; struct { - __be16 de_rahead; - __u8 pad2[12]; + __u32 de_cookie; /* ondisk value not used */ + __u8 pad3[8]; }; }; }; diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h index ca18c45f8304..ebf8fd885dd5 100644 --- a/include/uapi/linux/hash_info.h +++ b/include/uapi/linux/hash_info.h @@ -31,6 +31,7 @@ enum hash_algo { HASH_ALGO_TGR_128, HASH_ALGO_TGR_160, HASH_ALGO_TGR_192, + HASH_ALGO_SM3_256, HASH_ALGO__LAST }; diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index e4c0a35d6417..e347b24ef9fb 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool { #define HV_INVALIDARG 0x80070057 #define HV_GUID_NOTFOUND 0x80041002 #define HV_ERROR_ALREADY_EXISTS 0x80070050 +#define HV_ERROR_DISK_FULL 0x80070070 #define ADDR_FAMILY_NONE 0x00 #define ADDR_FAMILY_IPV4 0x01 diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5ad57375a99f..a30b78090594 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -218,6 +218,7 @@ enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, + IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ @@ -462,6 +463,9 @@ enum { IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, IFLA_GENEVE_REMOTE6, + IFLA_GENEVE_UDP_CSUM, + IFLA_GENEVE_UDP_ZERO_CSUM6_TX, + 
IFLA_GENEVE_UDP_ZERO_CSUM6_RX, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h index 7ed9e670814e..abde7bbd6f3b 100644 --- a/include/uapi/linux/ila.h +++ b/include/uapi/linux/ila.h @@ -3,13 +3,35 @@ #ifndef _UAPI_LINUX_ILA_H #define _UAPI_LINUX_ILA_H +/* NETLINK_GENERIC related info */ +#define ILA_GENL_NAME "ila" +#define ILA_GENL_VERSION 0x1 + enum { ILA_ATTR_UNSPEC, ILA_ATTR_LOCATOR, /* u64 */ + ILA_ATTR_IDENTIFIER, /* u64 */ + ILA_ATTR_LOCATOR_MATCH, /* u64 */ + ILA_ATTR_IFINDEX, /* s32 */ + ILA_ATTR_DIR, /* u32 */ __ILA_ATTR_MAX, }; #define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1) +enum { + ILA_CMD_UNSPEC, + ILA_CMD_ADD, + ILA_CMD_DEL, + ILA_CMD_GET, + + __ILA_CMD_MAX, +}; + +#define ILA_CMD_MAX (__ILA_CMD_MAX - 1) + +#define ILA_DIR_IN (1 << 0) +#define ILA_DIR_OUT (1 << 1) + #endif /* _UAPI_LINUX_ILA_H */ diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 79b12b004ade..318a4828bf98 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -196,6 +196,7 @@ struct in6_flowlabel_req { #define IPV6_IPSEC_POLICY 34 #define IPV6_XFRM_POLICY 35 +#define IPV6_HDRINCL 36 #endif /* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 03f3618612aa..9da905157cee 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -154,6 +154,20 @@ struct kvm_s390_skeys { __u32 flags; __u32 reserved[9]; }; + +struct kvm_hyperv_exit { +#define KVM_EXIT_HYPERV_SYNIC 1 + __u32 type; + union { + struct { + __u32 msr; + __u64 control; + __u64 evt_page; + __u64 msg_page; + } synic; + } u; +}; + #define KVM_S390_GET_SKEYS_NONE 1 #define KVM_S390_SKEYS_MAX 1048576 @@ -184,6 +198,7 @@ struct kvm_s390_skeys { #define KVM_EXIT_SYSTEM_EVENT 24 #define KVM_EXIT_S390_STSI 25 #define KVM_EXIT_IOAPIC_EOI 26 +#define KVM_EXIT_HYPERV 27 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -338,6 +353,8 @@ struct kvm_run { struct { __u8 vector; } eoi; + /* KVM_EXIT_HYPERV */ + struct kvm_hyperv_exit hyperv; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -831,6 +848,8 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_GUEST_DEBUG_HW_WPS 120 #define KVM_CAP_SPLIT_IRQCHIP 121 #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 +#define KVM_CAP_HYPERV_SYNIC 123 +#define KVM_CAP_S390_RI 124 #ifdef KVM_CAP_IRQ_ROUTING @@ -854,10 +873,16 @@ struct kvm_irq_routing_s390_adapter { __u32 adapter_id; }; +struct kvm_irq_routing_hv_sint { + __u32 vcpu; + __u32 sint; +}; + /* gsi routing entry types */ #define KVM_IRQ_ROUTING_IRQCHIP 1 #define KVM_IRQ_ROUTING_MSI 2 #define KVM_IRQ_ROUTING_S390_ADAPTER 3 +#define KVM_IRQ_ROUTING_HV_SINT 4 struct kvm_irq_routing_entry { __u32 gsi; @@ -868,6 +893,7 @@ struct kvm_irq_routing_entry { struct kvm_irq_routing_irqchip irqchip; struct kvm_irq_routing_msi msi; struct kvm_irq_routing_s390_adapter adapter; + struct kvm_irq_routing_hv_sint hv_sint; __u32 pad[8]; } u; }; diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h new file mode 100644 index 000000000000..4b3ab2966b5a --- /dev/null +++ b/include/uapi/linux/lirc.h @@ -0,0 +1,168 @@ +/* + * lirc.h - linux infrared remote control header file + * last modified 2010/07/13 by Jarod Wilson + */ + +#ifndef _LINUX_LIRC_H +#define _LINUX_LIRC_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define PULSE_BIT 0x01000000 +#define PULSE_MASK 0x00FFFFFF + +#define LIRC_MODE2_SPACE 0x00000000 +#define LIRC_MODE2_PULSE 0x01000000 +#define LIRC_MODE2_FREQUENCY 0x02000000 +#define LIRC_MODE2_TIMEOUT 0x03000000 + +#define LIRC_VALUE_MASK 0x00FFFFFF +#define LIRC_MODE2_MASK 0xFF000000 + +#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) +#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) +#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) +#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) + +#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) +#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) + +#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) +#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) +#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) +#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) + +/* used heavily by lirc userspace */ +#define lirc_t int + +/*** lirc compatible hardware features ***/ + +#define LIRC_MODE2SEND(x) (x) +#define LIRC_SEND2MODE(x) (x) +#define LIRC_MODE2REC(x) ((x) << 16) +#define LIRC_REC2MODE(x) ((x) >> 16) + +#define LIRC_MODE_RAW 0x00000001 +#define LIRC_MODE_PULSE 0x00000002 +#define LIRC_MODE_MODE2 0x00000004 +#define LIRC_MODE_LIRCCODE 0x00000010 + + +#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) +#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) +#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) +#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_SEND_MASK 0x0000003f + +#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 +#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 +#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 + +#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) +#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) +#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) +#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) + +#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) +#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) + +#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 
0x40000000 +#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 +#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 +#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 +#define LIRC_CAN_SET_REC_FILTER 0x08000000 + +#define LIRC_CAN_MEASURE_CARRIER 0x02000000 +#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 + +#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) +#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) + +#define LIRC_CAN_NOTIFY_DECODE 0x01000000 + +/*** IOCTL commands for lirc driver ***/ + +#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) + +#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) +#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) +#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) +#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) +#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) +#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) +#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) + +#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) +#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) + +#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) +#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) +#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) +#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) + +/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ +#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) + +#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) +#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) +/* Note: these can reset the according pulse_width */ +#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) +#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) +#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) +#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) +#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) + +/* + * when a timeout != 0 is set the driver will send a + * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is + * never sent, timeout is disabled by default + */ +#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) + +/* 1 enables, 0 disables timeout reports in MODE2 */ +#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) + +/* + * pulses shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) + */ +#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x0000001a, __u32) +/* + * spaces shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) 
+ */ +#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001b, __u32) +/* + * if filter cannot be set independently for pulse/space, this should + * be used + */ +#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001c, __u32) + +/* + * if enabled from the next key press on the driver will send + * LIRC_MODE2_FREQUENCY packets + */ +#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) + +/* + * to set a range use + * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the + * lower bound first and later + * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound + */ + +#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) +#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) + +#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) + +#define LIRC_SETUP_START _IO('i', 0x00000021) +#define LIRC_SETUP_END _IO('i', 0x00000022) + +#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) + +#endif diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index accb036bbc9c..b283d56c1db9 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -54,6 +54,7 @@ #define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb +#define CGROUP2_SUPER_MAGIC 0x63677270 #define STACK_END_MAGIC 0x57AC6E9D diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index 4e816be3de39..1e3c8cb43bd7 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -23,6 +23,9 @@ #ifndef __LINUX_MEDIA_H #define __LINUX_MEDIA_H +#ifndef __KERNEL__ +#include <stdint.h> +#endif #include <linux/ioctl.h> #include <linux/types.h> #include <linux/version.h> @@ -42,33 +45,107 @@ struct media_device_info { #define MEDIA_ENT_ID_FLAG_NEXT (1 << 31) +/* + * Initial value to be used when a new entity is created + * Drivers should change it to something useful + */ +#define MEDIA_ENT_F_UNKNOWN 0x00000000 + +/* + * Base number ranges for entity functions + * + * NOTE: those ranges and entity function number are phased just to + * make it easier to maintain this file. Userspace should not rely on + * the ranges to identify a group of function types, as newer + * functions can be added with any name within the full u32 range. + */ +#define MEDIA_ENT_F_BASE 0x00000000 +#define MEDIA_ENT_F_OLD_BASE 0x00010000 +#define MEDIA_ENT_F_OLD_SUBDEV_BASE 0x00020000 + +/* + * DVB entities + */ +#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 1) +#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 2) +#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 3) +#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 4) + +/* + * Connectors + */ +/* It is a responsibility of the entity drivers to add connectors and links */ +#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 21) +#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 22) +#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 23) +/* For internal test signal generators and other debug connectors */ +#define MEDIA_ENT_F_CONN_TEST (MEDIA_ENT_F_BASE + 24) + +/* + * I/O entities + */ +#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 31) +#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 32) +#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 33) + +/* + * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and + * MEDIA_ENT_F_OLD_SUBDEV_BASE are kept to keep backward compatibility + * with the legacy v1 API.The number range is out of range by purpose: + * several previously reserved numbers got excluded from this range. 
+ * + * Subdevs are initialized with MEDIA_ENT_T_V4L2_SUBDEV_UNKNOWN, + * in order to preserve backward compatibility. + * Drivers must change to the proper subdev type before + * registering the entity. + */ + +#define MEDIA_ENT_F_IO_V4L (MEDIA_ENT_F_OLD_BASE + 1) + +#define MEDIA_ENT_F_CAM_SENSOR (MEDIA_ENT_F_OLD_SUBDEV_BASE + 1) +#define MEDIA_ENT_F_FLASH (MEDIA_ENT_F_OLD_SUBDEV_BASE + 2) +#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3) +#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4) +/* + * It is a responsibility of the entity drivers to add connectors and links + * for the tuner entities. + */ +#define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5) + +#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE + +#ifndef __KERNEL__ + +/* + * Legacy symbols used to avoid userspace compilation breakages + * + * Those symbols map the entity function into types and should be + * used only on legacy programs for legacy hardware. Don't rely + * on those for MEDIA_IOC_G_TOPOLOGY. + */ #define MEDIA_ENT_TYPE_SHIFT 16 #define MEDIA_ENT_TYPE_MASK 0x00ff0000 #define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff -#define MEDIA_ENT_T_DEVNODE (1 << MEDIA_ENT_TYPE_SHIFT) -#define MEDIA_ENT_T_DEVNODE_V4L (MEDIA_ENT_T_DEVNODE + 1) +#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE +#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L #define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2) #define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3) -#define MEDIA_ENT_T_DEVNODE_DVB_FE (MEDIA_ENT_T_DEVNODE + 4) -#define MEDIA_ENT_T_DEVNODE_DVB_DEMUX (MEDIA_ENT_T_DEVNODE + 5) -#define MEDIA_ENT_T_DEVNODE_DVB_DVR (MEDIA_ENT_T_DEVNODE + 6) -#define MEDIA_ENT_T_DEVNODE_DVB_CA (MEDIA_ENT_T_DEVNODE + 7) -#define MEDIA_ENT_T_DEVNODE_DVB_NET (MEDIA_ENT_T_DEVNODE + 8) - -/* Legacy symbol. Use it to avoid userspace compilation breakages */ -#define MEDIA_ENT_T_DEVNODE_DVB MEDIA_ENT_T_DEVNODE_DVB_FE - -#define MEDIA_ENT_T_V4L2_SUBDEV (2 << MEDIA_ENT_TYPE_SHIFT) -#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR (MEDIA_ENT_T_V4L2_SUBDEV + 1) -#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH (MEDIA_ENT_T_V4L2_SUBDEV + 2) -#define MEDIA_ENT_T_V4L2_SUBDEV_LENS (MEDIA_ENT_T_V4L2_SUBDEV + 3) -/* A converter of analogue video to its digital representation. 
*/ -#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER (MEDIA_ENT_T_V4L2_SUBDEV + 4) +#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4) -#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER (MEDIA_ENT_T_V4L2_SUBDEV + 5) +#define MEDIA_ENT_T_UNKNOWN MEDIA_ENT_F_UNKNOWN +#define MEDIA_ENT_T_V4L2_VIDEO MEDIA_ENT_F_IO_V4L +#define MEDIA_ENT_T_V4L2_SUBDEV MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN +#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR MEDIA_ENT_F_CAM_SENSOR +#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH MEDIA_ENT_F_FLASH +#define MEDIA_ENT_T_V4L2_SUBDEV_LENS MEDIA_ENT_F_LENS +#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER +#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER +#endif +/* Entity flags */ #define MEDIA_ENT_FL_DEFAULT (1 << 0) +#define MEDIA_ENT_FL_CONNECTOR (1 << 1) struct media_entity_desc { __u32 id; @@ -151,6 +228,10 @@ struct media_pad_desc { #define MEDIA_LNK_FL_IMMUTABLE (1 << 1) #define MEDIA_LNK_FL_DYNAMIC (1 << 2) +#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28) +# define MEDIA_LNK_FL_DATA_LINK (0 << 28) +# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28) + struct media_link_desc { struct media_pad_desc source; struct media_pad_desc sink; @@ -167,9 +248,120 @@ struct media_links_enum { __u32 reserved[4]; }; +/* Interface type ranges */ + +#define MEDIA_INTF_T_DVB_BASE 0x00000100 +#define MEDIA_INTF_T_V4L_BASE 0x00000200 + +/* Interface types */ + +#define MEDIA_INTF_T_DVB_FE (MEDIA_INTF_T_DVB_BASE) +#define MEDIA_INTF_T_DVB_DEMUX (MEDIA_INTF_T_DVB_BASE + 1) +#define MEDIA_INTF_T_DVB_DVR (MEDIA_INTF_T_DVB_BASE + 2) +#define MEDIA_INTF_T_DVB_CA (MEDIA_INTF_T_DVB_BASE + 3) +#define MEDIA_INTF_T_DVB_NET (MEDIA_INTF_T_DVB_BASE + 4) + +#define MEDIA_INTF_T_V4L_VIDEO (MEDIA_INTF_T_V4L_BASE) +#define MEDIA_INTF_T_V4L_VBI (MEDIA_INTF_T_V4L_BASE + 1) +#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2) +#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3) +#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4) + +/* + * MC next gen API definitions + * + * NOTE: The declarations below are close to the MC RFC for the Media + * Controller, the next generation. Yet, there are a few adjustments + * to do, as we want to be able to have a functional API before + * the MC properties change. Those will be properly marked below. + * Please also notice that I removed "num_pads", "num_links", + * from the proposal, as a proper userspace application will likely + * use lists for pads/links, just as we intend to do in Kernelspace. + * The API definition should be freed from fields that are bound to + * some specific data structure. + * + * FIXME: Currently, I opted to name the new types as "media_v2", as this + * won't cause any conflict with the Kernelspace namespace, nor with + * the previous kAPI media_*_desc namespace. This can be changed + * later, before the adding this API upstream. + */ + +#if 0 /* Let's postpone it to Kernel 4.6 */ +struct media_v2_entity { + __u32 id; + char name[64]; /* FIXME: move to a property? 
(RFC says so) */ + __u32 function; /* Main function of the entity */ + __u16 reserved[12]; +}; + +/* Should match the specific fields at media_intf_devnode */ +struct media_v2_intf_devnode { + __u32 major; + __u32 minor; +}; + +struct media_v2_interface { + __u32 id; + __u32 intf_type; + __u32 flags; + __u32 reserved[9]; + + union { + struct media_v2_intf_devnode devnode; + __u32 raw[16]; + }; +}; + +struct media_v2_pad { + __u32 id; + __u32 entity_id; + __u32 flags; + __u16 reserved[9]; +}; + +struct media_v2_link { + __u32 id; + __u32 source_id; + __u32 sink_id; + __u32 flags; + __u32 reserved[5]; +}; + +struct media_v2_topology { + __u64 topology_version; + + __u32 num_entities; + __u32 reserved1; + __u64 ptr_entities; + + __u32 num_interfaces; + __u32 reserved2; + __u64 ptr_interfaces; + + __u32 num_pads; + __u32 reserved3; + __u64 ptr_pads; + + __u32 num_links; + __u32 reserved4; + __u64 ptr_links; +}; + +static inline void __user *media_get_uptr(__u64 arg) +{ + return (void __user *)(uintptr_t)arg; +} +#endif + +/* ioctls */ + #define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info) #define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc) #define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) #define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) +#if 0 /* Let's postpone it to Kernel 4.6 */ +#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) +#endif + #endif /* __LINUX_MEDIA_H */ diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index a382d2c04a42..cf943016930f 100644 --- a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -4,15 +4,13 @@ #include <linux/sockios.h> #include <linux/types.h> -/* - * Based on the MROUTING 3.5 defines primarily to keep - * source compatibility with BSD. +/* Based on the MROUTING 3.5 defines primarily to keep + * source compatibility with BSD. * - * See the mrouted code for the original history. - * - * Protocol Independent Multicast (PIM) data structures included - * Carlos Picoto (cap@di.fc.ul.pt) + * See the mrouted code for the original history. 
* + * Protocol Independent Multicast (PIM) data structures included + * Carlos Picoto (cap@di.fc.ul.pt) */ #define MRT_BASE 200 @@ -34,15 +32,13 @@ #define SIOCGETSGCNT (SIOCPROTOPRIVATE+1) #define SIOCGETRPF (SIOCPROTOPRIVATE+2) -#define MAXVIFS 32 +#define MAXVIFS 32 typedef unsigned long vifbitmap_t; /* User mode code depends on this lot */ typedef unsigned short vifi_t; #define ALL_VIFS ((vifi_t)(-1)) -/* - * Same idea as select - */ - +/* Same idea as select */ + #define VIFM_SET(n,m) ((m)|=(1<<(n))) #define VIFM_CLR(n,m) ((m)&=~(1<<(n))) #define VIFM_ISSET(n,m) ((m)&(1<<(n))) @@ -50,11 +46,9 @@ typedef unsigned short vifi_t; #define VIFM_COPY(mfrom,mto) ((mto)=(mfrom)) #define VIFM_SAME(m1,m2) ((m1)==(m2)) -/* - * Passed by mrouted for an MRT_ADD_VIF - again we use the - * mrouted 3.6 structures for compatibility +/* Passed by mrouted for an MRT_ADD_VIF - again we use the + * mrouted 3.6 structures for compatibility */ - struct vifctl { vifi_t vifc_vifi; /* Index of VIF */ unsigned char vifc_flags; /* VIFF_ flags */ @@ -73,10 +67,7 @@ struct vifctl { #define VIFF_USE_IFINDEX 0x8 /* use vifc_lcl_ifindex instead of vifc_lcl_addr to find an interface */ -/* - * Cache manipulation structures for mrouted and PIMd - */ - +/* Cache manipulation structures for mrouted and PIMd */ struct mfcctl { struct in_addr mfcc_origin; /* Origin of mcast */ struct in_addr mfcc_mcastgrp; /* Group in question */ @@ -88,10 +79,7 @@ struct mfcctl { int mfcc_expire; }; -/* - * Group count retrieval for mrouted - */ - +/* Group count retrieval for mrouted */ struct sioc_sg_req { struct in_addr src; struct in_addr grp; @@ -100,10 +88,7 @@ struct sioc_sg_req { unsigned long wrong_if; }; -/* - * To get vif packet counts - */ - +/* To get vif packet counts */ struct sioc_vif_req { vifi_t vifi; /* Which iface */ unsigned long icount; /* In packets */ @@ -112,11 +97,9 @@ struct sioc_vif_req { unsigned long obytes; /* Out bytes */ }; -/* - * This is the format the mroute daemon expects to see IGMP control - * data. Magically happens to be like an IP packet as per the original +/* This is the format the mroute daemon expects to see IGMP control + * data. Magically happens to be like an IP packet as per the original */ - struct igmpmsg { __u32 unused1,unused2; unsigned char im_msgtype; /* What is this */ @@ -126,21 +109,13 @@ struct igmpmsg { struct in_addr im_src,im_dst; }; -/* - * That's all usermode folks - */ - - +/* That's all usermode folks */ #define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. 
of asserts */ -/* - * Pseudo messages used by mrouted - */ - +/* Pseudo messages used by mrouted */ #define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */ #define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */ #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ - #endif /* _UAPI__LINUX_MROUTE_H */ diff --git a/include/uapi/linux/netfilter/ipset/ip_set_bitmap.h b/include/uapi/linux/netfilter/ipset/ip_set_bitmap.h index 6a2c038d1888..fd5024d26269 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set_bitmap.h +++ b/include/uapi/linux/netfilter/ipset/ip_set_bitmap.h @@ -1,6 +1,8 @@ #ifndef _UAPI__IP_SET_BITMAP_H #define _UAPI__IP_SET_BITMAP_H +#include <linux/netfilter/ipset/ip_set.h> + /* Bitmap type specific error codes */ enum { /* The element is out of the range of the set */ diff --git a/include/uapi/linux/netfilter/ipset/ip_set_hash.h b/include/uapi/linux/netfilter/ipset/ip_set_hash.h index 352eeccdc7f2..82deeb883ac4 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set_hash.h +++ b/include/uapi/linux/netfilter/ipset/ip_set_hash.h @@ -1,6 +1,8 @@ #ifndef _UAPI__IP_SET_HASH_H #define _UAPI__IP_SET_HASH_H +#include <linux/netfilter/ipset/ip_set.h> + /* Hash type specific error codes */ enum { /* Hash is full */ diff --git a/include/uapi/linux/netfilter/ipset/ip_set_list.h b/include/uapi/linux/netfilter/ipset/ip_set_list.h index a44efaa98213..84d430368266 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set_list.h +++ b/include/uapi/linux/netfilter/ipset/ip_set_list.h @@ -1,6 +1,8 @@ #ifndef _UAPI__IP_SET_LIST_H #define _UAPI__IP_SET_LIST_H +#include <linux/netfilter/ipset/ip_set.h> + /* List type specific error codes */ enum { /* Set name to be added/deleted/tested does not exist. */ diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h index ed4e776e1242..2cbc366c3fb4 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h +++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h @@ -1,5 +1,5 @@ -#ifndef _NF_CONNTRACK_SCTP_H -#define _NF_CONNTRACK_SCTP_H +#ifndef _UAPI_NF_CONNTRACK_SCTP_H +#define _UAPI_NF_CONNTRACK_SCTP_H /* SCTP tracking. 
*/ #include <linux/netfilter/nf_conntrack_tuple_common.h> @@ -18,10 +18,4 @@ enum sctp_conntrack { SCTP_CONNTRACK_MAX }; -struct ip_ct_sctp { - enum sctp_conntrack state; - - __be32 vtag[IP_CT_DIR_MAX]; -}; - -#endif /* _NF_CONNTRACK_SCTP_H */ +#endif /* _UAPI_NF_CONNTRACK_SCTP_H */ diff --git a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h index 2f6bbc5b8125..a9c3834abdd4 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_tuple_common.h @@ -1,6 +1,9 @@ #ifndef _NF_CONNTRACK_TUPLE_COMMON_H #define _NF_CONNTRACK_TUPLE_COMMON_H +#include <linux/types.h> +#include <linux/netfilter.h> + enum ip_conntrack_dir { IP_CT_DIR_ORIGINAL, IP_CT_DIR_REPLY, diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index d8c8a7c9d88a..be41ffc128b8 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -83,6 +83,7 @@ enum nft_verdicts { * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes) * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes) * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes) + * @NFT_MSG_TRACE: trace event (enum nft_trace_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -102,6 +103,7 @@ enum nf_tables_msg_types { NFT_MSG_DELSETELEM, NFT_MSG_NEWGEN, NFT_MSG_GETGEN, + NFT_MSG_TRACE, NFT_MSG_MAX, }; @@ -289,6 +291,7 @@ enum nft_set_desc_attributes { * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32) * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64) * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32) + * @NFTA_SET_USERDATA: user data (NLA_BINARY) */ enum nft_set_attributes { NFTA_SET_UNSPEC, @@ -304,6 +307,7 @@ enum nft_set_attributes { NFTA_SET_ID, NFTA_SET_TIMEOUT, NFTA_SET_GC_INTERVAL, + NFTA_SET_USERDATA, __NFTA_SET_MAX }; #define NFTA_SET_MAX (__NFTA_SET_MAX - 1) @@ -598,12 +602,26 @@ enum nft_payload_bases { }; /** + * enum nft_payload_csum_types - nf_tables payload expression checksum types + * + * @NFT_PAYLOAD_CSUM_NONE: no checksumming + * @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791) + */ +enum nft_payload_csum_types { + NFT_PAYLOAD_CSUM_NONE, + NFT_PAYLOAD_CSUM_INET, +}; + +/** * enum nft_payload_attributes - nf_tables payload expression netlink attributes * * @NFTA_PAYLOAD_DREG: destination register to load data into (NLA_U32: nft_registers) * @NFTA_PAYLOAD_BASE: payload base (NLA_U32: nft_payload_bases) * @NFTA_PAYLOAD_OFFSET: payload offset relative to base (NLA_U32) * @NFTA_PAYLOAD_LEN: payload length (NLA_U32) + * @NFTA_PAYLOAD_SREG: source register to load data from (NLA_U32: nft_registers) + * @NFTA_PAYLOAD_CSUM_TYPE: checksum type (NLA_U32) + * @NFTA_PAYLOAD_CSUM_OFFSET: checksum offset relative to base (NLA_U32) */ enum nft_payload_attributes { NFTA_PAYLOAD_UNSPEC, @@ -611,6 +629,9 @@ enum nft_payload_attributes { NFTA_PAYLOAD_BASE, NFTA_PAYLOAD_OFFSET, NFTA_PAYLOAD_LEN, + NFTA_PAYLOAD_SREG, + NFTA_PAYLOAD_CSUM_TYPE, + NFTA_PAYLOAD_CSUM_OFFSET, __NFTA_PAYLOAD_MAX }; #define NFTA_PAYLOAD_MAX (__NFTA_PAYLOAD_MAX - 1) @@ -736,6 +757,8 @@ enum nft_ct_keys { NFT_CT_PROTO_SRC, NFT_CT_PROTO_DST, NFT_CT_LABELS, + NFT_CT_PKTS, + NFT_CT_BYTES, }; /** @@ -761,6 +784,10 @@ enum nft_limit_type { NFT_LIMIT_PKT_BYTES }; +enum nft_limit_flags { + NFT_LIMIT_F_INV = (1 << 0), +}; + /** * enum nft_limit_attributes - nf_tables limit expression 
netlink attributes * @@ -768,6 +795,7 @@ enum nft_limit_type { * @NFTA_LIMIT_UNIT: refill unit (NLA_U64) * @NFTA_LIMIT_BURST: burst (NLA_U32) * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type) + * @NFTA_LIMIT_FLAGS: flags (NLA_U32: enum nft_limit_flags) */ enum nft_limit_attributes { NFTA_LIMIT_UNSPEC, @@ -775,6 +803,7 @@ enum nft_limit_attributes { NFTA_LIMIT_UNIT, NFTA_LIMIT_BURST, NFTA_LIMIT_TYPE, + NFTA_LIMIT_FLAGS, __NFTA_LIMIT_MAX }; #define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1) @@ -959,6 +988,18 @@ enum nft_dup_attributes { #define NFTA_DUP_MAX (__NFTA_DUP_MAX - 1) /** + * enum nft_fwd_attributes - nf_tables fwd expression netlink attributes + * + * @NFTA_FWD_SREG_DEV: source register of output interface (NLA_U32: nft_register) + */ +enum nft_fwd_attributes { + NFTA_FWD_UNSPEC, + NFTA_FWD_SREG_DEV, + __NFTA_FWD_MAX +}; +#define NFTA_FWD_MAX (__NFTA_FWD_MAX - 1) + +/** * enum nft_gen_attributes - nf_tables ruleset generation attributes * * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) @@ -970,4 +1011,54 @@ enum nft_gen_attributes { }; #define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1) +/** + * enum nft_trace_attributes - nf_tables trace netlink attributes + * + * @NFTA_TRACE_TABLE: name of the table (NLA_STRING) + * @NFTA_TRACE_CHAIN: name of the chain (NLA_STRING) + * @NFTA_TRACE_RULE_HANDLE: numeric handle of the rule (NLA_U64) + * @NFTA_TRACE_TYPE: type of the event (NLA_U32: nft_trace_types) + * @NFTA_TRACE_VERDICT: verdict returned by hook (NLA_NESTED: nft_verdicts) + * @NFTA_TRACE_ID: pseudo-id, same for each skb traced (NLA_U32) + * @NFTA_TRACE_LL_HEADER: linklayer header (NLA_BINARY) + * @NFTA_TRACE_NETWORK_HEADER: network header (NLA_BINARY) + * @NFTA_TRACE_TRANSPORT_HEADER: transport header (NLA_BINARY) + * @NFTA_TRACE_IIF: indev ifindex (NLA_U32) + * @NFTA_TRACE_IIFTYPE: netdev->type of indev (NLA_U16) + * @NFTA_TRACE_OIF: outdev ifindex (NLA_U32) + * @NFTA_TRACE_OIFTYPE: netdev->type of outdev (NLA_U16) + * @NFTA_TRACE_MARK: nfmark (NLA_U32) + * @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32) + * @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32) + */ +enum nft_trace_attibutes { + NFTA_TRACE_UNSPEC, + NFTA_TRACE_TABLE, + NFTA_TRACE_CHAIN, + NFTA_TRACE_RULE_HANDLE, + NFTA_TRACE_TYPE, + NFTA_TRACE_VERDICT, + NFTA_TRACE_ID, + NFTA_TRACE_LL_HEADER, + NFTA_TRACE_NETWORK_HEADER, + NFTA_TRACE_TRANSPORT_HEADER, + NFTA_TRACE_IIF, + NFTA_TRACE_IIFTYPE, + NFTA_TRACE_OIF, + NFTA_TRACE_OIFTYPE, + NFTA_TRACE_MARK, + NFTA_TRACE_NFPROTO, + NFTA_TRACE_POLICY, + __NFTA_TRACE_MAX +}; +#define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1) + +enum nft_trace_types { + NFT_TRACETYPE_UNSPEC, + NFT_TRACETYPE_POLICY, + NFT_TRACETYPE_RETURN, + NFT_TRACETYPE_RULE, + __NFT_TRACETYPE_MAX +}; +#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1) #endif /* _LINUX_NF_TABLES_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h index 354a7e5e50f2..4bb8cb7730e7 100644 --- a/include/uapi/linux/netfilter/nfnetlink.h +++ b/include/uapi/linux/netfilter/nfnetlink.h @@ -22,6 +22,8 @@ enum nfnetlink_groups { #define NFNLGRP_NFTABLES NFNLGRP_NFTABLES NFNLGRP_ACCT_QUOTA, #define NFNLGRP_ACCT_QUOTA NFNLGRP_ACCT_QUOTA + NFNLGRP_NFTRACE, +#define NFNLGRP_NFTRACE NFNLGRP_NFTRACE __NFNLGRP_MAX, }; #define NFNLGRP_MAX (__NFNLGRP_MAX - 1) diff --git a/include/uapi/linux/netfilter/xt_HMARK.h b/include/uapi/linux/netfilter/xt_HMARK.h index 826fc5807577..3fb48c8d8d78 100644 --- a/include/uapi/linux/netfilter/xt_HMARK.h +++ 
b/include/uapi/linux/netfilter/xt_HMARK.h @@ -2,6 +2,7 @@ #define XT_HMARK_H_ #include <linux/types.h> +#include <linux/netfilter.h> enum { XT_HMARK_SADDR_MASK, diff --git a/include/uapi/linux/netfilter/xt_RATEEST.h b/include/uapi/linux/netfilter/xt_RATEEST.h index 6605e20ad8cf..ec1b57047e03 100644 --- a/include/uapi/linux/netfilter/xt_RATEEST.h +++ b/include/uapi/linux/netfilter/xt_RATEEST.h @@ -2,6 +2,7 @@ #define _XT_RATEEST_TARGET_H #include <linux/types.h> +#include <linux/if.h> struct xt_rateest_target_info { char name[IFNAMSIZ]; diff --git a/include/uapi/linux/netfilter/xt_TEE.h b/include/uapi/linux/netfilter/xt_TEE.h index 5c21d5c829af..01092023404b 100644 --- a/include/uapi/linux/netfilter/xt_TEE.h +++ b/include/uapi/linux/netfilter/xt_TEE.h @@ -1,6 +1,8 @@ #ifndef _XT_TEE_TARGET_H #define _XT_TEE_TARGET_H +#include <linux/netfilter.h> + struct xt_tee_tginfo { union nf_inet_addr gw; char oif[16]; diff --git a/include/uapi/linux/netfilter/xt_TPROXY.h b/include/uapi/linux/netfilter/xt_TPROXY.h index 902043c2073f..8d693eefdc1f 100644 --- a/include/uapi/linux/netfilter/xt_TPROXY.h +++ b/include/uapi/linux/netfilter/xt_TPROXY.h @@ -2,6 +2,7 @@ #define _XT_TPROXY_H #include <linux/types.h> +#include <linux/netfilter.h> /* TPROXY target is capable of marking the packet to perform * redirection. We can get rid of that whenever we get support for diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h index 43acb7e175f6..1e4b37b93bef 100644 --- a/include/uapi/linux/netfilter/xt_cgroup.h +++ b/include/uapi/linux/netfilter/xt_cgroup.h @@ -2,10 +2,23 @@ #define _UAPI_XT_CGROUP_H #include <linux/types.h> +#include <linux/limits.h> -struct xt_cgroup_info { +struct xt_cgroup_info_v0 { __u32 id; __u32 invert; }; +struct xt_cgroup_info_v1 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + char path[PATH_MAX]; + __u32 classid; + + /* kernel internal data */ + void *priv __attribute__((aligned(8))); +}; + #endif /* _UAPI_XT_CGROUP_H */ diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h index cbfc43d1af68..6db90372f09c 100644 --- a/include/uapi/linux/netfilter/xt_hashlimit.h +++ b/include/uapi/linux/netfilter/xt_hashlimit.h @@ -2,6 +2,7 @@ #define _UAPI_XT_HASHLIMIT_H #include <linux/types.h> +#include <linux/if.h> /* timings are in milliseconds. 
*/ #define XT_HASHLIMIT_SCALE 10000 diff --git a/include/uapi/linux/netfilter/xt_ipvs.h b/include/uapi/linux/netfilter/xt_ipvs.h index eff34ac18808..e03b9c31a39d 100644 --- a/include/uapi/linux/netfilter/xt_ipvs.h +++ b/include/uapi/linux/netfilter/xt_ipvs.h @@ -2,6 +2,7 @@ #define _XT_IPVS_H #include <linux/types.h> +#include <linux/netfilter.h> enum { XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ diff --git a/include/uapi/linux/netfilter/xt_mac.h b/include/uapi/linux/netfilter/xt_mac.h index b892cdc67e06..9a19a08a9181 100644 --- a/include/uapi/linux/netfilter/xt_mac.h +++ b/include/uapi/linux/netfilter/xt_mac.h @@ -1,6 +1,8 @@ #ifndef _XT_MAC_H #define _XT_MAC_H +#include <linux/if_ether.h> + struct xt_mac_info { unsigned char srcaddr[ETH_ALEN]; int invert; diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h index 5d66caeba3ee..e6159958b2fb 100644 --- a/include/uapi/linux/netfilter/xt_osf.h +++ b/include/uapi/linux/netfilter/xt_osf.h @@ -20,6 +20,8 @@ #define _XT_OSF_H #include <linux/types.h> +#include <linux/ip.h> +#include <linux/tcp.h> #define MAXGENRELEN 32 diff --git a/include/uapi/linux/netfilter/xt_physdev.h b/include/uapi/linux/netfilter/xt_physdev.h index db7a2982e9c0..ccdde87da214 100644 --- a/include/uapi/linux/netfilter/xt_physdev.h +++ b/include/uapi/linux/netfilter/xt_physdev.h @@ -2,7 +2,7 @@ #define _UAPI_XT_PHYSDEV_H #include <linux/types.h> - +#include <linux/if.h> #define XT_PHYSDEV_OP_IN 0x01 #define XT_PHYSDEV_OP_OUT 0x02 diff --git a/include/uapi/linux/netfilter/xt_policy.h b/include/uapi/linux/netfilter/xt_policy.h index be8ead05c316..d8a9800dce61 100644 --- a/include/uapi/linux/netfilter/xt_policy.h +++ b/include/uapi/linux/netfilter/xt_policy.h @@ -2,6 +2,8 @@ #define _XT_POLICY_H #include <linux/types.h> +#include <linux/in.h> +#include <linux/in6.h> #define XT_POLICY_MAX_ELEM 4 diff --git a/include/uapi/linux/netfilter/xt_rateest.h b/include/uapi/linux/netfilter/xt_rateest.h index d40a6196842a..13fe50d4e4b3 100644 --- a/include/uapi/linux/netfilter/xt_rateest.h +++ b/include/uapi/linux/netfilter/xt_rateest.h @@ -2,6 +2,7 @@ #define _XT_RATEEST_MATCH_H #include <linux/types.h> +#include <linux/if.h> enum xt_rateest_match_flags { XT_RATEEST_MATCH_INVERT = 1<<0, diff --git a/include/uapi/linux/netfilter/xt_recent.h b/include/uapi/linux/netfilter/xt_recent.h index 6ef36c113e89..955d562031cc 100644 --- a/include/uapi/linux/netfilter/xt_recent.h +++ b/include/uapi/linux/netfilter/xt_recent.h @@ -2,6 +2,7 @@ #define _LINUX_NETFILTER_XT_RECENT_H 1 #include <linux/types.h> +#include <linux/netfilter.h> enum { XT_RECENT_CHECK = 1 << 0, diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h index 29287be696a2..58ffcfb7978e 100644 --- a/include/uapi/linux/netfilter/xt_sctp.h +++ b/include/uapi/linux/netfilter/xt_sctp.h @@ -66,26 +66,26 @@ struct xt_sctp_info { #define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ __sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap)) -static inline bool +static inline _Bool __sctp_chunkmap_is_clear(const __u32 *chunkmap, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) if (chunkmap[i]) - return false; - return true; + return 0; + return 1; } #define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ __sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap)) -static inline bool +static inline _Bool __sctp_chunkmap_is_all_set(const __u32 *chunkmap, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) if (chunkmap[i] != ~0U) - return false; - 
return true; + return 0; + return 1; } #endif /* _XT_SCTP_H_ */ diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h index a5a86a4db6b3..ece3ad4eecda 100644 --- a/include/uapi/linux/netfilter_arp/arp_tables.h +++ b/include/uapi/linux/netfilter_arp/arp_tables.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/if.h> #include <linux/netfilter_arp.h> #include <linux/netfilter/x_tables.h> diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h index a5eda6db8d79..514519b47651 100644 --- a/include/uapi/linux/netfilter_bridge.h +++ b/include/uapi/linux/netfilter_bridge.h @@ -4,6 +4,7 @@ /* bridge-specific defines for netfilter. */ +#include <linux/in.h> #include <linux/netfilter.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> diff --git a/include/uapi/linux/netfilter_bridge/ebt_arp.h b/include/uapi/linux/netfilter_bridge/ebt_arp.h index 522f3e427f49..dd4df25330e8 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_arp.h +++ b/include/uapi/linux/netfilter_bridge/ebt_arp.h @@ -2,6 +2,7 @@ #define __LINUX_BRIDGE_EBT_ARP_H #include <linux/types.h> +#include <linux/if_ether.h> #define EBT_ARP_OPCODE 0x01 #define EBT_ARP_HTYPE 0x02 diff --git a/include/uapi/linux/netfilter_bridge/ebt_arpreply.h b/include/uapi/linux/netfilter_bridge/ebt_arpreply.h index 7e77896e1fbf..6fee3402e307 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_arpreply.h +++ b/include/uapi/linux/netfilter_bridge/ebt_arpreply.h @@ -1,6 +1,8 @@ #ifndef __LINUX_BRIDGE_EBT_ARPREPLY_H #define __LINUX_BRIDGE_EBT_ARPREPLY_H +#include <linux/if_ether.h> + struct ebt_arpreply_info { unsigned char mac[ETH_ALEN]; int target; diff --git a/include/uapi/linux/netfilter_bridge/ebt_ip6.h b/include/uapi/linux/netfilter_bridge/ebt_ip6.h index 42b889682721..a062f0ce95f9 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_ip6.h +++ b/include/uapi/linux/netfilter_bridge/ebt_ip6.h @@ -13,6 +13,7 @@ #define __LINUX_BRIDGE_EBT_IP6_H #include <linux/types.h> +#include <linux/in6.h> #define EBT_IP6_SOURCE 0x01 #define EBT_IP6_DEST 0x02 diff --git a/include/uapi/linux/netfilter_bridge/ebt_nat.h b/include/uapi/linux/netfilter_bridge/ebt_nat.h index 5e74e3b03bd6..c990d74ee966 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_nat.h +++ b/include/uapi/linux/netfilter_bridge/ebt_nat.h @@ -1,6 +1,8 @@ #ifndef __LINUX_BRIDGE_EBT_NAT_H #define __LINUX_BRIDGE_EBT_NAT_H +#include <linux/if_ether.h> + #define NAT_ARP_BIT (0x00000010) struct ebt_nat_info { unsigned char mac[ETH_ALEN]; diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index fd2ee501726d..e3cdf9f1a259 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -12,6 +12,8 @@ #ifndef _UAPI__LINUX_BRIDGE_EFF_H #define _UAPI__LINUX_BRIDGE_EFF_H +#include <linux/types.h> +#include <linux/if.h> #include <linux/netfilter_bridge.h> #define EBT_TABLE_MAXNAMELEN 32 @@ -33,8 +35,8 @@ struct xt_match; struct xt_target; struct ebt_counter { - uint64_t pcnt; - uint64_t bcnt; + __u64 pcnt; + __u64 bcnt; }; struct ebt_replace { diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h index f1e6ef256034..d0da53d96d93 100644 --- a/include/uapi/linux/netfilter_ipv4/ip_tables.h +++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h @@ -17,6 +17,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/if.h> 
#include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h index 649c68062dca..d1b22653daf2 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h +++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h @@ -17,6 +17,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/if.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/x_tables.h> diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_rt.h b/include/uapi/linux/netfilter_ipv6/ip6t_rt.h index 7605a5ff81cd..558f81e46fb9 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6t_rt.h +++ b/include/uapi/linux/netfilter_ipv6/ip6t_rt.h @@ -2,7 +2,7 @@ #define _IP6T_RT_H #include <linux/types.h> -/*#include <linux/in6.h>*/ +#include <linux/in6.h> #define IP6T_RT_HOPS 16 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 1f0b4cf5dd03..5b7b5ebe7ca8 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -820,6 +820,10 @@ * as an event to indicate changes for devices with wiphy-specific regdom * management. * + * @NL80211_CMD_ABORT_SCAN: Stop an ongoing scan. Returns -ENOENT if a scan is + * not running. The driver indicates the status of the scan through + * cfg80211_scan_done(). + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1006,6 +1010,8 @@ enum nl80211_commands { NL80211_CMD_WIPHY_REG_CHANGE, + NL80211_CMD_ABORT_SCAN, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1764,8 +1770,9 @@ enum nl80211_commands { * over all channels. * * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a - * scheduled scan (or a WoWLAN net-detect scan) is started, u32 - * in seconds. + * scheduled scan is started. Or the delay before a WoWLAN + * net-detect scan is started, counting from the moment the + * system is suspended. This value is a u32, in seconds. * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device * is operating in an indoor environment. 
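For context on the nl80211 hunk above: the new NL80211_CMD_ABORT_SCAN command is driven from userspace over generic netlink like any other nl80211 command. The following is a minimal sketch, not part of this patch, assuming libnl-3 (genl) is available and that the interface name ("wlan0" or similar) is supplied by the caller; the helper name abort_scan() and the error handling are illustrative only. The only behavior taken from the diff is that the kernel replies -ENOENT when no scan is running.

#include <errno.h>
#include <net/if.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Ask the kernel to abort a scan that is in flight on @ifname. */
static int abort_scan(const char *ifname)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, err;

	if (!sk)
		return -ENOMEM;

	err = genl_connect(sk);
	if (err < 0)
		goto out;

	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0) {
		err = family;
		goto out;
	}

	msg = nlmsg_alloc();
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_ABORT_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));

	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_wait_for_ack(sk);	/* -ENOENT if no scan was running */
out:
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}

Listeners subscribed to the nl80211 "scan" multicast group would then typically see NL80211_CMD_SCAN_ABORTED once the driver completes the scan via cfg80211_scan_done(), as described in the command's documentation above.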
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index d801bb0d9f6d..1afe9623c1a7 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -171,6 +171,9 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */ + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -192,6 +195,9 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8d2530daca9f..8cb18b44968e 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -72,6 +72,10 @@ struct tc_estimator { #define TC_H_UNSPEC (0U) #define TC_H_ROOT (0xFFFFFFFFU) #define TC_H_INGRESS (0xFFFFFFF1U) +#define TC_H_CLSACT TC_H_INGRESS + +#define TC_H_MIN_INGRESS 0xFFF2U +#define TC_H_MIN_EGRESS 0xFFF3U /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ enum tc_link_layer { diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h index 1cb8aa6850b5..36cd8210a5d1 100644 --- a/include/uapi/linux/raid/md_u.h +++ b/include/uapi/linux/raid/md_u.h @@ -80,7 +80,7 @@ typedef struct mdu_array_info_s { int major_version; int minor_version; int patch_version; - int ctime; + unsigned int ctime; int level; int size; int nr_disks; @@ -91,7 +91,7 @@ typedef struct mdu_array_info_s { /* * Generic state information */ - int utime; /* 0 Superblock update time */ + unsigned int utime; /* 0 Superblock update time */ int state; /* 1 State bits (clean, ...) 
*/ int active_disks; /* 2 Number of currently active disks */ int working_disks; /* 3 Number of working disks */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 123a5af4e8bb..ca764b5da86d 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -311,6 +311,7 @@ enum rtattr_type_t { RTA_PREF, RTA_ENCAP_TYPE, RTA_ENCAP, + RTA_EXPIRES, __RTA_MAX }; diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 25331f9faa76..5d59c3ebf459 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -69,6 +69,7 @@ struct serial_struct { #define SERIAL_IO_AU 4 #define SERIAL_IO_TSI 5 #define SERIAL_IO_MEM32BE 6 +#define SERIAL_IO_MEM16 7 #define UART_CLEAR_FIFO 0x01 #define UART_USE_FIFO 0x02 diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 93ba148f923e..3e5d757407fb 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -176,7 +176,7 @@ #define PORT_S3C6400 84 -/* NWPSERIAL */ +/* NWPSERIAL, now removed */ #define PORT_NWPSERIAL 85 /* MAX3100 */ diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index becdd78295cc..c2ea1698257f 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -77,5 +77,6 @@ #define SERIO_PS2MULT 0x3c #define SERIO_TSC40 0x3d #define SERIO_WACOM_IV 0x3e +#define SERIO_EGALAX 0x3f #endif /* _UAPI_SERIO_H */ diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h index 49230d36f9ce..bae2d80034d4 100644 --- a/include/uapi/linux/sock_diag.h +++ b/include/uapi/linux/sock_diag.h @@ -4,6 +4,7 @@ #include <linux/types.h> #define SOCK_DIAG_BY_FAMILY 20 +#define SOCK_DESTROY 21 struct sock_diag_req { __u8 sdiag_family; diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h index e888b1aed69f..8e7890b26d9a 100644 --- a/include/uapi/linux/sockios.h +++ b/include/uapi/linux/sockios.h @@ -27,7 +27,7 @@ /* Routing table calls. */ #define SIOCADDRT 0x890B /* add routing table entry */ #define SIOCDELRT 0x890C /* delete routing table entry */ -#define SIOCRTMSG 0x890D /* call to routing system */ +#define SIOCRTMSG 0x890D /* unused */ /* Socket configuration controls. */ #define SIOCGIFNAME 0x8910 /* get iface name */ diff --git a/include/uapi/linux/uinput.h b/include/uapi/linux/uinput.h index 013c9d8db372..dc652e224b67 100644 --- a/include/uapi/linux/uinput.h +++ b/include/uapi/linux/uinput.h @@ -20,6 +20,11 @@ * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> * * Changes/Revisions: + * 0.5 08/13/2015 (David Herrmann <dh.herrmann@gmail.com> & + * Benjamin Tissoires <benjamin.tissoires@redhat.com>) + * - add UI_DEV_SETUP ioctl + * - add UI_ABS_SETUP ioctl + * - add UI_GET_VERSION ioctl * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>) * - add UI_GET_SYSNAME ioctl * 0.3 24/05/2006 (Anssi Hannula <anssi.hannulagmail.com>) @@ -37,8 +42,8 @@ #include <linux/types.h> #include <linux/input.h> -#define UINPUT_VERSION 4 - +#define UINPUT_VERSION 5 +#define UINPUT_MAX_NAME_SIZE 80 struct uinput_ff_upload { __u32 request_id; @@ -58,6 +63,76 @@ struct uinput_ff_erase { #define UI_DEV_CREATE _IO(UINPUT_IOCTL_BASE, 1) #define UI_DEV_DESTROY _IO(UINPUT_IOCTL_BASE, 2) +struct uinput_setup { + struct input_id id; + char name[UINPUT_MAX_NAME_SIZE]; + __u32 ff_effects_max; +}; + +/** + * UI_DEV_SETUP - Set device parameters for setup + * + * This ioctl sets parameters for the input device to be created. 
It + * supersedes the old "struct uinput_user_dev" method, which wrote this data + * via write(). To actually set the absolute axes UI_ABS_SETUP should be + * used. + * + * The ioctl takes a "struct uinput_setup" object as argument. The fields of + * this object are as follows: + * id: See the description of "struct input_id". This field is + * copied unchanged into the new device. + * name: This is used unchanged as name for the new device. + * ff_effects_max: This limits the maximum numbers of force-feedback effects. + * See below for a description of FF with uinput. + * + * This ioctl can be called multiple times and will overwrite previous values. + * If this ioctl fails with -EINVAL, it is recommended to use the old + * "uinput_user_dev" method via write() as a fallback, in case you run on an + * old kernel that does not support this ioctl. + * + * This ioctl may fail with -EINVAL if it is not supported or if you passed + * incorrect values, -ENOMEM if the kernel runs out of memory or -EFAULT if the + * passed uinput_setup object cannot be read/written. + * If this call fails, partial data may have already been applied to the + * internal device. + */ +#define UI_DEV_SETUP _IOW(UINPUT_IOCTL_BASE, 3, struct uinput_setup) + +struct uinput_abs_setup { + __u16 code; /* axis code */ + /* __u16 filler; */ + struct input_absinfo absinfo; +}; + +/** + * UI_ABS_SETUP - Set absolute axis information for the device to setup + * + * This ioctl sets one absolute axis information for the input device to be + * created. It supersedes the old "struct uinput_user_dev" method, which wrote + * part of this data and the content of UI_DEV_SETUP via write(). + * + * The ioctl takes a "struct uinput_abs_setup" object as argument. The fields + * of this object are as follows: + * code: The corresponding input code associated with this axis + * (ABS_X, ABS_Y, etc...) + * absinfo: See "struct input_absinfo" for a description of this field. + * This field is copied unchanged into the kernel for the + * specified axis. If the axis is not enabled via + * UI_SET_ABSBIT, this ioctl will enable it. + * + * This ioctl can be called multiple times and will overwrite previous values. + * If this ioctl fails with -EINVAL, it is recommended to use the old + * "uinput_user_dev" method via write() as a fallback, in case you run on an + * old kernel that does not support this ioctl. + * + * This ioctl may fail with -EINVAL if it is not supported or if you passed + * incorrect values, -ENOMEM if the kernel runs out of memory or -EFAULT if the + * passed uinput_setup object cannot be read/written. + * If this call fails, partial data may have already been applied to the + * internal device. + */ +#define UI_ABS_SETUP _IOW(UINPUT_IOCTL_BASE, 4, struct uinput_abs_setup) + #define UI_SET_EVBIT _IOW(UINPUT_IOCTL_BASE, 100, int) #define UI_SET_KEYBIT _IOW(UINPUT_IOCTL_BASE, 101, int) #define UI_SET_RELBIT _IOW(UINPUT_IOCTL_BASE, 102, int) @@ -144,7 +219,6 @@ struct uinput_ff_erase { #define UI_FF_UPLOAD 1 #define UI_FF_ERASE 2 -#define UINPUT_MAX_NAME_SIZE 80 struct uinput_user_dev { char name[UINPUT_MAX_NAME_SIZE]; struct input_id id; diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index 3b3b95e01f71..69ab695fad2e 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -28,6 +28,7 @@ /* A.3. Video Interface Protocol Codes */ #define UVC_PC_PROTOCOL_UNDEFINED 0x00 +#define UVC_PC_PROTOCOL_15 0x01 /* A.5. 
Video Class-Specific VC Interface Descriptor Subtypes */ #define UVC_VC_DESCRIPTOR_UNDEFINED 0x00 diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 1bdce501ad6b..2d225bcdb831 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -158,8 +158,10 @@ enum v4l2_colorfx { * We reserve 16 controls for this driver. */ #define V4L2_CID_USER_S2255_BASE (V4L2_CID_USER_BASE + 0x1030) -/* The base for the si476x driver controls. See include/media/si476x.h for the list - * of controls. Total of 16 controls is reserved for this driver */ +/* + * The base for the si476x driver controls. See include/media/drv-intf/si476x.h + * for the list of controls. Total of 16 controls is reserved for this driver + */ #define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040) /* The base for the TI VPE driver controls. Total of 16 controls is reserved for diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 9fd7b5d8df2f..7d7a4c6f2090 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -39,6 +39,13 @@ #define VFIO_SPAPR_TCE_v2_IOMMU 7 /* + * The No-IOMMU IOMMU offers no translation or isolation for devices and + * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU + * code will taint the host kernel and should be used with extreme caution. + */ +#define VFIO_NOIOMMU_IOMMU 8 + +/* * The IOCTL interface is designed for extensibility by embedding the * structure length (argsz) and flags into structures passed between * kernel and userspace. We therefore use the _IO() macro for these @@ -568,8 +575,10 @@ struct vfio_iommu_spapr_tce_create { __u32 flags; /* in */ __u32 page_shift; + __u32 __resv1; __u64 window_size; __u32 levels; + __u32 __resv2; /* out */ __u64 start_addr; }; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index a0e87d16b726..14cd5ebfee6d 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -46,7 +46,7 @@ * All kernel-specific stuff were moved to media/v4l2-dev.h, so * no #if __KERNEL tests are allowed here * - * See http://linuxtv.org for more info + * See https://linuxtv.org for more info * * Author: Bill Dirks <bill@thedirks.org> * Justin Schoeman @@ -1476,7 +1476,12 @@ struct v4l2_ext_control { } __attribute__ ((packed)); struct v4l2_ext_controls { - __u32 ctrl_class; + union { +#ifndef __KERNEL__ + __u32 ctrl_class; +#endif + __u32 which; + }; __u32 count; __u32 error_idx; __u32 reserved[2]; @@ -1484,9 +1489,14 @@ struct v4l2_ext_controls { }; #define V4L2_CTRL_ID_MASK (0x0fffffff) +#ifndef __KERNEL__ #define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) +#endif +#define V4L2_CTRL_ID2WHICH(id) ((id) & 0x0fff0000UL) #define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) #define V4L2_CTRL_MAX_DIMS (4) +#define V4L2_CTRL_WHICH_CUR_VAL 0 +#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000 enum v4l2_ctrl_type { V4L2_CTRL_TYPE_INTEGER = 1, diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 7a63faa9065c..4b04ead26cd9 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -287,7 +287,7 @@ struct virtio_gpu_get_capset { /* VIRTIO_GPU_RESP_OK_CAPSET */ struct virtio_gpu_resp_capset { struct virtio_gpu_ctrl_hdr hdr; - uint8_t capset_data[]; + __u8 capset_data[]; }; #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 599562fe5d57..288694e422fb 100644 --- 
a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -127,35 +127,33 @@ #define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ #define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ #define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ -#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */ +#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */ #define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ #define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ #define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */ -#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ -#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ +#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ +#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ /* separate EPROM commands from normal PSM commands */ #define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */ #define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */ -#define HFI1_CMD_EP_ERASE_P0 66 /* erase EPROM partition 0 */ -#define HFI1_CMD_EP_ERASE_P1 67 /* erase EPROM partition 1 */ -#define HFI1_CMD_EP_READ_P0 68 /* read EPROM partition 0 */ -#define HFI1_CMD_EP_READ_P1 69 /* read EPROM partition 1 */ -#define HFI1_CMD_EP_WRITE_P0 70 /* write EPROM partition 0 */ -#define HFI1_CMD_EP_WRITE_P1 71 /* write EPROM partition 1 */ - -#define _HFI1_EVENT_FROZEN_BIT 0 -#define _HFI1_EVENT_LINKDOWN_BIT 1 -#define _HFI1_EVENT_LID_CHANGE_BIT 2 -#define _HFI1_EVENT_LMC_CHANGE_BIT 3 -#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 +/* range 66-74 no longer used */ +#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */ +#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */ +#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */ + +#define _HFI1_EVENT_FROZEN_BIT 0 +#define _HFI1_EVENT_LINKDOWN_BIT 1 +#define _HFI1_EVENT_LID_CHANGE_BIT 2 +#define _HFI1_EVENT_LMC_CHANGE_BIT 3 +#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 #define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT -#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) -#define HFI1_EVENT_LINKDOWN_BIT (1UL << _HFI1_EVENT_LINKDOWN_BIT) -#define HFI1_EVENT_LID_CHANGE_BIT (1UL << _HFI1_EVENT_LID_CHANGE_BIT) -#define HFI1_EVENT_LMC_CHANGE_BIT (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) -#define HFI1_EVENT_SL2VL_CHANGE_BIT (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) +#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) +#define HFI1_EVENT_LINKDOWN (1UL << _HFI1_EVENT_LINKDOWN_BIT) +#define HFI1_EVENT_LID_CHANGE (1UL << _HFI1_EVENT_LID_CHANGE_BIT) +#define HFI1_EVENT_LMC_CHANGE (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) +#define HFI1_EVENT_SL2VL_CHANGE (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) /* * These are the status bits readable (in ASCII form, 64bit value) diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h index 831351b2e660..2302f3ce5f86 100644 --- a/include/uapi/scsi/cxlflash_ioctl.h +++ b/include/uapi/scsi/cxlflash_ioctl.h @@ -31,6 +31,16 @@ struct dk_cxlflash_hdr { }; /* + * Return flag definitions available to all ioctls + * + * Similar to the input flags, these are grown from the bottom-up with the + * intention that ioctl-specific return flag definitions would grow from the + * top-down, allowing the two sets to co-exist. While not required/enforced + * at this time, this provides future flexibility. 
+ */ +#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL + +/* * Notes: * ----- * The 'context_id' field of all ioctl structures contains the context diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 26539a7e4880..c4cc1e40b35c 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -243,7 +243,7 @@ struct snd_soc_tplg_manifest { __le32 control_elems; /* number of control elements */ __le32 widget_elems; /* number of widget elements */ __le32 graph_elems; /* number of graph elements */ - __le32 dai_elems; /* number of DAI elements */ + __le32 pcm_elems; /* number of PCM elements */ __le32 dai_link_elems; /* number of DAI link elements */ struct snd_soc_tplg_private priv; } __attribute__((packed)); diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h index d9bd9ca0d5b0..9625484a4a2a 100644 --- a/include/uapi/sound/compress_params.h +++ b/include/uapi/sound/compress_params.h @@ -73,7 +73,8 @@ #define SND_AUDIOCODEC_IEC61937 ((__u32) 0x0000000B) #define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C) #define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D) -#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_G729 +#define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E) +#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_BESPOKE /* * Profile and modes are listed with bit masks. This allows for a @@ -312,7 +313,7 @@ struct snd_enc_flac { struct snd_enc_generic { __u32 bw; /* encoder bandwidth */ - __s32 reserved[15]; + __s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */ } __attribute__((packed, aligned(4))); union snd_codec_options { diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index aa7610a9b867..d0661977667e 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -144,6 +144,56 @@ struct ioctl_gntdev_unmap_notify { __u32 event_channel_port; }; +struct gntdev_grant_copy_segment { + union { + void __user *virt; + struct { + grant_ref_t ref; + __u16 offset; + domid_t domid; + } foreign; + } source, dest; + __u16 len; + + __u16 flags; /* GNTCOPY_* */ + __s16 status; /* GNTST_* */ +}; + +/* + * Copy between grant references and local buffers. + * + * The copy is split into @count @segments, each of which can copy + * to/from one grant reference. + * + * Each segment is similar to struct gnttab_copy in the hypervisor ABI + * except the local buffer is specified using a virtual address + * (instead of a GFN and offset). + * + * The local buffer may cross a Xen page boundary -- the driver will + * split segments into multiple ops if required. + * + * Returns 0 if all segments have been processed and @status in each + * segment is valid. Note that one or more segments may have failed + * (status != GNTST_okay). + * + * If the driver had to split a segment into two or more ops, @status + * includes the status of the first failed op for that segment (or + * GNTST_okay if all ops were successful). + * + * If -1 is returned, the status of all segments is undefined. + * + * EINVAL: A segment has local buffers for both source and + * destination. + * EINVAL: A segment crosses the boundary of a foreign page. + * EFAULT: A segment's local buffer is not accessible. 
+ */ +#define IOCTL_GNTDEV_GRANT_COPY \ + _IOC(_IOC_NONE, 'G', 8, sizeof(struct ioctl_gntdev_grant_copy)) +struct ioctl_gntdev_grant_copy { + unsigned int count; + struct gntdev_grant_copy_segment __user *segments; +}; + /* Clear (set to zero) the byte specified by index */ #define UNMAP_NOTIFY_CLEAR_BYTE 0x1 /* Send an interrupt on the indicated event channel */ diff --git a/include/video/omapdss.h b/include/video/omapdss.h index f001a356fd98..295b41e20d8e 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -87,6 +87,7 @@ enum omap_channel { OMAP_DSS_CHANNEL_DIGIT = 1, OMAP_DSS_CHANNEL_LCD2 = 2, OMAP_DSS_CHANNEL_LCD3 = 3, + OMAP_DSS_CHANNEL_WB = 4, }; enum omap_color_mode { @@ -367,14 +368,12 @@ struct omap_video_timings { enum omap_dss_signal_edge sync_pclk_edge; }; -#ifdef CONFIG_OMAP2_DSS_VENC /* Hardcoded timings for tv modes. Venc only uses these to * identify the mode, and does not actually use the configs * itself. However, the configs should be something that * a normal monitor can also show */ extern const struct omap_video_timings omap_dss_pal_timings; extern const struct omap_video_timings omap_dss_ntsc_timings; -#endif struct omap_dss_cpr_coefs { s16 rr, rg, rb; @@ -866,8 +865,6 @@ void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, int dss_feat_get_num_mgrs(void); int dss_feat_get_num_ovls(void); -enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel); -enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel); enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); diff --git a/include/video/sh_mobile_hdmi.h b/include/video/sh_mobile_hdmi.h deleted file mode 100644 index 63d20efa254a..000000000000 --- a/include/video/sh_mobile_hdmi.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * SH-Mobile High-Definition Multimedia Interface (HDMI) - * - * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef SH_MOBILE_HDMI_H -#define SH_MOBILE_HDMI_H - -struct sh_mobile_lcdc_chan_cfg; -struct device; -struct clk; - -/* - * flags format - * - * 0x00000CBA - * - * A: Audio source select - * B: Int output option - * C: Chip specific option - */ - -/* Audio source select */ -#define HDMI_SND_SRC_MASK (0xF << 0) -#define HDMI_SND_SRC_I2S (0 << 0) /* default */ -#define HDMI_SND_SRC_SPDIF (1 << 0) -#define HDMI_SND_SRC_DSD (2 << 0) -#define HDMI_SND_SRC_HBR (3 << 0) - -/* Int output option */ -#define HDMI_OUTPUT_PUSH_PULL (1 << 4) /* System control : output mode */ -#define HDMI_OUTPUT_POLARITY_HI (1 << 5) /* System control : output polarity */ - -/* Chip specific option */ -#define HDMI_32BIT_REG (1 << 8) -#define HDMI_HAS_HTOP1 (1 << 9) - -struct sh_mobile_hdmi_info { - unsigned int flags; - long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq, - unsigned long *parent_freq); -}; - -#endif diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h index 8e035871360e..732efb08c3e1 100644 --- a/include/xen/interface/platform.h +++ b/include/xen/interface/platform.h @@ -35,14 +35,23 @@ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC, * 1 January, 1970 if the current system time was <system_time>. 
*/ -#define XENPF_settime 17 -struct xenpf_settime { +#define XENPF_settime32 17 +struct xenpf_settime32 { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; -DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime_t); +DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime32_t); +#define XENPF_settime64 62 +struct xenpf_settime64 { + /* IN variables. */ + uint64_t secs; + uint32_t nsecs; + uint32_t mbz; + uint64_t system_time; +}; +DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime64_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. @@ -495,7 +504,8 @@ struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { - struct xenpf_settime settime; + struct xenpf_settime32 settime32; + struct xenpf_settime64 settime64; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 167071c290b3..d1331121c0bd 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -48,7 +48,7 @@ #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 -#define __HYPERVISOR_dom0_op 7 +#define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index e4e214a5abd5..86abe07b20ec 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -5,6 +5,7 @@ #include <linux/notifier.h> #include <linux/efi.h> #include <asm/xen/interface.h> +#include <xen/interface/vcpu.h> DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); @@ -18,6 +19,10 @@ void xen_arch_suspend(void); void xen_resume_notifier_register(struct notifier_block *nb); void xen_resume_notifier_unregister(struct notifier_block *nb); +bool xen_vcpu_stolen(int vcpu); +void xen_setup_runstate_info(int cpu); +void xen_get_runstate_snapshot(struct vcpu_runstate_info *res); + int xen_setup_shutdown_event(void); extern unsigned long *xen_contiguous_bitmap; |
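
The perf_event.h hunk above adds PERF_SAMPLE_BRANCH_NO_FLAGS and PERF_SAMPLE_BRANCH_NO_CYCLES, which let a profiler request a branch stack without the per-branch flags and cycle counts. A minimal userspace sketch of setting those bits follows; the event choice, sampling period and error handling are illustrative assumptions, not part of the patch.

/* Sketch: open a hardware-cycles event with branch-stack sampling and ask
 * the PMU not to record branch flags or cycle counts.  Event type, period
 * and error handling are illustrative assumptions. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
                                  PERF_SAMPLE_BRANCH_NO_FLAGS |  /* new bit */
                                  PERF_SAMPLE_BRANCH_NO_CYCLES;  /* new bit */

        int fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
        if (fd < 0)
                perror("perf_event_open");
        else
                close(fd);
        return 0;
}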
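
The uinput.h hunk bumps UINPUT_VERSION to 5 and introduces UI_DEV_SETUP and UI_ABS_SETUP, which the new header comments recommend over writing a struct uinput_user_dev. Below is a sketch of creating a one-axis absolute device with the new ioctls; the device name, IDs and axis range are made-up values, and per the header comments a real application should fall back to the old write() interface if these ioctls fail with -EINVAL on an older kernel.

/* Sketch: create a single-axis absolute device via UI_DEV_SETUP and
 * UI_ABS_SETUP.  Name, vendor/product IDs and the ABS_X range are
 * illustrative values only. */
#include <fcntl.h>
#include <linux/uinput.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);
        if (fd < 0) {
                perror("open /dev/uinput");
                return 1;
        }

        /* Declare the event types/codes the device will emit. */
        ioctl(fd, UI_SET_EVBIT, EV_ABS);
        ioctl(fd, UI_SET_ABSBIT, ABS_X);

        /* Identity of the new device (ff_effects_max left at 0). */
        struct uinput_setup setup;
        memset(&setup, 0, sizeof(setup));
        setup.id.bustype = BUS_VIRTUAL;
        setup.id.vendor = 0x1234;               /* illustrative IDs */
        setup.id.product = 0x5678;
        strncpy(setup.name, "example-abs-device", UINPUT_MAX_NAME_SIZE - 1);
        if (ioctl(fd, UI_DEV_SETUP, &setup) < 0)
                perror("UI_DEV_SETUP (fall back to write() on old kernels)");

        /* Range for ABS_X; UI_ABS_SETUP also enables the axis if it was
         * not already enabled via UI_SET_ABSBIT. */
        struct uinput_abs_setup abs;
        memset(&abs, 0, sizeof(abs));
        abs.code = ABS_X;
        abs.absinfo.minimum = 0;
        abs.absinfo.maximum = 1023;
        if (ioctl(fd, UI_ABS_SETUP, &abs) < 0)
                perror("UI_ABS_SETUP");

        ioctl(fd, UI_DEV_CREATE);
        /* ... emit events, then ... */
        ioctl(fd, UI_DEV_DESTROY);
        close(fd);
        return 0;
}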
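
The vfio.h hunk defines VFIO_NOIOMMU_IOMMU, an IOMMU type that offers no translation or isolation and, per the comment, supports nothing beyond VFIO_CHECK_EXTENSION while tainting the kernel. A short sketch of probing whether the running kernel exposes it; the container path is the standard VFIO node and the rest is ordinary VFIO_CHECK_EXTENSION usage.

/* Sketch: probe the VFIO container for the (kernel-tainting) no-IOMMU
 * mode.  /dev/vfio/vfio is the standard container node. */
#include <fcntl.h>
#include <linux/vfio.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int container = open("/dev/vfio/vfio", O_RDWR);
        if (container < 0) {
                perror("open /dev/vfio/vfio");
                return 1;
        }

        if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION) {
                fprintf(stderr, "unexpected VFIO API version\n");
                close(container);
                return 1;
        }

        /* Returns a positive value when the extension is supported. */
        if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_NOIOMMU_IOMMU) > 0)
                printf("no-IOMMU mode available (use with extreme caution)\n");
        else
                printf("no-IOMMU mode not supported\n");

        close(container);
        return 0;
}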
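
The videodev2.h hunk turns ctrl_class into a union with the new which field and adds V4L2_CTRL_WHICH_CUR_VAL and V4L2_CTRL_WHICH_DEF_VAL, so an extended-controls call can now ask for a control's default value rather than its current one. A sketch of reading a default value; the device node and control id are illustrative, and kernels without this change only accept the class-based ctrl_class usage.

/* Sketch: read the default (rather than current) value of the brightness
 * control using the new 'which' field.  Device node and control id are
 * illustrative choices. */
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);
        if (fd < 0) {
                perror("open /dev/video0");
                return 1;
        }

        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;
        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));

        ctrl.id = V4L2_CID_BRIGHTNESS;
        ctrls.which = V4L2_CTRL_WHICH_DEF_VAL;  /* ask for the default value */
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
                printf("default brightness: %d\n", ctrl.value);
        else
                perror("VIDIOC_G_EXT_CTRLS");

        close(fd);
        return 0;
}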
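
The gntdev.h hunk adds IOCTL_GNTDEV_GRANT_COPY together with the long comment describing its per-segment semantics. The sketch below copies one foreign grant into a local buffer; the /dev/xen/gntdev path, the GNTCOPY_source_gref flag value (bit 0 of Xen's grant-table copy ABI) and the availability of the installed <xen/gntdev.h> header and the Xen grant-table types it relies on are all assumptions, not taken from the diff.

/* Sketch: copy len bytes from one foreign grant reference into a local
 * buffer via IOCTL_GNTDEV_GRANT_COPY.  Device path, flag value and header
 * availability are assumptions; domid/gref in main() are placeholders. */
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/gntdev.h>                 /* assumed installed UAPI header path */

#ifndef GNTCOPY_source_gref
#define GNTCOPY_source_gref (1 << 0)    /* assumed: "source is a grant ref" */
#endif

static int copy_from_grant(int gntdev_fd, __u32 domid, __u32 gref,
                           void *buf, __u16 len)
{
        struct gntdev_grant_copy_segment seg;
        struct ioctl_gntdev_grant_copy copy;

        memset(&seg, 0, sizeof(seg));
        seg.source.foreign.ref = gref;
        seg.source.foreign.offset = 0;
        seg.source.foreign.domid = domid;
        seg.dest.virt = buf;            /* local buffer, may span a page boundary */
        seg.len = len;
        seg.flags = GNTCOPY_source_gref;

        copy.count = 1;
        copy.segments = &seg;

        if (ioctl(gntdev_fd, IOCTL_GNTDEV_GRANT_COPY, &copy) < 0)
                return -1;              /* status of all segments undefined */

        return seg.status == 0 ? 0 : -1;        /* 0 == GNTST_okay */
}

int main(void)
{
        char buf[256];
        int fd = open("/dev/xen/gntdev", O_RDWR);

        if (fd < 0) {
                perror("open /dev/xen/gntdev");
                return 1;
        }
        /* domid 1 / grant ref 42 are placeholder values for illustration. */
        if (copy_from_grant(fd, 1, 42, buf, sizeof(buf)) < 0)
                perror("IOCTL_GNTDEV_GRANT_COPY");
        close(fd);
        return 0;
}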