Diffstat (limited to 'include')
377 files changed, 8351 insertions, 2030 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 4d40e9b5d938..788c6c35291a 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -61,12 +61,12 @@ bool acpi_ata_match(acpi_handle handle); bool acpi_bay_match(acpi_handle handle); bool acpi_dock_match(acpi_handle handle); -bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs); +bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs); union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, - int rev, int func, union acpi_object *argv4); + u64 rev, u64 func, union acpi_object *argv4); static inline union acpi_object * -acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func, +acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4, acpi_object_type type) { union acpi_object *obj; diff --git a/include/acpi/video.h b/include/acpi/video.h index 70a41f742037..5731ccb42585 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); */ extern bool acpi_video_handles_brightness_key_presses(void); extern int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br); + struct acpi_video_device_brightness **dev_br, + int *pmax_level); #else static inline int acpi_video_register(void) { return 0; } static inline void acpi_video_unregister(void) { return; } @@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void) return false; } static inline int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br) + struct acpi_video_device_brightness **dev_br, + int *pmax_level) { return -ENODEV; } diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index eed3bbe88c8a..002b81f6f2bc 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr) #define readl_relaxed readl #endif -#ifndef readq_relaxed +#if defined(readq) && !defined(readq_relaxed) #define readq_relaxed readq #endif @@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr) #define writel_relaxed writel #endif -#ifndef writeq_relaxed +#if defined(writeq) && !defined(writeq_relaxed) #define writeq_relaxed writeq #endif diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 9401f4819891..d4458b6dbfb4 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd) #define io_remap_pfn_range remap_pfn_range #endif +#ifndef has_transparent_hugepage +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage() 1 +#else +#define has_transparent_hugepage() 0 +#endif +#endif + #endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index 5d8ffa3e6f8c..c1cde3577551 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -7,10 +7,10 @@ static __always_inline int preempt_count(void) { - return current_thread_info()->preempt_count; + return READ_ONCE(current_thread_info()->preempt_count); } -static __always_inline int *preempt_count_ptr(void) +static __always_inline volatile int *preempt_count_ptr(void) { return ¤t_thread_info()->preempt_count; } diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h 
index 35a52a880b2f..05f05f17a7c2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -22,14 +22,33 @@ #include <asm-generic/qspinlock_types.h> /** + * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock + * @lock : Pointer to queued spinlock structure + * + * There is a very slight possibility of live-lock if the lockers keep coming + * and the waiter is just unfortunate enough to not see any unlock state. + */ +#ifndef queued_spin_unlock_wait +extern void queued_spin_unlock_wait(struct qspinlock *lock); +#endif + +/** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure * Return: 1 if it is locked, 0 otherwise */ +#ifndef queued_spin_is_locked static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { + /* + * See queued_spin_unlock_wait(). + * + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL + * isn't immediately observable. + */ return atomic_read(&lock->val); } +#endif /** * queued_spin_value_unlocked - is the spinlock structure unlocked? @@ -99,19 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) } #endif -/** - * queued_spin_unlock_wait - wait until current lock holder releases the lock - * @lock : Pointer to queued spinlock structure - * - * There is a very slight possibility of live-lock if the lockers keep coming - * and the waiter is just unfortunate enough to not see any unlock state. - */ -static inline void queued_spin_unlock_wait(struct qspinlock *lock) -{ - while (atomic_read(&lock->val) & _Q_LOCKED_MASK) - cpu_relax(); -} - #ifndef virt_spin_lock static __always_inline bool virt_spin_lock(struct qspinlock *lock) { diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h index c9ccafa0d99a..e74072d23e69 100644 --- a/include/asm-generic/seccomp.h +++ b/include/asm-generic/seccomp.h @@ -29,4 +29,18 @@ #define __NR_seccomp_sigreturn __NR_rt_sigreturn #endif +#ifdef CONFIG_COMPAT +#ifndef get_compat_mode1_syscalls +static inline const int *get_compat_mode1_syscalls(void) +{ + static const int mode1_syscalls_32[] = { + __NR_seccomp_read_32, __NR_seccomp_write_32, + __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, + 0, /* null terminated */ + }; + return mode1_syscalls_32; +} +#endif +#endif /* CONFIG_COMPAT */ + #endif /* _ASM_GENERIC_SECCOMP_H */ diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index 3d1a3af5cf59..a2508a8f9a9c 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -17,21 +17,6 @@ struct siginfo; void do_schedule_next_timer(struct siginfo *info); -#ifndef HAVE_ARCH_COPY_SIGINFO - -#include <linux/string.h> - -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) -{ - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); -} - -#endif - extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); #endif diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 25d0914481a2..caedb74c9210 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -49,11 +49,16 @@ enum arch_timer_reg { #define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */ +struct arch_timer_kvm_info { + struct timecounter timecounter; + int virtual_irq; +}; + #ifdef CONFIG_ARM_ARCH_TIMER extern u32 
arch_timer_get_rate(void); extern u64 (*arch_timer_read_counter)(void); -extern struct timecounter *arch_timer_get_timecounter(void); +extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void); #else @@ -67,11 +72,6 @@ static inline u64 arch_timer_read_counter(void) return 0; } -static inline struct timecounter *arch_timer_get_timecounter(void) -{ - return NULL; -} - #endif #endif diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h index 441aff9b5aa7..583f199400a3 100644 --- a/include/crypto/pkcs7.h +++ b/include/crypto/pkcs7.h @@ -12,6 +12,7 @@ #ifndef _CRYPTO_PKCS7_H #define _CRYPTO_PKCS7_H +#include <linux/verification.h> #include <crypto/public_key.h> struct key; @@ -26,14 +27,13 @@ extern void pkcs7_free_message(struct pkcs7_message *pkcs7); extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, const void **_data, size_t *_datalen, - bool want_wrapper); + size_t *_headerlen); /* * pkcs7_trust.c */ extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, - struct key *trust_keyring, - bool *_trusted); + struct key *trust_keyring); /* * pkcs7_verify.c diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index aa730ea7faf8..882ca0e1e7a5 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -15,20 +15,6 @@ #define _LINUX_PUBLIC_KEY_H /* - * The use to which an asymmetric key is being put. - */ -enum key_being_used_for { - VERIFYING_MODULE_SIGNATURE, - VERIFYING_FIRMWARE_SIGNATURE, - VERIFYING_KEXEC_PE_SIGNATURE, - VERIFYING_KEY_SIGNATURE, - VERIFYING_KEY_SELF_SIGNATURE, - VERIFYING_UNSPECIFIED_SIGNATURE, - NR__KEY_BEING_USED_FOR -}; -extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; - -/* * Cryptographic data for the public-key subtype of the asymmetric key type. * * Note that this may include private part of the key as well as the public @@ -41,12 +27,13 @@ struct public_key { const char *pkey_algo; }; -extern void public_key_destroy(void *payload); +extern void public_key_free(struct public_key *key); /* * Public key cryptography signature data */ struct public_key_signature { + struct asymmetric_key_id *auth_ids[2]; u8 *s; /* Signature */ u32 s_size; /* Number of bytes in signature */ u8 *digest; @@ -55,17 +42,21 @@ struct public_key_signature { const char *hash_algo; }; +extern void public_key_signature_free(struct public_key_signature *sig); + extern struct asymmetric_key_subtype public_key_subtype; + struct key; +struct key_type; +union key_payload; + +extern int restrict_link_by_signature(struct key *trust_keyring, + const struct key_type *type, + const union key_payload *payload); + extern int verify_signature(const struct key *key, const struct public_key_signature *sig); -struct asymmetric_key_id; -extern struct key *x509_request_asymmetric_key(struct key *keyring, - const struct asymmetric_key_id *id, - const struct asymmetric_key_id *skid, - bool partial); - int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig); diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h new file mode 100644 index 000000000000..25afb31f0389 --- /dev/null +++ b/include/drm/bridge/analogix_dp.h @@ -0,0 +1,41 @@ +/* + * Analogix DP (Display Port) Core interface driver. + * + * Copyright (C) 2015 Rockchip Electronics Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef _ANALOGIX_DP_H_ +#define _ANALOGIX_DP_H_ + +#include <drm/drm_crtc.h> + +enum analogix_dp_devtype { + EXYNOS_DP, + RK3288_DP, +}; + +struct analogix_dp_plat_data { + enum analogix_dp_devtype dev_type; + struct drm_panel *panel; + struct drm_encoder *encoder; + struct drm_connector *connector; + + int (*power_on)(struct analogix_dp_plat_data *); + int (*power_off)(struct analogix_dp_plat_data *); + int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, + struct drm_connector *); + int (*get_modes)(struct analogix_dp_plat_data *); +}; + +int analogix_dp_resume(struct device *dev); +int analogix_dp_suspend(struct device *dev); + +int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, + struct analogix_dp_plat_data *plat_data); +void analogix_dp_unbind(struct device *dev, struct device *master, void *data); + +#endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 3c8422c69572..84f1a8eefbdb 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -90,7 +90,7 @@ struct reservation_object; struct dma_buf_attachment; /* - * 4 debug categories are defined: + * The following categories are defined: * * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... * This is the category used by the DRM_DEBUG() macro. @@ -580,12 +580,21 @@ struct drm_driver { void (*debugfs_cleanup)(struct drm_minor *minor); /** - * Driver-specific constructor for drm_gem_objects, to set up - * obj->driver_private. + * @gem_free_object: deconstructor for drm_gem_objects * - * Returns 0 on success. + * This is deprecated and should not be used by new drivers. Use + * @gem_free_object_unlocked instead. */ void (*gem_free_object) (struct drm_gem_object *obj); + + /** + * @gem_free_object_unlocked: deconstructor for drm_gem_objects + * + * This is for drivers which are not encumbered with dev->struct_mutex + * legacy locking schemes. Use this hook instead of @gem_free_object. + */ + void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); @@ -769,6 +778,7 @@ struct drm_device { atomic_t buf_alloc; /**< Buffer allocation in progress */ /*@} */ + struct mutex filelist_mutex; struct list_head filelist; /** \name Memory management */ @@ -805,14 +815,6 @@ struct drm_device { int irq; /* - * At load time, disabling the vblank interrupt won't be allowed since - * old clients may not call the modeset ioctl and therefore misbehave. - * Once the modeset ioctl *has* been called though, we can safely - * disable them when unused. - */ - bool vblank_disable_allowed; - - /* * If true, vblank interrupt will be disabled immediately when the * refcount drops to zero, as opposed to via the vblank disable * timer. 
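The drm_driver change above deprecates the @gem_free_object hook in favour of @gem_free_object_unlocked for drivers that do not depend on dev->struct_mutex. A minimal sketch of how a driver might wire up the new hook, assuming hypothetical foo_* names that are not part of this diff:

#include <linux/slab.h>
#include <drm/drmP.h>

/* Free callback for a driver that does not rely on dev->struct_mutex. */
static void foo_gem_free_object(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);	/* drop GEM core bookkeeping */
	kfree(obj);			/* free the (assumed) kmalloc'ed object */
}

static struct drm_driver foo_driver = {
	/* ... other driver hooks ... */
	.gem_free_object_unlocked = foo_gem_free_object,
};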
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h index 193ef19dfc5c..b2d912670a7f 100644 --- a/include/drm/drm_agpsupport.h +++ b/include/drm/drm_agpsupport.h @@ -37,7 +37,7 @@ struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, uint32_t type); struct drm_agp_head *drm_agp_init(struct drm_device *dev); -void drm_agp_clear(struct drm_device *dev); +void drm_legacy_agp_clear(struct drm_device *dev); int drm_agp_acquire(struct drm_device *dev); int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -93,7 +93,7 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) return NULL; } -static inline void drm_agp_clear(struct drm_device *dev) +static inline void drm_legacy_agp_clear(struct drm_device *dev) { } diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d3eaa5df187a..92c84e9ab09a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -137,7 +137,7 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); -int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); #define for_each_connector_in_state(state, connector, connector_state, __i) \ for ((__i) = 0; \ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 9054598c9a7a..d473dcc91f54 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -40,8 +40,10 @@ int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, - bool async); + bool nonblock); +void drm_atomic_helper_wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state); bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, struct drm_atomic_state *old_state, struct drm_crtc *crtc); @@ -108,6 +110,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc, uint32_t flags); int drm_atomic_helper_connector_dpms(struct drm_connector *connector, int mode); +struct drm_encoder * +drm_atomic_helper_best_encoder(struct drm_connector *connector); /* default implementations for state handling */ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); @@ -115,8 +119,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, struct drm_crtc_state *state); struct drm_crtc_state * drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); -void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *state); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); @@ -125,8 +128,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, struct drm_plane_state *state); struct drm_plane_state * drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); -void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); @@ -142,8 +144,7 @@ struct drm_atomic_state * drm_atomic_helper_duplicate_state(struct drm_device *dev, 
struct drm_modeset_acquire_ctx *ctx); void -__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, - struct drm_connector_state *state); +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state); void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e0170bf80bb0..d1559cd04e3d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -45,20 +45,12 @@ struct drm_clip_rect; struct device_node; struct fence; -#define DRM_MODE_OBJECT_CRTC 0xcccccccc -#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 -#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 -#define DRM_MODE_OBJECT_MODE 0xdededede -#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 -#define DRM_MODE_OBJECT_FB 0xfbfbfbfb -#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb -#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee -#define DRM_MODE_OBJECT_ANY 0 - struct drm_mode_object { uint32_t id; uint32_t type; struct drm_object_properties *properties; + struct kref refcount; + void (*free_cb)(struct kref *kref); }; #define DRM_OBJECT_MAX_PROPERTY 24 @@ -126,6 +118,14 @@ enum subpixel_order { #define DRM_COLOR_FORMAT_RGB444 (1<<0) #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) + +#define DRM_BUS_FLAG_DE_LOW (1<<0) +#define DRM_BUS_FLAG_DE_HIGH (1<<1) +/* drive data on pos. edge */ +#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2) +/* drive data on neg. edge */ +#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3) + /* * Describes a given display (e.g. CRT or flat panel) and its limitations. */ @@ -147,6 +147,7 @@ struct drm_display_info { const u32 *bus_formats; unsigned int num_bus_formats; + u32 bus_flags; /* Mask of supported hdmi deep color modes */ u8 edid_hdmi_dc_modes; @@ -233,8 +234,8 @@ struct drm_framebuffer { * should be deferred. In cases like this, the driver would like to * hold a ref to the fb even though it has already been removed from * userspace perspective. + * The refcount is stored inside the mode object. */ - struct kref refcount; /* * Place on the dev->mode_config.fb_list, access protected by * dev->mode_config.fb_lock. @@ -258,7 +259,6 @@ struct drm_framebuffer { struct drm_property_blob { struct drm_mode_object base; struct drm_device *dev; - struct kref refcount; struct list_head head_global; struct list_head head_file; size_t length; @@ -1895,7 +1895,7 @@ struct drm_mode_config_funcs { * drm_atomic_helper_commit(), or one of the exported sub-functions of * it. * - * Asynchronous commits (as indicated with the async parameter) must + * Nonblocking commits (as indicated with the nonblock parameter) must * do any preparatory work which might result in an unsuccessful commit * in the context of this callback. The only exceptions are hardware * errors resulting in -EIO. But even in that case the driver must @@ -1908,7 +1908,7 @@ struct drm_mode_config_funcs { * The driver must wait for any pending rendering to the new * framebuffers to complete before executing the flip. It should also * wait for any pending rendering from other drivers if the underlying - * buffer is a shared dma-buf. Asynchronous commits must not wait for + * buffer is a shared dma-buf. Nonblocking commits must not wait for * rendering in the context of this callback. 
* * An application can request to be notified when the atomic commit has @@ -1939,7 +1939,7 @@ struct drm_mode_config_funcs { * * 0 on success or one of the below negative error codes: * - * - -EBUSY, if an asynchronous updated is requested and there is + * - -EBUSY, if a nonblocking updated is requested and there is * an earlier updated pending. Drivers are allowed to support a queue * of outstanding updates, but currently no driver supports that. * Note that drivers must wait for preceding updates to complete if a @@ -1969,7 +1969,7 @@ struct drm_mode_config_funcs { */ int (*atomic_commit)(struct drm_device *dev, struct drm_atomic_state *state, - bool async); + bool nonblock); /** * @atomic_state_alloc: @@ -2259,8 +2259,9 @@ static inline unsigned drm_connector_index(struct drm_connector *connector) return connector->connector_id; } -/* helper to unplug all connectors from sysfs for device */ -extern void drm_connector_unplug_all(struct drm_device *dev); +/* helpers to {un}register all connectors from sysfs for device */ +extern int drm_connector_register_all(struct drm_device *dev); +extern void drm_connector_unregister_all(struct drm_device *dev); extern int drm_bridge_add(struct drm_bridge *bridge); extern void drm_bridge_remove(struct drm_bridge *bridge); @@ -2386,8 +2387,6 @@ extern int drm_framebuffer_init(struct drm_device *dev, const struct drm_framebuffer_funcs *funcs); extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, uint32_t id); -extern void drm_framebuffer_unreference(struct drm_framebuffer *fb); -extern void drm_framebuffer_reference(struct drm_framebuffer *fb); extern void drm_framebuffer_remove(struct drm_framebuffer *fb); extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); @@ -2445,6 +2444,8 @@ extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); +void drm_mode_object_reference(struct drm_mode_object *obj); +void drm_mode_object_unreference(struct drm_mode_object *obj); /* IOCTLs */ extern int drm_mode_getresources(struct drm_device *dev, @@ -2510,6 +2511,8 @@ extern int drm_edid_header_is_valid(const u8 *raw_edid); extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, bool *edid_corrupt); extern bool drm_edid_is_valid(struct edid *edid); +extern void drm_edid_get_monitor_name(struct edid *edid, char *name, + int buflen); extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, char topology[8]); @@ -2577,7 +2580,15 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, return mo ? obj_to_encoder(mo) : NULL; } -static inline struct drm_connector *drm_connector_find(struct drm_device *dev, +/** + * drm_connector_lookup - lookup connector object + * @dev: DRM device + * @id: connector object id + * + * This function looks up the connector object specified by id + * add takes a reference to it. 
+ */ +static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, uint32_t id) { struct drm_mode_object *mo; @@ -2600,14 +2611,73 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev, static inline uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision) { - uint32_t val = user_input + (1 << (16 - bit_precision - 1)); + uint32_t val = user_input; uint32_t max = 0xffff >> (16 - bit_precision); - val >>= 16 - bit_precision; + /* Round only if we're not using full precision. */ + if (bit_precision < 16) { + val += 1UL << (16 - bit_precision - 1); + val >>= 16 - bit_precision; + } return clamp_val(val, 0, max); } +/** + * drm_framebuffer_reference - incr the fb refcnt + * @fb: framebuffer + * + * This function increments the fb's refcount. + */ +static inline void drm_framebuffer_reference(struct drm_framebuffer *fb) +{ + drm_mode_object_reference(&fb->base); +} + +/** + * drm_framebuffer_unreference - unref a framebuffer + * @fb: framebuffer to unref + * + * This function decrements the fb's refcount and frees it if it drops to zero. + */ +static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) +{ + drm_mode_object_unreference(&fb->base); +} + +/** + * drm_framebuffer_read_refcount - read the framebuffer reference count. + * @fb: framebuffer + * + * This function returns the framebuffer's reference count. + */ +static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) +{ + return atomic_read(&fb->base.refcount.refcount); +} + +/** + * drm_connector_reference - incr the connector refcnt + * @connector: connector + * + * This function increments the connector's refcount. + */ +static inline void drm_connector_reference(struct drm_connector *connector) +{ + drm_mode_object_reference(&connector->base); +} + +/** + * drm_connector_unreference - unref a connector + * @connector: connector to unref + * + * This function decrements the connector's refcount and frees it if it drops to zero. + */ +static inline void drm_connector_unreference(struct drm_connector *connector) +{ + drm_mode_object_unreference(&connector->base); +} + /* Plane list iterator for legacy (overlay only) planes. 
*/ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 623b4e98e748..c0d4df6a606f 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -73,4 +73,21 @@ struct displayid_tiled_block { u8 topology_id[8]; } __packed; +struct displayid_detailed_timings_1 { + u8 pixel_clock[3]; + u8 flags; + u8 hactive[2]; + u8 hblank[2]; + u8 hsync[2]; + u8 hsw[2]; + u8 vactive[2]; + u8 vblank[2]; + u8 vsync[2]; + u8 vsw[2]; +} __packed; + +struct displayid_detailed_timing_block { + struct displayid_block base; + struct displayid_detailed_timings_1 timings[0]; +}; #endif diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h new file mode 100644 index 000000000000..e8a9dfd0e055 --- /dev/null +++ b/include/drm/drm_dp_dual_mode_helper.h @@ -0,0 +1,92 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DRM_DP_DUAL_MODE_HELPER_H +#define DRM_DP_DUAL_MODE_HELPER_H + +#include <linux/types.h> + +/* + * Optional for type 1 DVI adaptors + * Mandatory for type 1 HDMI and type 2 adaptors + */ +#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ +#define DP_DUAL_MODE_HDMI_ID_LEN 16 +/* + * Optional for type 1 adaptors + * Mandatory for type 2 adaptors + */ +#define DP_DUAL_MODE_ADAPTOR_ID 0x10 +#define DP_DUAL_MODE_REV_MASK 0x07 +#define DP_DUAL_MODE_REV_TYPE2 0x00 +#define DP_DUAL_MODE_TYPE_MASK 0xf0 +#define DP_DUAL_MODE_TYPE_TYPE2 0xa0 +#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ +#define DP_DUAL_IEEE_OUI_LEN 3 +#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ +#define DP_DUAL_DEVICE_ID_LEN 6 +#define DP_DUAL_MODE_HARDWARE_REV 0x1a +#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b +#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c +#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d +#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e +#define DP_DUAL_MODE_TMDS_OEN 0x20 +#define DP_DUAL_MODE_TMDS_DISABLE 0x01 +#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 +#define DP_DUAL_MODE_CEC_ENABLE 0x01 +#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 + +struct i2c_adapter; + +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size); +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size); + +/** + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor + */ +enum drm_dp_dual_mode_type { + DRM_DP_DUAL_MODE_NONE, + DRM_DP_DUAL_MODE_UNKNOWN, + DRM_DP_DUAL_MODE_TYPE1_DVI, + DRM_DP_DUAL_MODE_TYPE1_HDMI, + DRM_DP_DUAL_MODE_TYPE2_DVI, + DRM_DP_DUAL_MODE_TYPE2_HDMI, +}; + +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter); +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool *enabled); +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable); +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); + +#endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1252108da0ef..9d03f167007b 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -73,6 +73,7 @@ # define DP_ENHANCED_FRAME_CAP (1 << 7) #define DP_MAX_DOWNSPREAD 0x003 +# define DP_MAX_DOWNSPREAD_0_5 (1 << 0) # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) #define DP_NORP 0x004 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index dec6221e8198..919933d1beb4 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -328,7 +328,15 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); struct drm_connector *drm_select_eld(struct drm_encoder *encoder); + +#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE int drm_load_edid_firmware(struct drm_connector *connector); +#else +static inline int drm_load_edid_firmware(struct drm_connector *connector) +{ + return 0; +} +#endif int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe 
*frame, diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index be62bd321e75..fd0dde9f0a6d 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -4,11 +4,18 @@ struct drm_fbdev_cma; struct drm_gem_cma_object; +struct drm_fb_helper_surface_size; +struct drm_framebuffer_funcs; +struct drm_fb_helper_funcs; struct drm_framebuffer; +struct drm_fb_helper; struct drm_device; struct drm_file; struct drm_mode_fb_cmd2; +struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs); struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count); @@ -16,7 +23,17 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); +int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, + const struct drm_framebuffer_funcs *funcs); + +void drm_fb_cma_destroy(struct drm_framebuffer *fb); +int drm_fb_cma_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle); +struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev, + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); @@ -24,6 +41,8 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); #ifdef CONFIG_DEBUG_FS +struct seq_file; + int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 062723bdcabe..5b4aa35026a3 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -172,6 +172,10 @@ struct drm_fb_helper_connector { * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors + * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @dirty_lock: spinlock protecting @dirty_clip + * @dirty_work: worker used to flush the framebuffer * * This is the main structure used by the fbdev helpers. Drivers supporting * fbdev emulation should embedded this into their overall driver structure. 
@@ -189,6 +193,9 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; /** * @kernel_fb_list: @@ -245,6 +252,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist); + ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, @@ -368,6 +378,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) { } +static inline void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ +} + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 0b3e11ab8757..fca1cd1b9c26 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj) } /** - * drm_gem_object_unreference - release a GEM BO reference + * __drm_gem_object_unreference - raw function to release a GEM BO reference * @obj: GEM buffer object * - * This releases a reference to @obj. Callers must hold the dev->struct_mutex - * lock when calling this function, even when the driver doesn't use - * dev->struct_mutex for anything. + * This function is meant to be used by drivers which are not encumbered with + * dev->struct_mutex legacy locking and which are using the + * gem_free_object_unlocked callback. It avoids all the locking checks and + * locking overhead of drm_gem_object_unreference() and + * drm_gem_object_unreference_unlocked(). * - * For drivers not encumbered with legacy locking use - * drm_gem_object_unreference_unlocked() instead. + * Drivers should never call this directly in their code. Instead they should + * wrap it up into a driver_gem_object_unreference(struct driver_gem_object + * *obj) wrapper function, and use that. Shared code should never call this, to + * avoid breaking drivers by accident which still depend upon dev->struct_mutex + * locking. */ static inline void -drm_gem_object_unreference(struct drm_gem_object *obj) +__drm_gem_object_unreference(struct drm_gem_object *obj) { - if (obj != NULL) { - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); - - kref_put(&obj->refcount, drm_gem_object_free); - } + kref_put(&obj->refcount, drm_gem_object_free); } -/** - * drm_gem_object_unreference_unlocked - release a GEM BO reference - * @obj: GEM buffer object - * - * This releases a reference to @obj. Callers must not hold the - * dev->struct_mutex lock when calling this function. 
- */ -static inline void -drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) -{ - struct drm_device *dev; - - if (!obj) - return; - - dev = obj->dev; - if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex)) - mutex_unlock(&dev->struct_mutex); - else - might_lock(&dev->struct_mutex); -} +void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj); +void drm_gem_object_unreference(struct drm_gem_object *obj); int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, @@ -256,9 +238,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); -struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, - struct drm_file *filp, - u32 handle); +struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle); diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 3e698038dc7b..a5ef2c7e40f8 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -154,8 +154,10 @@ struct drm_map_list { int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_local_map **map_p); -int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); +void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); +void drm_legacy_master_rmmaps(struct drm_device *dev, + struct drm_master *master); struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h index e42495ad8136..70d4e221a3ad 100644 --- a/include/drm/drm_mem_util.h +++ b/include/drm/drm_mem_util.h @@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); } +static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) +{ + if (size != 0 && nmemb > SIZE_MAX / size) + return NULL; + + if (size * nmemb <= PAGE_SIZE) + return kmalloc(nmemb * size, gfp); + + if (gfp & __GFP_RECLAIMABLE) { + void *ptr = kmalloc(nmemb * size, + gfp | __GFP_NOWARN | __GFP_NORETRY); + if (ptr) + return ptr; + } + + return __vmalloc(size * nmemb, + gfp | __GFP_HIGHMEM, PAGE_KERNEL); +} + static __inline void drm_free_large(void *ptr) { kvfree(ptr); diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index b61c2d45192e..d4619dc2eecb 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -672,7 +672,7 @@ struct drm_connector_helper_funcs { * fixed panel can also manually add specific modes using * drm_mode_probed_add(). Drivers which manually add modes should also * make sure that the @display_info, @width_mm and @height_mm fields of the - * struct #drm_connector are filled in. + * struct &drm_connector are filled in. 
* * Virtual drivers that just want some standard VESA mode with a given * resolution can call drm_add_modes_noedid(), and mark the preferred diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 13ff44b28893..220d1e2b3db1 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -75,6 +75,14 @@ struct drm_panel_funcs { struct display_timing *timings); }; +/** + * struct drm_panel - DRM panel object + * @drm: DRM device owning the panel + * @connector: DRM connector that the panel is attached to + * @dev: parent device of the panel + * @funcs: operations that can be performed on the panel + * @list: panel entry in registry + */ struct drm_panel { struct drm_device *drm; struct drm_connector *connector; @@ -85,6 +93,17 @@ struct drm_panel { struct list_head list; }; +/** + * drm_panel_unprepare - power off a panel + * @panel: DRM panel + * + * Calling this function will completely power off a panel (assert the panel's + * reset, turn off power supplies, ...). After this function has completed, it + * is usually no longer possible to communicate with the panel until another + * call to drm_panel_prepare(). + * + * Return: 0 on success or a negative error code on failure. + */ static inline int drm_panel_unprepare(struct drm_panel *panel) { if (panel && panel->funcs && panel->funcs->unprepare) @@ -93,6 +112,16 @@ static inline int drm_panel_unprepare(struct drm_panel *panel) return panel ? -ENOSYS : -EINVAL; } +/** + * drm_panel_disable - disable a panel + * @panel: DRM panel + * + * This will typically turn off the panel's backlight or disable the display + * drivers. For smart panels it should still be possible to communicate with + * the integrated circuitry via any command bus after this call. + * + * Return: 0 on success or a negative error code on failure. + */ static inline int drm_panel_disable(struct drm_panel *panel) { if (panel && panel->funcs && panel->funcs->disable) @@ -101,6 +130,16 @@ static inline int drm_panel_disable(struct drm_panel *panel) return panel ? -ENOSYS : -EINVAL; } +/** + * drm_panel_prepare - power on a panel + * @panel: DRM panel + * + * Calling this function will enable power and deassert any reset signals to + * the panel. After this has completed it is possible to communicate with any + * integrated circuitry via a command bus. + * + * Return: 0 on success or a negative error code on failure. + */ static inline int drm_panel_prepare(struct drm_panel *panel) { if (panel && panel->funcs && panel->funcs->prepare) @@ -109,6 +148,16 @@ static inline int drm_panel_prepare(struct drm_panel *panel) return panel ? -ENOSYS : -EINVAL; } +/** + * drm_panel_enable - enable a panel + * @panel: DRM panel + * + * Calling this function will cause the panel display drivers to be turned on + * and the backlight to be enabled. Content will be visible on screen after + * this call completes. + * + * Return: 0 on success or a negative error code on failure. + */ static inline int drm_panel_enable(struct drm_panel *panel) { if (panel && panel->funcs && panel->funcs->enable) @@ -117,6 +166,16 @@ static inline int drm_panel_enable(struct drm_panel *panel) return panel ? -ENOSYS : -EINVAL; } +/** + * drm_panel_get_modes - probe the available display modes of a panel + * @panel: DRM panel + * + * The modes probed from the panel are automatically added to the connector + * that the panel is attached to. + * + * Return: The number of modes available from the panel on success or a + * negative error code on failure. 
+ */ static inline int drm_panel_get_modes(struct drm_panel *panel) { if (panel && panel->funcs && panel->funcs->get_modes) diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 2f63dd5e05eb..06ea8e077ec2 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -176,19 +176,6 @@ static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) } /** - * drm_vma_node_has_offset() - Check whether node is added to offset manager - * @node: Node to be checked - * - * RETURNS: - * true iff the node was previously allocated an offset and added to - * an vma offset manager. - */ -static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node) -{ - return drm_mm_node_allocated(&node->vm_node); -} - -/** * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps * @node: Linked offset node * @@ -220,7 +207,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, struct address_space *file_mapping) { - if (drm_vma_node_has_offset(node)) + if (drm_mm_node_allocated(&node->vm_node)) unmap_mapping_range(file_mapping, drm_vma_node_offset_addr(node), drm_vma_node_size(node) << PAGE_SHIFT, 1); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 055a08ddac02..c801d9028e37 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -314,7 +314,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) * Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, +extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); /** * ttm_bo_validate diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3d4bf08aa21f..513f7f96b80a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -434,6 +434,18 @@ struct ttm_bo_driver { */ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + + /** + * Optional driver callback for when BO is removed from the LRU. + * Called with LRU lock held immediately before the removal. + */ + void (*lru_removal)(struct ttm_buffer_object *bo); + + /** + * Return the list_head after which a BO should be inserted in the LRU. + */ + struct list_head *(*lru_tail)(struct ttm_buffer_object *bo); + struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo); }; /** @@ -502,7 +514,6 @@ struct ttm_bo_global { * @vma_manager: Address space manager * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. - * @val_seq: Current validation sequence. * @dev_mapping: A pointer to the struct address_space representing the * device address space. * @wq: Work queue structure for the delayed delete workqueue. @@ -528,7 +539,6 @@ struct ttm_bo_device { * Protected by the global:lru lock. */ struct list_head ddestroy; - uint32_t val_seq; /* * Protected by load / firstopen / lastclose /unload sync. 
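The new lru_removal, lru_tail and swap_lru_tail callbacks in struct ttm_bo_driver let a driver decide where a buffer object is inserted on the LRU lists. As a hedged sketch only (foo_bo_driver is an assumed name; only the two fields shown come from this diff), a driver that wants the unchanged default ordering can point the new hooks at the default helpers declared just below:

#include <drm/ttm/ttm_bo_driver.h>

static struct ttm_bo_driver foo_bo_driver = {
	/* ... ttm_tt_create, evict_flags, move, verify_access, ... */
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};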
@@ -753,14 +763,16 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo); +struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo); + /** * __ttm_bo_reserve: * * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. - * @use_ticket: If @bo is already reserved, Only sleep waiting for - * it to become unreserved if @ticket->stamp is older. + * @ticket: ticket used to acquire the ww_mutex. * * Will not remove reserved buffers from the lru lists. * Otherwise identical to ttm_bo_reserve. @@ -776,8 +788,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); * be returned if @use_ticket is set to true. */ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, + bool interruptible, bool no_wait, struct ww_acquire_ctx *ticket) { int ret = 0; @@ -806,8 +817,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. - * @use_ticket: If @bo is already reserved, Only sleep waiting for - * it to become unreserved if @ticket->stamp is older. + * @ticket: ticket used to acquire the ww_mutex. * * Locks a buffer object for validation. (Or prevents other processes from * locking it for validation) and removes it from lru lists, while taking @@ -846,15 +856,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * be returned if @use_ticket is set to true. */ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, + bool interruptible, bool no_wait, struct ww_acquire_ctx *ticket) { int ret; WARN_ON(!atomic_read(&bo->kref.refcount)); - ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket); + ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket); if (likely(ret == 0)) ttm_bo_del_sub_from_lru(bo); @@ -1030,8 +1039,7 @@ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; -#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) -#define TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> /** diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h new file mode 100644 index 000000000000..27359ad83904 --- /dev/null +++ b/include/dt-bindings/clock/ath79-clk.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2014, 2016 Antony Pavlov <antonynpavlov@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_ATH79_CLK_H +#define __DT_BINDINGS_ATH79_CLK_H + +#define ATH79_CLK_CPU 0 +#define ATH79_CLK_DDR 1 +#define ATH79_CLK_AHB 2 + +#define ATH79_CLK_END 3 + +#endif /* __DT_BINDINGS_ATH79_CLK_H */ diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h new file mode 100644 index 000000000000..f9f04dccc996 --- /dev/null +++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h @@ -0,0 +1,38 @@ +/* + * ARTPEC-6 clock controller indexes + * + * Copyright 2016 Axis Comunications AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H +#define DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H + +#define ARTPEC6_CLK_CPU 0 +#define ARTPEC6_CLK_CPU_PERIPH 1 +#define ARTPEC6_CLK_NAND_CLKA 2 +#define ARTPEC6_CLK_NAND_CLKB 3 +#define ARTPEC6_CLK_ETH_ACLK 4 +#define ARTPEC6_CLK_DMA_ACLK 5 +#define ARTPEC6_CLK_PTP_REF 6 +#define ARTPEC6_CLK_SD_PCLK 7 +#define ARTPEC6_CLK_SD_IMCLK 8 +#define ARTPEC6_CLK_I2S_HST 9 +#define ARTPEC6_CLK_I2S0_CLK 10 +#define ARTPEC6_CLK_I2S1_CLK 11 +#define ARTPEC6_CLK_UART_PCLK 12 +#define ARTPEC6_CLK_UART_REFCLK 13 +#define ARTPEC6_CLK_I2C 14 +#define ARTPEC6_CLK_SPI_PCLK 15 +#define ARTPEC6_CLK_SPI_SSPCLK 16 +#define ARTPEC6_CLK_SYS_TIMER 17 +#define ARTPEC6_CLK_FRACDIV_IN 18 +#define ARTPEC6_CLK_DBG_PCLK 19 + +/* This must be the highest clock index plus one. */ +#define ARTPEC6_CLK_NUMCLOCKS 20 + +#endif diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 61f1d20c2a67..360e00cefd35 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -44,5 +44,23 @@ #define BCM2835_CLOCK_EMMC 28 #define BCM2835_CLOCK_PERI_IMAGE 29 #define BCM2835_CLOCK_PWM 30 +#define BCM2835_CLOCK_PCM 31 -#define BCM2835_CLOCK_COUNT 31 +#define BCM2835_PLLA_DSI0 32 +#define BCM2835_PLLA_CCP2 33 +#define BCM2835_PLLD_DSI0 34 +#define BCM2835_PLLD_DSI1 35 + +#define BCM2835_CLOCK_AVEO 36 +#define BCM2835_CLOCK_DFT 37 +#define BCM2835_CLOCK_GP0 38 +#define BCM2835_CLOCK_GP1 39 +#define BCM2835_CLOCK_GP2 40 +#define BCM2835_CLOCK_SLIM 41 +#define BCM2835_CLOCK_SMI 42 +#define BCM2835_CLOCK_TEC 43 +#define BCM2835_CLOCK_DPI 44 +#define BCM2835_CLOCK_CAM0 45 +#define BCM2835_CLOCK_CAM1 46 +#define BCM2835_CLOCK_DSI0E 47 +#define BCM2835_CLOCK_DSI1E 48 diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 7699ee9c16c0..17ab8394bec7 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -217,8 +217,30 @@ /* divider clocks */ #define CLK_DOUT_PIXEL 768 +#define CLK_DOUT_ACLK400_WCORE 769 +#define CLK_DOUT_ACLK400_ISP 770 +#define CLK_DOUT_ACLK400_MSCL 771 +#define CLK_DOUT_ACLK200 772 +#define CLK_DOUT_ACLK200_FSYS2 773 +#define CLK_DOUT_ACLK100_NOC 774 +#define CLK_DOUT_PCLK200_FSYS 775 +#define CLK_DOUT_ACLK200_FSYS 776 +#define CLK_DOUT_ACLK333_432_GSCL 777 +#define CLK_DOUT_ACLK333_432_ISP 778 +#define CLK_DOUT_ACLK66 779 +#define CLK_DOUT_ACLK333_432_ISP0 780 +#define CLK_DOUT_ACLK266 781 +#define CLK_DOUT_ACLK166 782 +#define CLK_DOUT_ACLK333 783 +#define CLK_DOUT_ACLK333_G2D 784 +#define CLK_DOUT_ACLK266_G2D 785 +#define CLK_DOUT_ACLK_G3D 786 +#define CLK_DOUT_ACLK300_JPEG 787 +#define CLK_DOUT_ACLK300_DISP1 788 +#define CLK_DOUT_ACLK300_GSCL 789 +#define CLK_DOUT_ACLK400_DISP1 
790 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 769 +#define CLK_NR_CLKS 791 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */ diff --git a/include/dt-bindings/clock/hi3519-clock.h b/include/dt-bindings/clock/hi3519-clock.h new file mode 100644 index 000000000000..14f4d2184e5a --- /dev/null +++ b/include/dt-bindings/clock/hi3519-clock.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2015 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __DTS_HI3519_CLOCK_H +#define __DTS_HI3519_CLOCK_H + +#define HI3519_FMC_CLK 1 +#define HI3519_SPI0_CLK 2 +#define HI3519_SPI1_CLK 3 +#define HI3519_SPI2_CLK 4 +#define HI3519_UART0_CLK 5 +#define HI3519_UART1_CLK 6 +#define HI3519_UART2_CLK 7 +#define HI3519_UART3_CLK 8 +#define HI3519_UART4_CLK 9 +#define HI3519_PWM_CLK 10 +#define HI3519_DMA_CLK 11 +#define HI3519_IR_CLK 12 +#define HI3519_ETH_PHY_CLK 13 +#define HI3519_ETH_MAC_CLK 14 +#define HI3519_ETH_MACIF_CLK 15 +#define HI3519_USB2_BUS_CLK 16 +#define HI3519_USB2_PORT_CLK 17 +#define HI3519_USB3_CLK 18 + +#endif /* __DTS_HI3519_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index edca8985c50e..1183347c383f 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -448,5 +448,6 @@ #define IMX7D_PLL_DRAM_TEST_DIV 435 #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 -#define IMX7D_CLK_END 438 +#define IMX7D_CKIL 438 +#define IMX7D_CLK_END 439 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h new file mode 100644 index 000000000000..184647a6a8de --- /dev/null +++ b/include/dt-bindings/clock/microchip,pic32-clock.h @@ -0,0 +1,42 @@ +/* + * Purna Chandra Mandal,<purna.mandal@microchip.com> + * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ +#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ + +/* clock output indices */ +#define POSCCLK 0 +#define FRCCLK 1 +#define BFRCCLK 2 +#define LPRCCLK 3 +#define SOSCCLK 4 +#define FRCDIVCLK 5 +#define PLLCLK 6 +#define SCLK 7 +#define PB1CLK 8 +#define PB2CLK 9 +#define PB3CLK 10 +#define PB4CLK 11 +#define PB5CLK 12 +#define PB6CLK 13 +#define PB7CLK 14 +#define REF1CLK 15 +#define REF2CLK 16 +#define REF3CLK 17 +#define REF4CLK 18 +#define REF5CLK 19 +#define UPLLCLK 20 +#define MAXCLKS 21 + +#endif /* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h index 7956ba1bc974..6094bf7e50ab 100644 --- a/include/dt-bindings/clock/mt8173-clk.h +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -176,7 +176,8 @@ #define CLK_APMIXED_LVDSPLL 13 #define CLK_APMIXED_MSDCPLL2 14 #define CLK_APMIXED_REF2USB_TX 15 -#define CLK_APMIXED_NR_CLK 16 +#define CLK_APMIXED_HDMI_REF 16 +#define CLK_APMIXED_NR_CLK 17 /* INFRA_SYS */ diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 0a05b0d36ae7..bd3530e56d46 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -346,7 +346,7 @@ #define TEGRA210_CLK_PLL_P_OUT_HSIO 316 #define TEGRA210_CLK_PLL_P_OUT_XUSB 317 #define TEGRA210_CLK_XUSB_SSP_SRC 318 -/* 319 */ +#define TEGRA210_CLK_PLL_RE_OUT1 319 /* 320 */ /* 321 */ /* 322 */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 56c16aaea112..45997750c8a0 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -194,7 +194,11 @@ #define VF610_PLL7_BYPASS 181 #define VF610_CLK_SNVS 182 #define VF610_CLK_DAP 183 -#define VF610_CLK_OCOTP 184 -#define VF610_CLK_END 185 +#define VF610_CLK_OCOTP 184 +#define VF610_CLK_DDRMC 185 +#define VF610_CLK_WKPU 186 +#define VF610_CLK_TCON0 187 +#define VF610_CLK_TCON1 188 +#define VF610_CLK_END 189 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h new file mode 100644 index 000000000000..58654fd7aa1e --- /dev/null +++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h @@ -0,0 +1,154 @@ +/* + * GPIO definitions for Amlogic Meson GXBB SoCs + * + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H +#define _DT_BINDINGS_MESON_GXBB_GPIO_H + +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 + +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define BOOT_0 20 +#define BOOT_1 21 +#define BOOT_2 22 +#define BOOT_3 23 +#define BOOT_4 24 +#define BOOT_5 25 +#define BOOT_6 26 +#define BOOT_7 27 +#define BOOT_8 28 +#define BOOT_9 29 +#define BOOT_10 30 +#define BOOT_11 31 +#define BOOT_12 32 +#define BOOT_13 33 +#define BOOT_14 34 +#define BOOT_15 35 +#define BOOT_16 36 +#define BOOT_17 37 +#define CARD_0 38 +#define CARD_1 39 +#define CARD_2 40 +#define CARD_3 41 +#define CARD_4 42 +#define CARD_5 43 +#define CARD_6 44 +#define GPIODV_0 45 +#define GPIODV_1 46 +#define GPIODV_2 47 +#define GPIODV_3 48 +#define GPIODV_4 49 +#define GPIODV_5 50 +#define GPIODV_6 51 +#define GPIODV_7 52 +#define GPIODV_8 53 +#define GPIODV_9 54 +#define GPIODV_10 55 +#define GPIODV_11 56 +#define GPIODV_12 57 +#define GPIODV_13 58 +#define GPIODV_14 59 +#define GPIODV_15 60 +#define GPIODV_16 61 +#define GPIODV_17 62 +#define GPIODV_18 63 +#define GPIODV_19 64 +#define GPIODV_20 65 +#define GPIODV_21 66 +#define GPIODV_22 67 +#define GPIODV_23 68 +#define GPIODV_24 69 +#define GPIODV_25 70 +#define GPIODV_26 71 +#define GPIODV_27 72 +#define GPIODV_28 73 +#define GPIODV_29 74 +#define GPIOY_0 75 +#define GPIOY_1 76 +#define GPIOY_2 77 +#define GPIOY_3 78 +#define GPIOY_4 79 +#define GPIOY_5 80 +#define GPIOY_6 81 +#define GPIOY_7 82 +#define GPIOY_8 83 +#define GPIOY_9 84 +#define GPIOY_10 85 +#define GPIOY_11 86 +#define GPIOY_12 87 +#define GPIOY_13 88 +#define GPIOY_14 89 +#define GPIOY_15 90 +#define GPIOY_16 91 +#define GPIOX_0 92 +#define GPIOX_1 93 +#define GPIOX_2 94 +#define GPIOX_3 95 +#define GPIOX_4 96 +#define GPIOX_5 97 +#define GPIOX_6 98 +#define GPIOX_7 99 +#define GPIOX_8 100 +#define GPIOX_9 101 +#define GPIOX_10 102 +#define GPIOX_11 103 +#define GPIOX_12 104 +#define GPIOX_13 105 +#define GPIOX_14 106 +#define GPIOX_15 107 +#define GPIOX_16 108 +#define GPIOX_17 109 +#define GPIOX_18 110 +#define GPIOX_19 111 +#define GPIOX_20 112 +#define GPIOX_21 113 +#define GPIOX_22 114 +#define GPIOCLK_0 115 +#define GPIOCLK_1 116 +#define GPIOCLK_2 117 +#define GPIOCLK_3 118 +#define GPIO_TEST_N 119 + +#endif diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h new file mode 100644 index 000000000000..c48aca1dcade --- /dev/null +++ b/include/dt-bindings/iio/adi,ad5592r.h @@ -0,0 +1,16 @@ + +#ifndef _DT_BINDINGS_ADI_AD5592R_H +#define _DT_BINDINGS_ADI_AD5592R_H + +#define CH_MODE_UNUSED 0 +#define CH_MODE_ADC 1 +#define CH_MODE_DAC 2 +#define CH_MODE_DAC_AND_ADC 3 +#define CH_MODE_GPIO 8 + +#define CH_OFFSTATE_PULLDOWN 0 +#define CH_OFFSTATE_OUT_LOW 1 +#define CH_OFFSTATE_OUT_HIGH 2 +#define CH_OFFSTATE_OUT_TRISTATE 3 + +#endif /* _DT_BINDINGS_ADI_AD5592R_H */ diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h 
index c40f665e2712..dedf46ffdb53 100644 --- a/include/dt-bindings/mfd/arizona.h +++ b/include/dt-bindings/mfd/arizona.h @@ -110,4 +110,9 @@ #define ARIZONA_ACCDET_MODE_HPM 4 #define ARIZONA_ACCDET_MODE_ADC 7 +#define ARIZONA_GPSW_OPEN 0 +#define ARIZONA_GPSW_CLOSED 1 +#define ARIZONA_GPSW_CLAMP_ENABLED 2 +#define ARIZONA_GPSW_CLAMP_DISABLED 3 + #endif diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h new file mode 100644 index 000000000000..b911a0720ccd --- /dev/null +++ b/include/dt-bindings/mfd/max77620.h @@ -0,0 +1,39 @@ +/* + * This header provides macros for MAXIM MAX77620 device bindings. + * + * Copyright (c) 2016, NVIDIA Corporation. + * Author: Laxman Dewangan <ldewangan@nvidia.com> + */ + +#ifndef _DT_BINDINGS_MFD_MAX77620_H +#define _DT_BINDINGS_MFD_MAX77620_H + +/* MAX77620 interrupts */ +#define MAX77620_IRQ_TOP_GLBL 0 /* Low-Battery */ +#define MAX77620_IRQ_TOP_SD 1 /* SD power fail */ +#define MAX77620_IRQ_TOP_LDO 2 /* LDO power fail */ +#define MAX77620_IRQ_TOP_GPIO 3 /* GPIO internal int to MAX77620 */ +#define MAX77620_IRQ_TOP_RTC 4 /* RTC */ +#define MAX77620_IRQ_TOP_32K 5 /* 32kHz oscillator */ +#define MAX77620_IRQ_TOP_ONOFF 6 /* ON/OFF oscillator */ +#define MAX77620_IRQ_LBT_MBATLOW 7 /* Thermal alarm status, > 120C */ +#define MAX77620_IRQ_LBT_TJALRM1 8 /* Thermal alarm status, > 120C */ +#define MAX77620_IRQ_LBT_TJALRM2 9 /* Thermal alarm status, > 140C */ + +/* FPS event source */ +#define MAX77620_FPS_EVENT_SRC_EN0 0 +#define MAX77620_FPS_EVENT_SRC_EN1 1 +#define MAX77620_FPS_EVENT_SRC_SW 2 + +/* Device state when FPS event LOW */ +#define MAX77620_FPS_INACTIVE_STATE_SLEEP 0 +#define MAX77620_FPS_INACTIVE_STATE_LOW_POWER 1 + +/* FPS source */ +#define MAX77620_FPS_SRC_0 0 +#define MAX77620_FPS_SRC_1 1 +#define MAX77620_FPS_SRC_2 2 +#define MAX77620_FPS_SRC_NONE 3 +#define MAX77620_FPS_SRC_DEF 4 + +#endif diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h index 85aaf66690f9..729ab9fc325e 100644 --- a/include/dt-bindings/thermal/tegra124-soctherm.h +++ b/include/dt-bindings/thermal/tegra124-soctherm.h @@ -9,5 +9,6 @@ #define TEGRA124_SOCTHERM_SENSOR_MEM 1 #define TEGRA124_SOCTHERM_SENSOR_GPU 2 #define TEGRA124_SOCTHERM_SENSOR_PLLX 3 +#define TEGRA124_SOCTHERM_SENSOR_NUM 4 #endif diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index 4915d40d3c3c..2480469ce8fb 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -32,7 +32,7 @@ struct asymmetric_key_subtype { void (*describe)(const struct key *key, struct seq_file *m); /* Destroy a key of this subtype */ - void (*destroy)(void *payload); + void (*destroy)(void *payload_crypto, void *payload_auth); /* Verify the signature on a key of this subtype (optional) */ int (*verify_signature)(const struct key *key, diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 59c1df9cf922..b38240716d41 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -15,6 +15,7 @@ #define _KEYS_ASYMMETRIC_TYPE_H #include <linux/key-type.h> +#include <linux/verification.h> extern struct key_type key_type_asymmetric; @@ -23,9 +24,10 @@ extern struct key_type key_type_asymmetric; * follows: */ enum asymmetric_payload_bits { - asym_crypto, - asym_subtype, - asym_key_ids, + asym_crypto, /* The data representing the key */ + asym_subtype, /* Pointer to an asymmetric_key_subtype struct */ + asym_key_ids, /* Pointer to an 
asymmetric_key_ids struct */ + asym_auth /* The key's authorisation (signature, parent key ID) */ }; /* @@ -74,6 +76,11 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) return key->payload.data[asym_key_ids]; } +extern struct key *find_asymmetric_key(struct key *keyring, + const struct asymmetric_key_id *id_0, + const struct asymmetric_key_id *id_1, + bool partial); + /* * The payload is at the discretion of the subtype. */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index 39fd38cfa8c9..fbd4647767e9 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -12,51 +12,40 @@ #ifndef _KEYS_SYSTEM_KEYRING_H #define _KEYS_SYSTEM_KEYRING_H +#include <linux/key.h> + #ifdef CONFIG_SYSTEM_TRUSTED_KEYRING -#include <linux/key.h> -#include <crypto/public_key.h> +extern int restrict_link_by_builtin_trusted(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); -extern struct key *system_trusted_keyring; -static inline struct key *get_system_trusted_keyring(void) -{ - return system_trusted_keyring; -} #else -static inline struct key *get_system_trusted_keyring(void) -{ - return NULL; -} +#define restrict_link_by_builtin_trusted restrict_link_reject #endif -#ifdef CONFIG_SYSTEM_DATA_VERIFICATION -extern int system_verify_data(const void *data, unsigned long len, - const void *raw_pkcs7, size_t pkcs7_len, - enum key_being_used_for usage); +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING +extern int restrict_link_by_builtin_and_secondary_trusted( + struct key *keyring, + const struct key_type *type, + const union key_payload *payload); +#else +#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted #endif -#ifdef CONFIG_IMA_MOK_KEYRING -extern struct key *ima_mok_keyring; +#ifdef CONFIG_IMA_BLACKLIST_KEYRING extern struct key *ima_blacklist_keyring; -static inline struct key *get_ima_mok_keyring(void) -{ - return ima_mok_keyring; -} static inline struct key *get_ima_blacklist_keyring(void) { return ima_blacklist_keyring; } #else -static inline struct key *get_ima_mok_keyring(void) -{ - return NULL; -} static inline struct key *get_ima_blacklist_keyring(void) { return NULL; } -#endif /* CONFIG_IMA_MOK_KEYRING */ +#endif /* CONFIG_IMA_BLACKLIST_KEYRING */ #endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index b651aed9dc6b..dda39d8fa189 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -24,9 +24,6 @@ #include <linux/workqueue.h> struct arch_timer_kvm { - /* Is the timer enabled */ - bool enabled; - /* Virtual offset */ cycle_t cntvoff; }; @@ -53,15 +50,15 @@ struct arch_timer_cpu { /* Timer IRQ */ struct kvm_irq_level irq; - /* VGIC mapping */ - struct irq_phys_map *map; - /* Active IRQ state caching */ bool active_cleared_last; + + /* Is the timer enabled */ + bool enabled; }; int kvm_timer_hyp_init(void); -void kvm_timer_enable(struct kvm *kvm); +int kvm_timer_enable(struct kvm_vcpu *vcpu); void kvm_timer_init(struct kvm *kvm); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq); diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 281caf847fad..da0a524802cb 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -19,12 +19,17 @@ #ifndef __ASM_ARM_KVM_VGIC_H #define __ASM_ARM_KVM_VGIC_H +#ifdef CONFIG_KVM_NEW_VGIC +#include <kvm/vgic/vgic.h> +#else + #include <linux/kernel.h> #include <linux/kvm.h> #include 
<linux/irqreturn.h> #include <linux/spinlock.h> #include <linux/types.h> #include <kvm/iodev.h> +#include <linux/irqchip/arm-gic-common.h> #define VGIC_NR_IRQS_LEGACY 256 #define VGIC_NR_SGIS 16 @@ -157,7 +162,6 @@ struct vgic_io_device { struct irq_phys_map { u32 virt_irq; u32 phys_irq; - u32 irq; }; struct irq_phys_map_entry { @@ -304,9 +308,6 @@ struct vgic_cpu { unsigned long *active_shared; unsigned long *pend_act_shared; - /* Number of list registers on this CPU */ - int nr_lr; - /* CPU vif control registers for world switch */ union { struct vgic_v2_cpu_if vgic_v2; @@ -341,27 +342,28 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, bool level); int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, - struct irq_phys_map *map, bool level); + unsigned int virt_irq, bool level); void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, - int virt_irq, int irq); -int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); -bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq); +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) #define vgic_ready(k) ((k)->arch.vgic.ready) +#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ + ((i) < (k)->arch.vgic.nr_irqs)) -int vgic_v2_probe(struct device_node *vgic_node, +int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info, const struct vgic_ops **ops, const struct vgic_params **params); #ifdef CONFIG_KVM_ARM_VGIC_V3 -int vgic_v3_probe(struct device_node *vgic_node, +int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info, const struct vgic_ops **ops, const struct vgic_params **params); #else -static inline int vgic_v3_probe(struct device_node *vgic_node, +static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info, const struct vgic_ops **ops, const struct vgic_params **params) { @@ -369,4 +371,5 @@ static inline int vgic_v3_probe(struct device_node *vgic_node, } #endif +#endif /* old VGIC include */ #endif diff --git a/include/kvm/vgic/vgic.h b/include/kvm/vgic/vgic.h new file mode 100644 index 000000000000..3fbd175265ae --- /dev/null +++ b/include/kvm/vgic/vgic.h @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_ARM_KVM_VGIC_VGIC_H +#define __ASM_ARM_KVM_VGIC_VGIC_H + +#include <linux/kernel.h> +#include <linux/kvm.h> +#include <linux/irqreturn.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <kvm/iodev.h> + +#define VGIC_V3_MAX_CPUS 255 +#define VGIC_V2_MAX_CPUS 8 +#define VGIC_NR_IRQS_LEGACY 256 +#define VGIC_NR_SGIS 16 +#define VGIC_NR_PPIS 16 +#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) +#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1) +#define VGIC_MAX_SPI 1019 +#define VGIC_MAX_RESERVED 1023 +#define VGIC_MIN_LPI 8192 + +enum vgic_type { + VGIC_V2, /* Good ol' GICv2 */ + VGIC_V3, /* New fancy GICv3 */ +}; + +/* same for all guests, as depending only on the _host's_ GIC model */ +struct vgic_global { + /* type of the host GIC */ + enum vgic_type type; + + /* Physical address of vgic virtual cpu interface */ + phys_addr_t vcpu_base; + + /* virtual control interface mapping */ + void __iomem *vctrl_base; + + /* Number of implemented list registers */ + int nr_lr; + + /* Maintenance IRQ number */ + unsigned int maint_irq; + + /* maximum number of VCPUs allowed (GICv2 limits us to 8) */ + int max_gic_vcpus; + + /* Only needed for the legacy KVM_CREATE_IRQCHIP */ + bool can_emulate_gicv2; +}; + +extern struct vgic_global kvm_vgic_global_state; + +#define VGIC_V2_MAX_LRS (1 << 6) +#define VGIC_V3_MAX_LRS 16 +#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + +enum vgic_irq_config { + VGIC_CONFIG_EDGE = 0, + VGIC_CONFIG_LEVEL +}; + +struct vgic_irq { + spinlock_t irq_lock; /* Protects the content of the struct */ + struct list_head ap_list; + + struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU + * SPIs and LPIs: The VCPU whose ap_list + * this is queued on. + */ + + struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should + * be sent to, as a result of the + * targets reg (v2) or the + * affinity reg (v3). 
+ */ + + u32 intid; /* Guest visible INTID */ + bool pending; + bool line_level; /* Level only */ + bool soft_pending; /* Level only */ + bool active; /* not used for LPIs */ + bool enabled; + bool hw; /* Tied to HW IRQ */ + u32 hwintid; /* HW INTID number */ + union { + u8 targets; /* GICv2 target VCPUs mask */ + u32 mpidr; /* GICv3 target VCPU */ + }; + u8 source; /* GICv2 SGIs only */ + u8 priority; + enum vgic_irq_config config; /* Level or edge */ +}; + +struct vgic_register_region; + +struct vgic_io_device { + gpa_t base_addr; + struct kvm_vcpu *redist_vcpu; + const struct vgic_register_region *regions; + int nr_regions; + struct kvm_io_device dev; +}; + +struct vgic_dist { + bool in_kernel; + bool ready; + bool initialized; + + /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ + u32 vgic_model; + + int nr_spis; + + /* TODO: Consider moving to global state */ + /* Virtual control interface mapping */ + void __iomem *vctrl_base; + + /* base addresses in guest physical address space: */ + gpa_t vgic_dist_base; /* distributor */ + union { + /* either a GICv2 CPU interface */ + gpa_t vgic_cpu_base; + /* or a number of GICv3 redistributor regions */ + gpa_t vgic_redist_base; + }; + + /* distributor enabled */ + bool enabled; + + struct vgic_irq *spis; + + struct vgic_io_device dist_iodev; + struct vgic_io_device *redist_iodevs; +}; + +struct vgic_v2_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_misr; /* Saved only */ + u64 vgic_eisr; /* Saved only */ + u64 vgic_elrsr; /* Saved only */ + u32 vgic_apr; + u32 vgic_lr[VGIC_V2_MAX_LRS]; +}; + +struct vgic_v3_cpu_if { +#ifdef CONFIG_KVM_ARM_VGIC_V3 + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_sre; /* Restored only, change ignored */ + u32 vgic_misr; /* Saved only */ + u32 vgic_eisr; /* Saved only */ + u32 vgic_elrsr; /* Saved only */ + u32 vgic_ap0r[4]; + u32 vgic_ap1r[4]; + u64 vgic_lr[VGIC_V3_MAX_LRS]; +#endif +}; + +struct vgic_cpu { + /* CPU vif control registers for world switch */ + union { + struct vgic_v2_cpu_if vgic_v2; + struct vgic_v3_cpu_if vgic_v3; + }; + + unsigned int used_lrs; + struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + + spinlock_t ap_list_lock; /* Protects the ap_list */ + + /* + * List of IRQs that this VCPU should consider because they are either + * Active or Pending (hence the name; AP list), or because they recently + * were one of the two and need to be migrated off this list to another + * VCPU. 
+ */ + struct list_head ap_list_head; + + u64 live_lrs; +}; + +int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); +void kvm_vgic_early_init(struct kvm *kvm); +int kvm_vgic_create(struct kvm *kvm, u32 type); +void kvm_vgic_destroy(struct kvm *kvm); +void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_vgic_map_resources(struct kvm *kvm); +int kvm_vgic_hyp_init(void); + +int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level); +int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level); +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq); +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq); + +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); + +#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) +#define vgic_initialized(k) ((k)->arch.vgic.initialized) +#define vgic_ready(k) ((k)->arch.vgic.ready) +#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ + ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) + +bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); +void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); +void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); + +#ifdef CONFIG_KVM_ARM_VGIC_V3 +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); +#else +static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) +{ +} +#endif + +/** + * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW + * + * The host's GIC naturally limits the maximum amount of VCPUs a guest + * can use. + */ +static inline int kvm_vgic_get_max_vcpus(void) +{ + return kvm_vgic_global_state.max_gic_vcpus; +} + +#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */ diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 10fe2a211c2e..27e9ec8778eb 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -86,7 +86,7 @@ struct pl08x_channel_data { * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 */ struct pl08x_platform_data { - const struct pl08x_channel_data *slave_channels; + struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; int (*get_xfer_signal)(const struct pl08x_channel_data *); diff --git a/include/linux/ata.h b/include/linux/ata.h index f310ec0f072e..99346be5a7ca 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -243,6 +243,7 @@ enum { ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, ATA_CMD_FPDMA_READ = 0x60, ATA_CMD_FPDMA_WRITE = 0x61, + ATA_CMD_NCQ_NON_DATA = 0x63, ATA_CMD_FPDMA_SEND = 0x64, ATA_CMD_FPDMA_RECV = 0x65, ATA_CMD_PIO_READ = 0x20, @@ -301,19 +302,43 @@ enum { ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, ATA_CMD_REQ_SENSE_DATA = 0x0B, ATA_CMD_SANITIZE_DEVICE = 0xB4, + ATA_CMD_ZAC_MGMT_IN = 0x4A, + ATA_CMD_ZAC_MGMT_OUT = 0x9F, /* marked obsolete in the ATA/ATAPI-7 spec */ ATA_CMD_RESTORE = 0x10, + /* Subcmds for ATA_CMD_FPDMA_RECV */ + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02, + /* Subcmds for ATA_CMD_FPDMA_SEND */ ATA_SUBCMD_FPDMA_SEND_DSM = 0x00, ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02, + /* Subcmds for ATA_CMD_NCQ_NON_DATA */ + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 
0x07, + + /* Subcmds for ATA_CMD_ZAC_MGMT_IN */ + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00, + + /* Subcmds for ATA_CMD_ZAC_MGMT_OUT */ + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04, + /* READ_LOG_EXT pages */ + ATA_LOG_DIRECTORY = 0x0, ATA_LOG_SATA_NCQ = 0x10, + ATA_LOG_NCQ_NON_DATA = 0x12, ATA_LOG_NCQ_SEND_RECV = 0x13, ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, + ATA_LOG_ZONED_INFORMATION = 0x09, ATA_LOG_DEVSLP_OFFSET = 0x30, ATA_LOG_DEVSLP_SIZE = 0x08, ATA_LOG_DEVSLP_MDAT = 0x00, @@ -328,8 +353,25 @@ enum { ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04, ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0), ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0), ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C, - ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1), + ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14, + + /* NCQ Non-Data log */ + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0), + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1), + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2), + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3), + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4), + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0), + ATA_LOG_NCQ_NON_DATA_SIZE = 0x40, /* READ/WRITE LONG (obsolete) */ ATA_CMD_READ_LONG = 0x22, @@ -386,6 +428,8 @@ enum { SATA_SSP = 0x06, /* Software Settings Preservation */ SATA_DEVSLP = 0x09, /* Device Sleep */ + SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ + /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, ATA_SET_MAX_PASSWD = 0x01, @@ -529,6 +573,8 @@ struct ata_bmdma_prd { #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) #define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) +#define ata_id_has_ncq_autosense(id) \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) static inline bool ata_id_has_hipm(const u16 *id) { @@ -717,6 +763,20 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id) return false; } +static inline bool ata_id_has_sense_reporting(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_3] & (1 << 6); +} + +static inline bool ata_id_sense_reporting_enabled(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_4] & (1 << 6); +} + /** * ata_id_major_version - get ATA level of drive * @id: Identify data @@ -821,6 +881,11 @@ static inline bool ata_id_has_ncq_send_and_recv(const u16 *id) return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6); } +static inline bool ata_id_has_ncq_non_data(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); +} + static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && @@ -872,6 +937,11 @@ static inline bool ata_id_is_ssd(const u16 *id) return id[ATA_ID_ROT_SPEED] == 0x01; } +static inline u8 ata_id_zoned_cap(const u16 *id) +{ + return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3); +} + static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) 
{ /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ diff --git a/include/linux/audit.h b/include/linux/audit.h index e38e3fc13ea8..961a417d641e 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <linux/ptrace.h> #include <uapi/linux/audit.h> +#include <linux/tty.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -347,6 +348,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) return tsk->sessionid; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + struct tty_struct *tty = NULL; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + if (tsk->signal) + tty = tty_kref_get(tsk->signal->tty); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + return tty; +} + +static inline void audit_put_tty(struct tty_struct *tty) +{ + tty_kref_put(tty); +} + extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); @@ -504,6 +522,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return -1; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + return NULL; +} +static inline void audit_put_tty(struct tty_struct *tty) +{ } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h new file mode 100644 index 000000000000..c06b47c84e1a --- /dev/null +++ b/include/linux/bcm47xx_sprom.h @@ -0,0 +1,24 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __BCM47XX_SPROM_H +#define __BCM47XX_SPROM_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/vmalloc.h> + +#ifdef CONFIG_BCM47XX_SPROM +int bcm47xx_sprom_register_fallbacks(void); +#else +static inline int bcm47xx_sprom_register_fallbacks(void) +{ + return -ENOTSUPP; +}; +#endif + +#endif /* __BCM47XX_SPROM_H */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0367c63f5960..e6b41f42602b 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -4,6 +4,7 @@ #include <linux/pci.h> #include <linux/mod_devicetable.h> +#include <linux/bcma/bcma_driver_arm_c9.h> #include <linux/bcma/bcma_driver_chipcommon.h> #include <linux/bcma/bcma_driver_pci.h> #include <linux/bcma/bcma_driver_pcie2.h> diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h new file mode 100644 index 000000000000..93bd73d670d5 --- /dev/null +++ b/include/linux/bcma/bcma_driver_arm_c9.h @@ -0,0 +1,15 @@ +#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_ +#define LINUX_BCMA_DRIVER_ARM_C9_H_ + +/* DMU (Device Management Unit) */ +#define BCMA_DMU_CRU_USB2_CONTROL 0x0164 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK 0x00000FFC +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT 2 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK 0x00007000 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT 12 +#define BCMA_DMU_CRU_CLKSET_KEY 0x0180 +#define BCMA_DMU_CRU_STRAPS_CTRL 0x02A0 +#define BCMA_DMU_CRU_STRAPS_CTRL_USB3 0x00000010 +#define BCMA_DMU_CRU_STRAPS_CTRL_4BYTE 0x00008000 + +#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 846513c73606..a5ac2cad5cb7 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -587,7 +587,6 @@ struct mtd_info; struct bcma_sflash { bool present; - u32 window; u32 blocksize; u16 numblocks; u32 size; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 576e4639ca60..314b3caa701c 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -65,6 +65,7 @@ struct coredump_params { unsigned long limit; unsigned long mm_flags; loff_t written; + loff_t pos; }; /* diff --git a/include/linux/bitops.h b/include/linux/bitops.h index defeaac0745f..299e76b59fe9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -227,6 +227,22 @@ static inline unsigned long __ffs64(u64 word) }) #endif +#ifndef bit_clear_unless +#define bit_clear_unless(ptr, _clear, _test) \ +({ \ + const typeof(*ptr) clear = (_clear), test = (_test); \ + typeof(*ptr) old, new; \ + \ + do { \ + old = ACCESS_ONCE(*ptr); \ + new = old & ~clear; \ + } while (!(old & test) && \ + cmpxchg(ptr, old, new) != old); \ + \ + !(old & test); \ +}) +#endif + #ifndef find_last_bit /** * find_last_bit - find the last set bit in a memory region diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1fd8fdff2f81..3d9cf326574f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -768,6 +768,17 @@ static inline void rq_flush_dcache_pages(struct request *rq) } #endif +#ifdef CONFIG_PRINTK +#define vfs_msg(sb, level, fmt, ...) \ + __vfs_msg(sb, level, fmt, ##__VA_ARGS__) +#else +#define vfs_msg(sb, level, fmt, ...) 
\ +do { \ + no_printk(fmt, ##__VA_ARGS__); \ + __vfs_msg(sb, "", " "); \ +} while (0) +#endif + extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern blk_qc_t generic_make_request(struct bio *bio); @@ -1660,7 +1671,7 @@ struct block_device_operations { int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); long (*direct_access)(struct block_device *, sector_t, void __pmem **, - pfn_t *); + pfn_t *, long); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); /* ->media_changed() is DEPRECATED, use ->check_events() instead */ @@ -1680,6 +1691,8 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *); +extern int bdev_dax_supported(struct super_block *, int); +extern bool bdev_dax_capable(struct block_device *); #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 35b22f94d2d2..f9be32691718 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size, unsigned long goal); extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, - unsigned long limit); + unsigned long limit) __malloc; extern void *__alloc_bootmem_low(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; void *__alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, - unsigned long goal); + unsigned long goal) __malloc; #ifdef CONFIG_NO_BOOTMEM /* We are using top down, so it is safe to use 0 here */ diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h index b827e066e55a..146507df8650 100644 --- a/include/linux/ceph/ceph_frag.h +++ b/include/linux/ceph/ceph_frag.h @@ -51,11 +51,11 @@ static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) return ceph_frag_make(newbits, ceph_frag_value(f) | (i << (24 - newbits))); } -static inline int ceph_frag_is_leftmost(__u32 f) +static inline bool ceph_frag_is_leftmost(__u32 f) { return ceph_frag_value(f) == 0; } -static inline int ceph_frag_is_rightmost(__u32 f) +static inline bool ceph_frag_is_rightmost(__u32 f) { return ceph_frag_value(f) == ceph_frag_mask(f); } diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 37f28bf55ce4..dfce616002ad 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -153,8 +153,9 @@ struct ceph_dir_layout { /* watch-notify operations */ 
enum { - WATCH_NOTIFY = 1, /* notifying watcher */ - WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */ + CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */ }; @@ -207,6 +208,8 @@ struct ceph_mon_subscribe_ack { struct ceph_fsid fsid; } __attribute__ ((packed)); +#define CEPH_FS_CLUSTER_ID_NONE -1 + /* * mdsmap flags */ @@ -344,6 +347,18 @@ extern const char *ceph_mds_op_name(int op); #define CEPH_XATTR_REPLACE (1 << 1) #define CEPH_XATTR_REMOVE (1 << 31) +/* + * readdir request flags; + */ +#define CEPH_READDIR_REPLY_BITFLAGS (1<<0) + +/* + * readdir reply flags. + */ +#define CEPH_READDIR_FRAG_END (1<<0) +#define CEPH_READDIR_FRAG_COMPLETE (1<<8) +#define CEPH_READDIR_HASH_ORDER (1<<9) + union ceph_mds_request_args { struct { __le32 mask; /* CEPH_CAP_* */ @@ -361,6 +376,7 @@ union ceph_mds_request_args { __le32 frag; /* which dir fragment */ __le32 max_entries; /* how many dentries to grab */ __le32 max_bytes; + __le16 flags; } __attribute__ ((packed)) readdir; struct { __le32 mode; diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index a6ef9cc267ec..19e9932f3e77 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -47,7 +47,7 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n) /* * bounds check input. */ -static inline int ceph_has_room(void **p, void *end, size_t n) +static inline bool ceph_has_room(void **p, void *end, size_t n) { return end >= *p && n <= end - *p; } diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index db92a8d4926e..690985daad1c 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -180,6 +180,63 @@ static inline int calc_pages_for(u64 off, u64 len) (off >> PAGE_SHIFT); } +/* + * These are not meant to be generic - an integer key is assumed. 
+ */ +#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +static void insert_##name(struct rb_root *root, type *t) \ +{ \ + struct rb_node **n = &root->rb_node; \ + struct rb_node *parent = NULL; \ + \ + BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \ + \ + while (*n) { \ + type *cur = rb_entry(*n, type, nodefld); \ + \ + parent = *n; \ + if (t->keyfld < cur->keyfld) \ + n = &(*n)->rb_left; \ + else if (t->keyfld > cur->keyfld) \ + n = &(*n)->rb_right; \ + else \ + BUG(); \ + } \ + \ + rb_link_node(&t->nodefld, parent, n); \ + rb_insert_color(&t->nodefld, root); \ +} \ +static void erase_##name(struct rb_root *root, type *t) \ +{ \ + BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \ + rb_erase(&t->nodefld, root); \ + RB_CLEAR_NODE(&t->nodefld); \ +} + +#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ +static type *lookup_##name(struct rb_root *root, \ + typeof(((type *)0)->keyfld) key) \ +{ \ + struct rb_node *n = root->rb_node; \ + \ + while (n) { \ + type *cur = rb_entry(n, type, nodefld); \ + \ + if (key < cur->keyfld) \ + n = n->rb_left; \ + else if (key > cur->keyfld) \ + n = n->rb_right; \ + else \ + return cur; \ + } \ + \ + return NULL; \ +} + +#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) + extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index e230e7ed60d3..e2a92df08b47 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -39,20 +39,31 @@ struct ceph_mon_request { ceph_monc_request_func_t do_request; }; +typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *); + /* * ceph_mon_generic_request is being used for the statfs and * mon_get_version requests which are being done a bit differently * because we need to get data back to the caller */ struct ceph_mon_generic_request { + struct ceph_mon_client *monc; struct kref kref; u64 tid; struct rb_node node; int result; - void *buf; + struct completion completion; + ceph_monc_callback_t complete_cb; + u64 private_data; /* r_tid/linger_id */ + struct ceph_msg *request; /* original request */ struct ceph_msg *reply; /* and reply */ + + union { + struct ceph_statfs *st; + u64 newest; + } u; }; struct ceph_mon_client { @@ -77,7 +88,6 @@ struct ceph_mon_client { /* pending generic requests */ struct rb_root generic_request_tree; - int num_generic_requests; u64 last_tid; /* subs, indexed with CEPH_SUB_* */ @@ -86,6 +96,7 @@ struct ceph_mon_client { bool want; u32 have; /* epoch */ } subs[3]; + int fs_cluster_id; /* "mdsmap.<id>" sub */ #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; @@ -116,16 +127,18 @@ extern const char *ceph_sub_str[]; bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, bool continuous); void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch); +void ceph_monc_renew_subs(struct ceph_mon_client *monc); -extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout); extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf); -extern int ceph_monc_do_get_version(struct ceph_mon_client *monc, - const char *what, u64 *newest); +int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, + u64 *newest); +int 
ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, + ceph_monc_callback_t cb, u64 private_data); extern int ceph_monc_open_session(struct ceph_mon_client *monc); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index cbf460927c42..1b3b6e155392 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -20,10 +20,11 @@ struct ceph_osd_client; /* * completion callback for async writepages */ -typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, - struct ceph_msg *); +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); +#define CEPH_HOMELESS_OSD -1 + /* a given osd we're communicating with */ struct ceph_osd { atomic_t o_ref; @@ -32,16 +33,15 @@ struct ceph_osd { int o_incarnation; struct rb_node o_node; struct ceph_connection o_con; - struct list_head o_requests; - struct list_head o_linger_requests; + struct rb_root o_requests; + struct rb_root o_linger_requests; struct list_head o_osd_lru; struct ceph_auth_handshake o_auth; unsigned long lru_ttl; - int o_marked_for_keepalive; struct list_head o_keepalive_item; + struct mutex lock; }; - #define CEPH_OSD_SLAB_OPS 2 #define CEPH_OSD_MAX_OPS 16 @@ -104,76 +104,95 @@ struct ceph_osd_req_op { struct ceph_osd_data response_data; __u8 class_len; __u8 method_len; - __u8 argc; + u32 indata_len; } cls; struct { u64 cookie; - u64 ver; - u32 prot_ver; - u32 timeout; - __u8 flag; + __u8 op; /* CEPH_OSD_WATCH_OP_ */ + u32 gen; } watch; struct { + struct ceph_osd_data request_data; + } notify_ack; + struct { + u64 cookie; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + } notify; + struct { u64 expected_object_size; u64 expected_write_size; } alloc_hint; }; }; +struct ceph_osd_request_target { + struct ceph_object_id base_oid; + struct ceph_object_locator base_oloc; + struct ceph_object_id target_oid; + struct ceph_object_locator target_oloc; + + struct ceph_pg pgid; + u32 pg_num; + u32 pg_num_mask; + struct ceph_osds acting; + struct ceph_osds up; + int size; + int min_size; + bool sort_bitwise; + + unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool paused; + + int osd; +}; + /* an in-flight request */ struct ceph_osd_request { u64 r_tid; /* unique for this client */ struct rb_node r_node; - struct list_head r_req_lru_item; - struct list_head r_osd_item; - struct list_head r_linger_item; - struct list_head r_linger_osd_item; + struct rb_node r_mc_node; /* map check */ struct ceph_osd *r_osd; - struct ceph_pg r_pgid; - int r_pg_osds[CEPH_PG_MAX_SIZE]; - int r_num_pg_osds; + + struct ceph_osd_request_target r_t; +#define r_base_oid r_t.base_oid +#define r_base_oloc r_t.base_oloc +#define r_flags r_t.flags struct ceph_msg *r_request, *r_reply; - int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ /* request osd ops array */ unsigned int r_num_ops; - /* these are updated on each send */ - __le32 *r_request_osdmap_epoch; - __le32 *r_request_flags; - __le64 *r_request_pool; - void *r_request_pgid; - __le32 *r_request_attempts; - bool r_paused; - struct ceph_eversion *r_request_reassert_version; - int r_result; - int r_got_reply; - int r_linger; + bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion, r_safe_completion; + struct completion r_completion; + struct completion r_safe_completion; /* fsync waiter */ ceph_osdc_callback_t r_callback; 
ceph_osdc_unsafe_callback_t r_unsafe_callback; - struct ceph_eversion r_reassert_version; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ void *r_priv; /* ditto */ - struct ceph_object_locator r_base_oloc; - struct ceph_object_id r_base_oid; - struct ceph_object_locator r_target_oloc; - struct ceph_object_id r_target_oid; - - u64 r_snapid; - unsigned long r_stamp; /* send OR check time */ + /* set by submitter */ + u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */ + struct ceph_snap_context *r_snapc; /* for writes */ + struct timespec r_mtime; /* ditto */ + u64 r_data_offset; /* ditto */ + bool r_linger; /* don't resend on failure */ - struct ceph_snap_context *r_snapc; /* snap context for writes */ + /* internal */ + unsigned long r_stamp; /* jiffies, send or check time */ + int r_attempts; + struct ceph_eversion r_replay_version; /* aka reassert_version */ + u32 r_last_force_resend; + u32 r_map_dne_bound; struct ceph_osd_req_op r_ops[]; }; @@ -182,44 +201,70 @@ struct ceph_request_redirect { struct ceph_object_locator oloc; }; -struct ceph_osd_event { - u64 cookie; - int one_shot; +typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie, + u64 notifier_id, void *data, size_t data_len); +typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err); + +struct ceph_osd_linger_request { struct ceph_osd_client *osdc; - void (*cb)(u64, u64, u8, void *); - void *data; - struct rb_node node; - struct list_head osd_node; + u64 linger_id; + bool committed; + bool is_watch; /* watch or notify */ + + struct ceph_osd *osd; + struct ceph_osd_request *reg_req; + struct ceph_osd_request *ping_req; + unsigned long ping_sent; + unsigned long watch_valid_thru; + struct list_head pending_lworks; + + struct ceph_osd_request_target t; + u32 last_force_resend; + u32 map_dne_bound; + + struct timespec mtime; + struct kref kref; -}; + struct mutex lock; + struct rb_node node; /* osd */ + struct rb_node osdc_node; /* osdc */ + struct rb_node mc_node; /* map check */ + struct list_head scan_item; + + struct completion reg_commit_wait; + struct completion notify_finish_wait; + int reg_commit_error; + int notify_finish_error; + int last_error; + + u32 register_gen; + u64 notify_id; + + rados_watchcb2_t wcb; + rados_watcherrcb_t errcb; + void *data; -struct ceph_osd_event_work { - struct work_struct work; - struct ceph_osd_event *event; - u64 ver; - u64 notify_id; - u8 opcode; + struct page ***preply_pages; + size_t *preply_len; }; struct ceph_osd_client { struct ceph_client *client; struct ceph_osdmap *osdmap; /* current map */ - struct rw_semaphore map_sem; - struct completion map_waiters; - u64 last_requested_map; + struct rw_semaphore lock; - struct mutex request_mutex; struct rb_root osds; /* osds */ struct list_head osd_lru; /* idle osds */ - u64 timeout_tid; /* tid of timeout triggering rq */ - u64 last_tid; /* tid of last request */ - struct rb_root requests; /* pending requests */ - struct list_head req_lru; /* in-flight lru */ - struct list_head req_unsent; /* unsent/need-resend queue */ - struct list_head req_notarget; /* map to no osd */ - struct list_head req_linger; /* lingering requests */ - int num_requests; + spinlock_t osd_lru_lock; + struct ceph_osd homeless_osd; + atomic64_t last_tid; /* tid of last request */ + u64 last_linger_id; + struct rb_root linger_requests; /* lingering requests */ + struct rb_root map_checks; + struct rb_root linger_map_checks; + atomic_t num_requests; + atomic_t num_homeless; struct delayed_work timeout_work; struct 
delayed_work osds_timeout_work; #ifdef CONFIG_DEBUG_FS @@ -231,13 +276,14 @@ struct ceph_osd_client { struct ceph_msgpool msgpool_op; struct ceph_msgpool msgpool_op_reply; - spinlock_t event_lock; - struct rb_root event_tree; - u64 event_count; - struct workqueue_struct *notify_wq; }; +static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) +{ + return osdc->osdmap->flags & flag; +} + extern int ceph_osdc_setup(void); extern void ceph_osdc_cleanup(void); @@ -271,9 +317,6 @@ extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, extern struct ceph_osd_data *osd_req_op_extent_osd_data( struct ceph_osd_request *osd_req, unsigned int which); -extern struct ceph_osd_data *osd_req_op_cls_response_data( - struct ceph_osd_request *osd_req, - unsigned int which); extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, unsigned int which, @@ -309,9 +352,6 @@ extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode); -extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, - u64 cookie, u64 version, int flag); extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 expected_object_size, @@ -322,11 +362,7 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client * unsigned int num_ops, bool use_mempool, gfp_t gfp_flags); - -extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, - struct ceph_snap_context *snapc, - u64 snap_id, - struct timespec *mtime); +int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp); extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, struct ceph_file_layout *layout, @@ -338,9 +374,6 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, u32 truncate_seq, u64 truncate_size, bool use_mempool); -extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, - struct ceph_osd_request *req); - extern void ceph_osdc_get_request(struct ceph_osd_request *req); extern void ceph_osdc_put_request(struct ceph_osd_request *req); @@ -353,6 +386,7 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, extern void ceph_osdc_sync(struct ceph_osd_client *osdc); extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc); +void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc); extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, @@ -371,11 +405,33 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct timespec *mtime, struct page **pages, int nr_pages); -/* watch/notify events */ -extern int ceph_osdc_create_event(struct ceph_osd_client *osdc, - void (*event_cb)(u64, u64, u8, void *), - void *data, struct ceph_osd_event **pevent); -extern void ceph_osdc_cancel_event(struct ceph_osd_event *event); -extern void ceph_osdc_put_event(struct ceph_osd_event *event); +/* watch/notify */ +struct ceph_osd_linger_request * +ceph_osdc_watch(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + rados_watchcb2_t wcb, + rados_watcherrcb_t errcb, + void *data); +int ceph_osdc_unwatch(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); + +int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct 
ceph_object_locator *oloc, + u64 notify_id, + u64 cookie, + void *payload, + size_t payload_len); +int ceph_osdc_notify(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *payload, + size_t payload_len, + u32 timeout, + struct page ***preply_pages, + size_t *preply_len); +int ceph_osdc_watch_check(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); #endif diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index e55c08bc3a96..9ccf4dbe55f8 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -24,21 +24,29 @@ struct ceph_pg { uint32_t seed; }; -#define CEPH_POOL_FLAG_HASHPSPOOL 1 +int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs); + +#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id + together */ +#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ struct ceph_pg_pool_info { struct rb_node node; s64 id; - u8 type; + u8 type; /* CEPH_POOL_TYPE_* */ u8 size; + u8 min_size; u8 crush_ruleset; u8 object_hash; + u32 last_force_request_resend; u32 pg_num, pgp_num; int pg_num_mask, pgp_num_mask; s64 read_tier; s64 write_tier; /* wins for read+write ops */ - u64 flags; + u64 flags; /* CEPH_POOL_FLAG_* */ char *name; + + bool was_full; /* for handle_one_map() */ }; static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) @@ -57,6 +65,22 @@ struct ceph_object_locator { s64 pool; }; +static inline void ceph_oloc_init(struct ceph_object_locator *oloc) +{ + oloc->pool = -1; +} + +static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc) +{ + return oloc->pool == -1; +} + +static inline void ceph_oloc_copy(struct ceph_object_locator *dest, + const struct ceph_object_locator *src) +{ + dest->pool = src->pool; +} + /* * Maximum supported by kernel client object name length * @@ -64,11 +88,47 @@ struct ceph_object_locator { */ #define CEPH_MAX_OID_NAME_LEN 100 +/* + * 51-char inline_name is long enough for all cephfs and all but one + * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be + * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all + * other rbd requests fit into inline_name. + * + * Makes ceph_object_id 64 bytes on 64-bit. + */ +#define CEPH_OID_INLINE_LEN 52 + +/* + * Both inline and external buffers have space for a NUL-terminator, + * which is carried around. It's not required though - RADOS object + * names don't have to be NUL-terminated and may contain NULs. 
+ */ struct ceph_object_id { - char name[CEPH_MAX_OID_NAME_LEN]; + char *name; + char inline_name[CEPH_OID_INLINE_LEN]; int name_len; }; +static inline void ceph_oid_init(struct ceph_object_id *oid) +{ + oid->name = oid->inline_name; + oid->name_len = 0; +} + +static inline bool ceph_oid_empty(const struct ceph_object_id *oid) +{ + return oid->name == oid->inline_name && !oid->name_len; +} + +void ceph_oid_copy(struct ceph_object_id *dest, + const struct ceph_object_id *src); +__printf(2, 3) +void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...); +__printf(3, 4) +int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, + const char *fmt, ...); +void ceph_oid_destroy(struct ceph_object_id *oid); + struct ceph_pg_mapping { struct rb_node node; struct ceph_pg pgid; @@ -87,7 +147,6 @@ struct ceph_pg_mapping { struct ceph_osdmap { struct ceph_fsid fsid; u32 epoch; - u32 mkfs_epoch; struct ceph_timespec created, modified; u32 flags; /* CEPH_OSDMAP_* */ @@ -113,52 +172,23 @@ struct ceph_osdmap { int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; }; -static inline void ceph_oid_set_name(struct ceph_object_id *oid, - const char *name) -{ - int len; - - len = strlen(name); - if (len > sizeof(oid->name)) { - WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n", - name, len, sizeof(oid->name)); - len = sizeof(oid->name); - } - - memcpy(oid->name, name, len); - oid->name_len = len; -} - -static inline void ceph_oid_copy(struct ceph_object_id *dest, - struct ceph_object_id *src) -{ - BUG_ON(src->name_len > sizeof(dest->name)); - memcpy(dest->name, src->name, src->name_len); - dest->name_len = src->name_len; -} - -static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) { return osd >= 0 && osd < map->max_osd && (map->osd_state[osd] & CEPH_OSD_EXISTS); } -static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd) { return ceph_osd_exists(map, osd) && (map->osd_state[osd] & CEPH_OSD_UP); } -static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd) +static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) { return !ceph_osd_is_up(map, osd); } -static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) -{ - return map && (map->flags & flag); -} - extern char *ceph_osdmap_state_str(char *str, int len, int state); extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); @@ -192,28 +222,59 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) return 0; } +struct ceph_osdmap *ceph_osdmap_alloc(void); extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); -extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, - struct ceph_osdmap *map, - struct ceph_messenger *msgr); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map); extern void ceph_osdmap_destroy(struct ceph_osdmap *map); +struct ceph_osds { + int osds[CEPH_PG_MAX_SIZE]; + int size; + int primary; /* id, NOT index */ +}; + +static inline void ceph_osds_init(struct ceph_osds *set) +{ + set->size = 0; + set->primary = -1; +} + +void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src); + +bool ceph_is_new_interval(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + const struct ceph_osds *old_up, + const struct ceph_osds *new_up, + int old_size, + int new_size, + int old_min_size, + int 
new_min_size, + u32 old_pg_num, + u32 new_pg_num, + bool old_sort_bitwise, + bool new_sort_bitwise, + const struct ceph_pg *pgid); +bool ceph_osds_changed(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + bool any_change); + /* calculate mapping of a file extent to an object */ extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 len, u64 *bno, u64 *oxoff, u64 *oxlen); -/* calculate mapping of object to a placement group */ -extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap, - struct ceph_object_locator *oloc, - struct ceph_object_id *oid, - struct ceph_pg *pg_out); - -extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, - struct ceph_pg pgid, - int *osds, int *primary); -extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, - struct ceph_pg pgid); +int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); + +void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid, + struct ceph_osds *up, + struct ceph_osds *acting); +int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid); extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 2f822dca1046..5c0da61cb763 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -114,8 +114,8 @@ struct ceph_object_layout { * compound epoch+version, used by storage layer to serialize mutations */ struct ceph_eversion { - __le32 epoch; __le64 version; + __le32 epoch; } __attribute__ ((packed)); /* @@ -153,6 +153,11 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */ #define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */ #define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */ +#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */ +#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */ +#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */ +#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */ +#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */ /* * The error code to return when an OSD can't handle a write @@ -389,6 +394,13 @@ enum { CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */ CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */ CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */ + CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */ + CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if + pool uses pool snaps */ + CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */ + CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */ + CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */ + CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */ }; enum { @@ -415,7 +427,17 @@ enum { CEPH_OSD_CMPXATTR_MODE_U64 = 2 }; -#define RADOS_NOTIFY_VER 1 +enum { + CEPH_OSD_WATCH_OP_UNWATCH = 0, + CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, + /* note: use only ODD ids to prevent pre-giant code from + interpreting the op as UNWATCH */ + CEPH_OSD_WATCH_OP_WATCH = 3, + CEPH_OSD_WATCH_OP_RECONNECT = 5, + CEPH_OSD_WATCH_OP_PING = 7, +}; + +const char *ceph_osd_watch_op_name(int o); /* * an individual object operation. 
each may be accompanied by some data @@ -450,10 +472,14 @@ struct ceph_osd_op { } __attribute__ ((packed)) snap; struct { __le64 cookie; - __le64 ver; - __u8 flag; /* 0 = unwatch, 1 = watch */ + __le64 ver; /* no longer used */ + __u8 op; /* CEPH_OSD_WATCH_OP_* */ + __le32 gen; /* registration generation */ } __attribute__ ((packed)) watch; struct { + __le64 cookie; + } __attribute__ ((packed)) notify; + struct { __le64 offset, length; __le64 src_offset; } __attribute__ ((packed)) clonerange; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index da95258127aa..fb39d5add173 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -25,13 +25,14 @@ #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ #define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ #define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ -#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */ + /* unused */ #define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ #define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ #define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ struct clk; struct clk_hw; @@ -282,10 +283,17 @@ extern const struct clk_ops clk_fixed_rate_ops; struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate); +struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate, unsigned long fixed_accuracy); void clk_unregister_fixed_rate(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); + void of_fixed_clk_setup(struct device_node *np); /** @@ -326,7 +334,12 @@ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); void clk_unregister_gate(struct clk *clk); +void clk_hw_unregister_gate(struct clk_hw *hw); struct clk_div_table { unsigned int val; @@ -407,12 +420,22 @@ struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct 
clk_div_table *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider_table(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); void clk_unregister_divider(struct clk *clk); +void clk_hw_unregister_divider(struct clk_hw *hw); /** * struct clk_mux - multiplexer clock @@ -463,14 +486,25 @@ struct clk *clk_register_mux(struct device *dev, const char *name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); void clk_unregister_mux(struct clk *clk); +void clk_hw_unregister_mux(struct clk_hw *hw); void of_fixed_factor_clk_setup(struct device_node *node); @@ -499,6 +533,10 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); void clk_unregister_fixed_factor(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); +void clk_hw_unregister_fixed_factor(struct clk_hw *hw); /** * struct clk_fractional_divider - adjustable fractional divider clock @@ -533,6 +571,11 @@ struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); +void clk_hw_unregister_fractional_divider(struct clk_hw *hw); /** * struct clk_multiplier - adjustable multiplier clock @@ -603,6 +646,14 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); +void clk_unregister_composite(struct clk *clk); +struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); +void clk_hw_unregister_composite(struct clk_hw *hw); /*** * struct clk_gpio_gate - gpio gated clock @@ -625,6 +676,10 @@ extern const struct clk_ops clk_gpio_gate_ops; struct clk *clk_register_gpio_gate(struct device *dev, const char *name, const char *parent_name, unsigned gpio, bool active_low, unsigned long flags); 
+struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); /** * struct clk_gpio_mux - gpio controlled clock multiplexer @@ -640,6 +695,10 @@ extern const struct clk_ops clk_gpio_mux_ops; struct clk *clk_register_gpio_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned gpio, bool active_low, unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); /** * clk_register - allocate a new clock, register it and return an opaque cookie @@ -655,9 +714,15 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name, struct clk *clk_register(struct device *dev, struct clk_hw *hw); struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); +int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); + void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); +void clk_hw_unregister(struct clk_hw *hw); +void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); + /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); @@ -703,6 +768,11 @@ struct clk_onecell_data { unsigned int clk_num; }; +struct clk_hw_onecell_data { + size_t num; + struct clk_hw *hws[]; +}; + extern struct of_device_id __clk_of_table; #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) @@ -712,15 +782,24 @@ int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, void *data), void *data); +int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); void of_clk_del_provider(struct device_node *np); struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, + void *data); struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, + void *data); unsigned int of_clk_get_parent_count(struct device_node *np); int of_clk_parent_fill(struct device_node *np, const char **parents, unsigned int size); const char *of_clk_get_parent_name(struct device_node *np, int index); - +int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags); void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_OF */ @@ -732,17 +811,34 @@ static inline int of_clk_add_provider(struct device_node *np, { return 0; } +static inline int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} static inline void of_clk_del_provider(struct device_node *np) {} static inline struct clk *of_clk_src_simple_get( struct of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline struct clk *of_clk_src_onecell_get( struct 
of_phandle_args *clkspec, void *data) { return ERR_PTR(-ENOENT); } +static inline struct clk_hw * +of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} static inline int of_clk_get_parent_count(struct device_node *np) { return 0; @@ -757,6 +853,11 @@ static inline const char *of_clk_get_parent_name(struct device_node *np, { return NULL; } +static inline int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) +{ + return 0; +} static inline void of_clk_init(const struct of_device_id *matches) {} #endif /* CONFIG_OF */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index dc5164a6df29..6110fe09ed18 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -37,6 +37,7 @@ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() * @min_divider: minimum valid non-bypass divider value (actual) * @max_divider: maximum valid non-bypass divider value (actual) + * @max_rate: maximum clock rate for the DPLL * @modes: possible values of @enable_mask * @autoidle_reg: register containing the DPLL autoidle mode bitfield * @idlest_reg: register containing the DPLL idle status bitfield @@ -81,6 +82,7 @@ struct dpll_data { u8 last_rounded_n; u8 min_divider; u16 max_divider; + unsigned long max_rate; u8 modes; void __iomem *autoidle_reg; void __iomem *idlest_reg; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index c2c04f7cbe8a..2eabc862abdb 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -15,6 +15,7 @@ #include <asm/clkdev.h> struct clk; +struct clk_hw; struct device; struct clk_lookup { @@ -34,18 +35,22 @@ struct clk_lookup { struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) 
__printf(3, 4); void clkdev_add_table(struct clk_lookup *, size_t); int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); -int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); +int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); #ifdef CONFIG_COMMON_CLK int __clk_get(struct clk *clk); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index d7c8de583a23..a58c852a268f 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,21 +2,46 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ -/* compaction didn't start as it was deferred due to past failures */ -#define COMPACT_DEFERRED 0 -/* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 1 -/* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 2 -/* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 3 -/* The full zone was compacted */ -#define COMPACT_COMPLETE 4 -/* For more detailed tracepoint output */ -#define COMPACT_NO_SUITABLE_PAGE 5 -#define COMPACT_NOT_SUITABLE_ZONE 6 -#define COMPACT_CONTENDED 7 /* When adding new states, please adjust include/trace/events/compaction.h */ +enum compact_result { + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NOT_SUITABLE_ZONE, + /* + * compaction didn't start as it was not possible or direct reclaim + * was more suitable + */ + COMPACT_SKIPPED, + /* compaction didn't start as it was deferred due to past failures */ + COMPACT_DEFERRED, + + /* compaction not active last round */ + COMPACT_INACTIVE = COMPACT_DEFERRED, + + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NO_SUITABLE_PAGE, + /* compaction should continue to another pageblock */ + COMPACT_CONTINUE, + + /* + * The full zone was compacted scanned but wasn't successfull to compact + * suitable pages. + */ + COMPACT_COMPLETE, + /* + * direct compaction has scanned part of the zone but wasn't successfull + * to compact suitable pages. 
+ */ + COMPACT_PARTIAL_SKIPPED, + + /* compaction terminated prematurely due to lock contentions */ + COMPACT_CONTENDED, + + /* + * direct compaction partially compacted a zone and there might be + * suitable pages + */ + COMPACT_PARTIAL, +}; /* Used to signal whether compaction detected need_sched() or lock contention */ /* No contention detected */ @@ -38,13 +63,14 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); -extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, - int alloc_flags, const struct alloc_context *ac, - enum migrate_mode mode, int *contended); +extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, + unsigned int order, + unsigned int alloc_flags, const struct alloc_context *ac, + enum migrate_mode mode, int *contended); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order, - int alloc_flags, int classzone_idx); +extern enum compact_result compaction_suitable(struct zone *zone, int order, + unsigned int alloc_flags, int classzone_idx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); @@ -52,12 +78,80 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); +/* Compaction has made some progress and retrying makes sense */ +static inline bool compaction_made_progress(enum compact_result result) +{ + /* + * Even though this might sound confusing this in fact tells us + * that the compaction successfully isolated and migrated some + * pageblocks. + */ + if (result == COMPACT_PARTIAL) + return true; + + return false; +} + +/* Compaction has failed and it doesn't make much sense to keep retrying. */ +static inline bool compaction_failed(enum compact_result result) +{ + /* All zones were scanned completely and still not result. */ + if (result == COMPACT_COMPLETE) + return true; + + return false; +} + +/* + * Compaction has backed off for some reason. It might be throttling or + * lock contention. Retrying is still worthwhile. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ + /* + * Compaction backed off due to watermark checks for order-0 + * so the regular reclaim has to try harder and reclaim something. + */ + if (result == COMPACT_SKIPPED) + return true; + + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (result == COMPACT_DEFERRED) + return true; + + /* + * If compaction in async mode encounters contention or blocks higher + * priority task we back off early rather than cause stalls. + */ + if (result == COMPACT_CONTENDED) + return true; + + /* + * Page scanners have met but we haven't scanned full zones so this + * is a back off in fact. 
+ */ + if (result == COMPACT_PARTIAL_SKIPPED) + return true; + + return false; +} + + +bool compaction_zonelist_suitable(struct alloc_context *ac, int order, + int alloc_flags); + extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else -static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, +static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, enum migrate_mode mode, int *contended) @@ -73,7 +167,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order, +static inline enum compact_result compaction_suitable(struct zone *zone, int order, int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; @@ -88,6 +182,21 @@ static inline bool compaction_deferred(struct zone *zone, int order) return true; } +static inline bool compaction_made_progress(enum compact_result result) +{ + return false; +} + +static inline bool compaction_failed(enum compact_result result) +{ + return false; +} + +static inline bool compaction_withdrawn(enum compact_result result) +{ + return true; +} + static inline int kcompactd_run(int nid) { return 0; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 3d5202eda22f..e2949397c19b 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -142,6 +142,7 @@ #if GCC_VERSION >= 30400 #define __must_check __attribute__((warn_unused_result)) +#define __malloc __attribute__((__malloc__)) #endif #if GCC_VERSION >= 40000 diff --git a/include/linux/compiler.h b/include/linux/compiler.h index b5ff9881bef8..793c0829e3a3 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -357,6 +357,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define __deprecated_for_modules #endif +#ifndef __malloc +#define __malloc +#endif + /* * Allow us to avoid 'defined but not used' warnings on functions and data, * as well as force them to be emitted to the assembly file. 
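Illustrative aside, not part of the patch above: the __malloc annotation added to compiler-gcc.h/compiler.h marks functions that return freshly allocated, unaliased memory, which lets GCC optimize callers around that guarantee (and expands to nothing on compilers without the attribute). A minimal sketch of how a driver-style allocator might carry the annotation, assuming only the definitions from this hunk; the helper name is hypothetical:

#include <linux/compiler.h>	/* __malloc, or the empty fallback */
#include <linux/slab.h>

/*
 * Hypothetical helper: returns newly allocated, zeroed storage owned by
 * the caller. __malloc tells the compiler the returned pointer does not
 * alias any existing object.
 */
static void *example_buf_alloc(size_t len, gfp_t gfp) __malloc;

static void *example_buf_alloc(size_t len, gfp_t gfp)
{
	return kzalloc(len, gfp);
}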
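Similarly illustrative: the compaction.h hunk earlier in this section turns the compaction result codes into enum compact_result and adds the compaction_made_progress(), compaction_failed() and compaction_withdrawn() predicates. A hedged sketch of how an allocation retry path might consult them; the policy helper below is hypothetical and only uses the semantics documented in the hunk:

#include <linux/compaction.h>

/*
 * Hypothetical policy helper: decide whether another compaction attempt
 * is worthwhile, based on the predicates introduced by this patch.
 */
static bool example_should_retry_compaction(enum compact_result result,
					    int retries, int max_retries)
{
	/* Zone fully scanned with no result: retrying will not help. */
	if (compaction_failed(result))
		return false;

	/* Some pageblocks were successfully migrated: keep trying. */
	if (compaction_made_progress(result))
		return true;

	/*
	 * Compaction backed off (skipped, deferred, contended or only
	 * partially scanned): retry, but give up after a few rounds.
	 */
	if (compaction_withdrawn(result))
		return retries < max_retries;

	return false;
}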
diff --git a/include/linux/console.h b/include/linux/console.h index ea731af2451e..98c8615dc300 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -47,7 +47,7 @@ struct consw { int (*con_font_copy)(struct vc_data *, int); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); - int (*con_set_palette)(struct vc_data *, unsigned char *); + int (*con_set_palette)(struct vc_data *, const unsigned char *); int (*con_scrolldelta)(struct vc_data *, int); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); @@ -191,6 +191,8 @@ void vcs_remove_sysfs(int index); #ifdef CONFIG_VGA_CONSOLE extern bool vgacon_text_force(void); +#else +static inline bool vgacon_text_force(void) { return false; } #endif #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h new file mode 100644 index 000000000000..a978bb85599a --- /dev/null +++ b/include/linux/coresight-stm.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_CORESIGHT_STM_H_ +#define __LINUX_CORESIGHT_STM_H_ + +#include <uapi/linux/coresight-stm.h> + +#endif diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 786ad32631a6..07b83d32f66c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); +static inline struct cpuidle_device *cpuidle_get_device(void) +{return __this_cpu_read(cpuidle_devices); } #else static inline void disable_cpuidle(void) { } static inline bool cpuidle_not_available(struct cpuidle_driver *drv, @@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } +static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..bfc204e70338 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -16,26 +16,26 @@ #ifdef CONFIG_CPUSETS -extern struct static_key cpusets_enabled_key; +extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { - return static_key_false(&cpusets_enabled_key); + return static_branch_unlikely(&cpusets_enabled_key); } static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ - return static_key_count(&cpusets_enabled_key) + 1; + return static_key_count(&cpusets_enabled_key.key) + 1; } static inline void cpuset_inc(void) { - static_key_slow_inc(&cpusets_enabled_key); + static_branch_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_key_slow_dec(&cpusets_enabled_key); + static_branch_dec(&cpusets_enabled_key); } extern int cpuset_init(void); @@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); +extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || __cpuset_node_allowed(node, 
gfp_mask); + if (cpusets_enabled()) + return __cpuset_node_allowed(node, gfp_mask); + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return cpuset_node_allowed(zone_to_nid(z), gfp_mask); + return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + if (cpusets_enabled()) + return __cpuset_zone_allowed(z, gfp_mask); + return true; } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -172,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { - return 1; + return true; } -static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return 1; + return true; +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return true; } static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 3849fce7ecfe..3873697ba21c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -34,9 +34,13 @@ void vmcore_cleanup(void); /* * Architecture code can redefine this if there are any special checks - * needed for 64-bit ELF vmcores. In case of 32-bit only architecture, - * this can be set to zero. + * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit + * only architecture, vmcore_elf64_check_arch can be set to zero. */ +#ifndef vmcore_elf32_check_arch +#define vmcore_elf32_check_arch(x) elf_check_arch(x) +#endif + #ifndef vmcore_elf64_check_arch #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 982a6c4a62f3..43d5f0b799c7 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -3,45 +3,62 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/radix-tree.h> #include <asm/pgtable.h> +/* We use lowest available exceptional entry bit for locking */ +#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) + ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, get_block_t, dio_iodone_t, int flags); -int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); -int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, - dax_iodone_t); -int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, - dax_iodone_t); +int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); +int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); +int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +void dax_wake_mapping_entry_waiter(struct address_space *mapping, + pgoff_t index, bool wake_all); #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); +void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); +int __dax_zero_page_range(struct block_device *bdev, sector_t sector, + unsigned int offset, unsigned int length); #else static inline struct page 
*read_dax_sector(struct block_device *bdev, sector_t n) { return ERR_PTR(-ENXIO); } +/* Shouldn't ever be called when dax is disabled. */ +static inline void dax_unlock_mapping_entry(struct address_space *mapping, + pgoff_t index) +{ + BUG(); +} +static inline int __dax_zero_page_range(struct block_device *bdev, + sector_t sector, unsigned int offset, unsigned int length) +{ + return -ENXIO; +} #endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t, dax_iodone_t); + unsigned int flags, get_block_t); int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t, dax_iodone_t); + unsigned int flags, get_block_t); #else static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags, get_block_t gb, - dax_iodone_t di) + pmd_t *pmd, unsigned int flags, get_block_t gb) { return VM_FAULT_FALLBACK; } #define __dax_pmd_fault dax_pmd_fault #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); -#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) -#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) +#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) +#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f8506e8dd4d4..f53fa055021a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -10,6 +10,7 @@ #include <linux/cache.h> #include <linux/rcupdate.h> #include <linux/lockref.h> +#include <linux/stringhash.h> struct path; struct vfsmount; @@ -52,9 +53,6 @@ struct qstr { }; #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } -#define hashlen_hash(hashlen) ((u32) (hashlen)) -#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) -#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash)) struct dentry_stat_t { long nr_dentry; @@ -65,29 +63,6 @@ struct dentry_stat_t { }; extern struct dentry_stat_t dentry_stat; -/* Name hashing routines. Initial hash value */ -/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ -#define init_name_hash() 0 - -/* partial hash update function. Assume roughly 4 bits per character */ -static inline unsigned long -partial_name_hash(unsigned long c, unsigned long prevhash) -{ - return (prevhash + (c << 4) + (c >> 4)) * 11; -} - -/* - * Finally: cut down the number of bits to a int value (and try to avoid - * losing bits) - */ -static inline unsigned long end_name_hash(unsigned long hash) -{ - return (unsigned int) hash; -} - -/* Compute the hash for a name string. */ -extern unsigned int full_name_hash(const unsigned char *, unsigned int); - /* * Try to keep struct dentry aligned on 64 byte cachelines (this will * give reasonable cacheline footprint with larger lines without the @@ -237,6 +212,7 @@ struct dentry_operations { #define DCACHE_OP_REAL 0x08000000 #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ +#define DCACHE_DENTRY_CURSOR 0x20000000 extern seqlock_t rename_lock; @@ -600,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry, return inode; } +/** + * d_real_inode - Return the real inode + * @dentry: The dentry to query + * + * If dentry is on an union/overlay, then return the underlying, real inode. + * Otherwise return d_inode(). 
+ */ +static inline struct inode *d_real_inode(struct dentry *dentry) +{ + return d_backing_inode(d_real(dentry)); +} + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 981e53ab84e8..1438e2322d5c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -19,9 +19,11 @@ #include <linux/seq_file.h> #include <linux/types.h> +#include <linux/compiler.h> struct device; struct file_operations; +struct srcu_struct; struct debugfs_blob_wrapper { void *data; @@ -41,14 +43,16 @@ struct debugfs_regset32 { extern struct dentry *arch_debugfs_dir; -#if defined(CONFIG_DEBUG_FS) +extern struct srcu_struct debugfs_srcu; -/* declared over in file.c */ -extern const struct file_operations debugfs_file_operations; +#if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); struct dentry *debugfs_create_file_size(const char *name, umode_t mode, struct dentry *parent, void *data, @@ -68,6 +72,31 @@ struct dentry *debugfs_create_automount(const char *name, void debugfs_remove(struct dentry *dentry); void debugfs_remove_recursive(struct dentry *dentry); +int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx) + __acquires(&debugfs_srcu); + +void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu); + +ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = generic_file_llseek, \ +} + struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -176,6 +205,20 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } +static inline int debugfs_use_file_start(const struct dentry *dentry, + int *srcu_idx) + __acquires(&debugfs_srcu) +{ + return 0; +} + +static inline void debugfs_use_file_finish(int srcu_idx) + __releases(&debugfs_srcu) +{ } + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + static const struct file_operations __fops = { 0 } + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 98ffcbd4888e..46056cb161fc 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -38,8 +38,10 @@ struct debug_obj { * @name: name of the object typee * @debug_hint: function returning address, which have associated * kernel symbol, to allow identify the object + * @is_static_object return true if the obj is static, otherwise return false * @fixup_init: fixup function, which is called when the init check - * fails + * fails. 
All fixup functions must return true if fixup + * was successful, otherwise return false * @fixup_activate: fixup function, which is called when the activate check * fails * @fixup_destroy: fixup function, which is called when the destroy check @@ -51,12 +53,13 @@ struct debug_obj { */ struct debug_obj_descr { const char *name; - void *(*debug_hint) (void *addr); - int (*fixup_init) (void *addr, enum debug_obj_state state); - int (*fixup_activate) (void *addr, enum debug_obj_state state); - int (*fixup_destroy) (void *addr, enum debug_obj_state state); - int (*fixup_free) (void *addr, enum debug_obj_state state); - int (*fixup_assert_init)(void *addr, enum debug_obj_state state); + void *(*debug_hint)(void *addr); + bool (*is_static_object)(void *addr); + bool (*fixup_init)(void *addr, enum debug_obj_state state); + bool (*fixup_activate)(void *addr, enum debug_obj_state state); + bool (*fixup_destroy)(void *addr, enum debug_obj_state state); + bool (*fixup_free)(void *addr, enum debug_obj_state state); + bool (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c0a360e99f64..269521f143ac 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -1,3 +1,22 @@ +/* + * This file is provided under the GPLv2 license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H @@ -5,17 +24,62 @@ #include <linux/module.h> #include <linux/vmalloc.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +/* + * _devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. 
both pages and scatterlist instances) + * NOTE: if two tables allocated and chained using the sg_chain function then + * this function should be called only once on the first table + * @table: pointer to sg_table to free + */ +static inline void _devcd_free_sgtable(struct scatterlist *table) +{ + int i; + struct page *page; + struct scatterlist *iter; + struct scatterlist *delete_iter; + + /* free pages */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + page = sg_page(iter); + if (page) + __free_page(page); + } + + /* then free all chained tables */ + iter = table; + delete_iter = table; /* always points on a head of a table */ + while (!sg_is_last(iter)) { + iter++; + if (sg_is_chain(iter)) { + iter = sg_chain_ptr(iter); + kfree(delete_iter); + delete_iter = iter; + } + } + + /* free the last table */ + kfree(delete_iter); +} + + #ifdef CONFIG_DEV_COREDUMP -void dev_coredumpv(struct device *dev, const void *data, size_t datalen, +void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)); + void *data, size_t datalen), + void (*free)(void *data)); + +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp); #else -static inline void dev_coredumpv(struct device *dev, const void *data, +static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { vfree(data); @@ -23,13 +87,19 @@ static inline void dev_coredumpv(struct device *dev, const void *data, static inline void dev_coredumpm(struct device *dev, struct module *owner, - const void *data, size_t datalen, gfp_t gfp, + void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen), - void (*free)(const void *data)) + void *data, size_t datalen), + void (*free)(void *data)) { free(data); } + +static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + _devcd_free_sgtable(table); +} #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index b130304f9b1b..38f02814d53a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name); + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid); + int nid) __malloc; static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); @@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id); extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); +extern void *devm_kmalloc(struct 
device *dev, size_t size, gfp_t gfp) __malloc; extern __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, - va_list ap); + va_list ap) __malloc; extern __printf(3, 4) -char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev, return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } extern void devm_kfree(struct device *dev, void *p); -extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); @@ -1288,8 +1288,11 @@ do { \ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ } while (0) #else -#define dev_dbg_ratelimited(dev, fmt, ...) \ - no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + if (0) \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ +} while (0) #endif #ifdef VERBOSE_DEBUG diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 5871f292b596..277ab9af9ac2 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -15,13 +15,12 @@ #include <linux/errno.h> -struct pts_fs_info; - #ifdef CONFIG_UNIX98_PTYS -/* Look up a pts fs info and get a ref to it */ -struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); -void devpts_put_ref(struct pts_fs_info *); +struct pts_fs_info; + +struct pts_fs_info *devpts_acquire(struct file *); +void devpts_release(struct pts_fs_info *); int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3fe90d494edb..4551c6f2a6c4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -112,19 +112,24 @@ struct dma_buf_ops { * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. + * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @vmapping_counter: used internally to refcnt the vmaps + * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. 
* @resv: reservation object linked to this dma-buf + * @poll: for userspace poll support + * @cb_excl: for userspace poll support + * @cb_shared: for userspace poll support */ struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; - /* mutex to serialize list manipulation, attach/detach and vmap/unmap */ struct mutex lock; unsigned vmapping_counter; void *vmap_ptr; @@ -188,9 +193,11 @@ struct dma_buf_export_info { /** * helper macro for exporters; zeros and fills in most common values + * + * @name: export-info name */ -#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ - struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ +#define DEFINE_DMA_BUF_EXPORT_INFO(name) \ + struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ .owner = THIS_MODULE } /** diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index fc481037478a..8443bbb5c071 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); * These implement the bulk of the relevant DMA mapping callbacks, but require * the arch code to take care of attributes and cache maintenance */ -struct page **iommu_dma_alloc(struct device *dev, size_t size, - gfp_t gfp, int prot, dma_addr_t *handle, +struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, + struct dma_attrs *attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)); void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 9ea9aba28049..71c1b215ef66 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev); #ifndef arch_setup_dma_ops static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, struct iommu_ops *iommu, + u64 size, const struct iommu_ops *iommu, bool coherent) { } #endif diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 71456442ebe3..f2e538aaddad 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -27,6 +27,7 @@ struct dw_dma; * @regs: memory mapped I/O space * @clk: hclk clock * @dw: struct dw_dma that is filed by dw_dma_probe() + * @pdata: pointer to platform data */ struct dw_dma_chip { struct device *dev; @@ -34,10 +35,12 @@ struct dw_dma_chip { void __iomem *regs; struct clk *clk; struct dw_dma *dw; + + const struct dw_dma_platform_data *pdata; }; /* Export to the platform drivers */ -int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); +int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); /* DMA API extensions */ diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 34b98f276ed0..3ae300052553 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h @@ -41,6 +41,20 @@ struct xilinx_vdma_config { int ext_fsync; }; +/** + * enum xdma_ip_type: DMA IP type. + * + * XDMA_TYPE_AXIDMA: Axi dma ip. + * XDMA_TYPE_CDMA: Axi cdma ip. + * XDMA_TYPE_VDMA: Axi vdma ip. 
+ * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +}; + int xilinx_vdma_channel_set_config(struct dma_chan *dchan, struct xilinx_vdma_config *cfg); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 017433712833..30de0197263a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( sg_dma_address(&sg) = buf; sg_dma_len(&sg) = len; + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags, NULL); } @@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, NULL); } @@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( enum dma_transfer_direction dir, unsigned long flags, struct rio_dma_ext *rio_ext) { + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, dir, flags, rio_ext); } @@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) + return NULL; + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, period_len, dir, flags); } @@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) + return NULL; + return chan->device->device_prep_interleaved_dma(chan, xt, flags); } @@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { - if (!chan || !chan->device) + if (!chan || !chan->device || !chan->device->device_prep_dma_memset) return NULL; return chan->device->device_prep_dma_memset(chan, dest, value, @@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( struct scatterlist *src_sg, unsigned int src_nents, unsigned long flags) { + if (!chan || !chan->device || !chan->device->device_prep_dma_sg) + return NULL; + return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, src_sg, src_nents, flags); } diff --git a/include/linux/efi.h b/include/linux/efi.h index df7acb51f3cc..f196dd0b0f2f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -21,6 +21,7 @@ #include <linux/pfn.h> #include <linux/pstore.h> #include <linux/reboot.h> +#include <linux/uuid.h> #include <linux/screen_info.h> #include <asm/page.h> @@ -44,17 +45,10 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; - -typedef struct { - u8 b[16]; -} efi_guid_t; +typedef uuid_le efi_guid_t; #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ -((efi_guid_t) \ -{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ - (b) & 0xff, ((b) >> 8) & 0xff, \ - (c) & 0xff, ((c) >> 8) & 0xff, \ - (d0), (d1), (d2), (d3), 
(d4), (d5), (d6), (d7) }}) + UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) /* * Generic EFI table header @@ -1011,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc_in_map(m, md) \ for ((md) = (m)->map; \ - (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ + ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ (md) = (void *)(md) + (m)->desc_size) /** @@ -1117,7 +1111,7 @@ extern int efi_status_to_err(efi_status_t status); * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")) * not including trailing NUL */ -#define EFI_VARIABLE_GUID_LEN 36 +#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN /* * The type of search to perform when calling boottime->locate_handle diff --git a/include/linux/err.h b/include/linux/err.h index 56762ab41713..1e3558845e4c 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -18,7 +18,7 @@ #ifndef __ASSEMBLY__ -#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) +#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO) static inline void * __must_check ERR_PTR(long error) { diff --git a/include/linux/errno.h b/include/linux/errno.h index 89627b9187f9..7ce9fb1b7d28 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -28,5 +28,6 @@ #define EBADTYPE 527 /* Type not supported by server */ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define ERECALLCONFLICT 530 /* conflict with recalled state */ #endif diff --git a/include/linux/export.h b/include/linux/export.h index 96e45ea463e7..2f9ccbe6a639 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -38,7 +38,7 @@ extern struct module __this_module; #ifdef CONFIG_MODULES -#ifndef __GENKSYMS__ +#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ @@ -53,7 +53,7 @@ extern struct module __this_module; #endif /* For every exported symbol, place a struct in the __ksymtab section */ -#define __EXPORT_SYMBOL(sym, sec) \ +#define ___EXPORT_SYMBOL(sym, sec) \ extern typeof(sym) sym; \ __CRC_SYMBOL(sym, sec) \ static const char __kstrtab_##sym[] \ @@ -65,6 +65,35 @@ extern struct module __this_module; __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ = { (unsigned long)&sym, __kstrtab_##sym } +#if defined(__KSYM_DEPS__) + +/* + * For fine grained build dependencies, we want to tell the build system + * about each possible exported symbol even if they're not actually exported. + * We use a string pattern that is unlikely to be valid code that the build + * system filters out from the preprocessor output (see ksym_dep_filter + * in scripts/Kbuild.include). 
+ */ +#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include <linux/kconfig.h> +#include <generated/autoksyms.h> + +#define __EXPORT_SYMBOL(sym, sec) \ + __cond_export_sym(sym, sec, config_enabled(__KSYM_##sym)) +#define __cond_export_sym(sym, sec, conf) \ + ___cond_export_sym(sym, sec, conf) +#define ___cond_export_sym(sym, sec, enabled) \ + __cond_export_sym_##enabled(sym, sec) +#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) +#define __cond_export_sym_0(sym, sec) /* nothing */ + +#else +#define __EXPORT_SYMBOL ___EXPORT_SYMBOL +#endif + #define EXPORT_SYMBOL(sym) \ __EXPORT_SYMBOL(sym, "") diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index b90e9bdbd1dd..4c02c6521fef 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -508,4 +508,6 @@ enum { F2FS_FT_MAX }; +#define S_SHIFT 12 + #endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h index dfe88351341f..a964d076b4dc 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } /* drivers/video/fb_defio.c */ +int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); extern void fb_deferred_io_init(struct fb_info *info); extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, diff --git a/include/linux/fence.h b/include/linux/fence.h index 2b17698b60b8..2056e9fd0138 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -49,6 +49,8 @@ struct fence_cb; * @timestamp: Timestamp when the fence was signaled. * @status: Optional, only valid if < 0, must be set before calling * fence_signal, indicates that the fence has completed with an error. + * @child_list: list of children fences + * @active_list: list of active fences * * the flags member must be manipulated and read using the appropriate * atomic ops (bit_*), so taking the spinlock will not be needed most diff --git a/include/linux/fs.h b/include/linux/fs.h index 851390c8d75b..dd288148a6b1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -74,7 +74,6 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); -typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); #define MAY_EXEC 0x00000001 #define MAY_WRITE 0x00000002 @@ -1730,7 +1729,8 @@ struct inode_operations { struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); - int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); + int (*setxattr) (struct dentry *, struct inode *, + const char *, const void *, size_t, int); ssize_t (*getxattr) (struct dentry *, struct inode *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); @@ -2352,14 +2352,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); -#ifdef CONFIG_FS_DAX -extern bool blkdev_dax_capable(struct block_device *bdev); -#else -static inline bool blkdev_dax_capable(struct block_device *bdev) -{ - return false; -} -#endif extern struct super_block *blockdev_superblock; @@ -2427,6 +2419,8 @@ static inline void 
bd_unlink_disk_holder(struct block_device *bdev, /* fs/char_dev.c */ #define CHRDEV_MAJOR_HASH_SIZE 255 +/* Marks the bottom of the first segment of free char majors */ +#define CHRDEV_MAJOR_DYN_END 234 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int __register_chrdev(unsigned int major, unsigned int baseminor, @@ -2634,15 +2628,34 @@ static inline void i_readcount_inc(struct inode *inode) #endif extern int do_pipe_flags(int *, int); +#define __kernel_read_file_id(id) \ + id(UNKNOWN, unknown) \ + id(FIRMWARE, firmware) \ + id(MODULE, kernel-module) \ + id(KEXEC_IMAGE, kexec-image) \ + id(KEXEC_INITRAMFS, kexec-initramfs) \ + id(POLICY, security-policy) \ + id(MAX_ID, ) + +#define __fid_enumify(ENUM, dummy) READING_ ## ENUM, +#define __fid_stringify(dummy, str) #str, + enum kernel_read_file_id { - READING_FIRMWARE = 1, - READING_MODULE, - READING_KEXEC_IMAGE, - READING_KEXEC_INITRAMFS, - READING_POLICY, - READING_MAX_ID + __kernel_read_file_id(__fid_enumify) +}; + +static const char * const kernel_read_file_str[] = { + __kernel_read_file_id(__fid_stringify) }; +static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if (id < 0 || id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, enum kernel_read_file_id); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 604e1526cd00..13ba552e6c09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -241,7 +241,7 @@ struct fscache_cache_ops { /* check the consistency between the backing cache and the FS-Cache * cookie */ - bool (*check_consistency)(struct fscache_operation *op); + int (*check_consistency)(struct fscache_operation *op); /* store the updated auxiliary data on an object */ void (*update_object)(struct fscache_object *object); diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index 6027f6bbb061..cfa6cde25f8e 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -175,6 +175,7 @@ struct fscrypt_name { */ struct fscrypt_operations { int (*get_context)(struct inode *, void *, size_t); + int (*key_prefix)(struct inode *, u8 **); int (*prepare_context)(struct inode *); int (*set_context)(struct inode *, const void *, size_t, void *); int (*dummy_context)(struct inode *); diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 0023088b253b..3f9778cbc79d 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -39,6 +39,10 @@ #define FSL_IFC_VERSION_MASK 0x0F0F0000 #define FSL_IFC_VERSION_1_0_0 0x01000000 #define FSL_IFC_VERSION_1_1_0 0x01010000 +#define FSL_IFC_VERSION_2_0_0 0x02000000 + +#define PGOFFSET_64K (64*1024) +#define PGOFFSET_4K (4*1024) /* * CSPR - Chip Select Property Register @@ -723,20 +727,26 @@ struct fsl_ifc_nand { __be32 nand_evter_en; u32 res17[0x2]; __be32 nand_evter_intr_en; - u32 res18[0x2]; + __be32 nand_vol_addr_stat; + u32 res18; __be32 nand_erattr0; __be32 nand_erattr1; u32 res19[0x10]; __be32 nand_fsr; - u32 res20; - __be32 nand_eccstat[4]; - u32 res21[0x20]; + u32 res20[0x3]; + __be32 nand_eccstat[6]; + u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; __be32 nand_autoboot_trgr; u32 res23; __be32 nand_mdr; - u32 res24[0x5C]; + u32 res24[0x1C]; + __be32 nand_dll_lowcfg0; + __be32 
nand_dll_lowcfg1; + u32 res25; + __be32 nand_dll_lowstat; + u32 res26[0x3c]; }; /* @@ -771,13 +781,12 @@ struct fsl_ifc_gpcm { __be32 gpcm_erattr1; __be32 gpcm_erattr2; __be32 gpcm_stat; - u32 res4[0x1F3]; }; /* * IFC Controller Registers */ -struct fsl_ifc_regs { +struct fsl_ifc_global { __be32 ifc_rev; u32 res1[0x2]; struct { @@ -803,21 +812,26 @@ struct fsl_ifc_regs { } ftim_cs[FSL_IFC_BANK_COUNT]; u32 res9[0x30]; __be32 rb_stat; - u32 res10[0x2]; + __be32 rb_map; + __be32 wb_map; __be32 ifc_gcr; - u32 res11[0x2]; + u32 res10[0x2]; __be32 cm_evter_stat; - u32 res12[0x2]; + u32 res11[0x2]; __be32 cm_evter_en; - u32 res13[0x2]; + u32 res12[0x2]; __be32 cm_evter_intr_en; - u32 res14[0x2]; + u32 res13[0x2]; __be32 cm_erattr0; __be32 cm_erattr1; - u32 res15[0x2]; + u32 res14[0x2]; __be32 ifc_ccr; __be32 ifc_csr; - u32 res16[0x2EB]; + __be32 ddr_ccr_low; +}; + + +struct fsl_ifc_runtime { struct fsl_ifc_nand ifc_nand; struct fsl_ifc_nor ifc_nor; struct fsl_ifc_gpcm ifc_gpcm; @@ -831,7 +845,8 @@ extern int fsl_ifc_find(phys_addr_t addr_base); struct fsl_ifc_ctrl { /* device info */ struct device *dev; - struct fsl_ifc_regs __iomem *regs; + struct fsl_ifc_global __iomem *gregs; + struct fsl_ifc_runtime __iomem *rregs; int irq; int nand_irq; spinlock_t lock; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1259e53d9296..29f917517299 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -359,8 +359,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); -/* run all the marks in a group, and flag them to be freed */ -extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_unmount_inodes(struct super_block *sb); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5c706765404a..359a8e4bd44d 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -14,6 +14,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/uuid.h> #ifdef CONFIG_BLOCK @@ -93,7 +94,7 @@ struct disk_stats { * Enough for the string representation of any kind of UUID plus NULL. * EFI UUID is 36 characters. MSDOS UUID is 11 characters. 
*/ -#define PARTITION_META_INFO_UUIDLTH 37 +#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) struct partition_meta_info { char uuid[PARTITION_META_INFO_UUIDLTH]; @@ -228,27 +229,9 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part) return NULL; } -static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) -{ - int i; - for (i = 0; i < 16; ++i) { - *to++ = (hex_to_bin(*uuid_str) << 4) | - (hex_to_bin(*(uuid_str + 1))); - uuid_str += 2; - switch (i) { - case 3: - case 5: - case 7: - case 9: - uuid_str++; - continue; - } - } -} - static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) { - part_pack_uuid(uuid_str, to); + uuid_be_to_bin(uuid_str, (uuid_be *)to); return 0; } diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index dfd59d6bc6f0..c683996110b1 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -61,6 +61,7 @@ extern void irq_exit(void); #define nmi_enter() \ do { \ + printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ @@ -77,6 +78,7 @@ extern void irq_exit(void); preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ ftrace_nmi_exit(); \ lockdep_on(); \ + printk_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h index 79c52fa81cac..ad6fa21d977b 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -3,92 +3,94 @@ /* Fast hashing routine for ints, longs and pointers. (C) 2002 Nadia Yvette Chambers, IBM */ -/* - * Knuth recommends primes in approximately golden ratio to the maximum - * integer representable by a machine word for multiplicative hashing. - * Chuck Lever verified the effectiveness of this technique: - * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf - * - * These primes are chosen to be bit-sparse, that is operations on - * them can use shifts and additions instead of multiplications for - * machines where multiplications are slow. - */ - #include <asm/types.h> #include <linux/compiler.h> -/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ -#define GOLDEN_RATIO_PRIME_32 0x9e370001UL -/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ -#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL - +/* + * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and + * fs/inode.c. It's not actually prime any more (the previous primes + * were actively bad for hashing), but the name remains. + */ #if BITS_PER_LONG == 32 -#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 #define hash_long(val, bits) hash_32(val, bits) #elif BITS_PER_LONG == 64 #define hash_long(val, bits) hash_64(val, bits) -#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 #else #error Wordsize not 32 or 64 #endif /* - * The above primes are actively bad for hashing, since they are - * too sparse. The 32-bit one is mostly ok, the 64-bit one causes - * real problems. Besides, the "prime" part is pointless for the - * multiplicative hash. + * This hash multiplies the input by a large odd number and takes the + * high bits. Since multiplication propagates changes to the most + * significant end only, it is essential that the high bits of the + * product be used for the hash value. 
+ * + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf * * Although a random odd number will do, it turns out that the golden * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice - * properties. + * properties. (See Knuth vol 3, section 6.4, exercise 9.) * - * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. - * (See Knuth vol 3, section 6.4, exercise 9.) + * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, + * which is very slightly easier to multiply by and makes no + * difference to the hash distribution. */ #define GOLDEN_RATIO_32 0x61C88647 #define GOLDEN_RATIO_64 0x61C8864680B583EBull -static __always_inline u64 hash_64(u64 val, unsigned int bits) -{ - u64 hash = val; +#ifdef CONFIG_HAVE_ARCH_HASH +/* This header may use the GOLDEN_RATIO_xx constants */ +#include <asm/hash.h> +#endif -#if BITS_PER_LONG == 64 - hash = hash * GOLDEN_RATIO_64; -#else - /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ - u64 n = hash; - n <<= 18; - hash -= n; - n <<= 33; - hash -= n; - n <<= 3; - hash += n; - n <<= 3; - hash -= n; - n <<= 4; - hash += n; - n <<= 2; - hash += n; +/* + * The _generic versions exist only so lib/test_hash.c can compare + * the arch-optimized versions with the generic. + * + * Note that if you change these, any <asm/hash.h> that aren't updated + * to match need to have their HAVE_ARCH_* define values updated so the + * self-test will not false-positive. + */ +#ifndef HAVE_ARCH__HASH_32 +#define __hash_32 __hash_32_generic #endif +static inline u32 __hash_32_generic(u32 val) +{ + return val * GOLDEN_RATIO_32; +} +#ifndef HAVE_ARCH_HASH_32 +#define hash_32 hash_32_generic +#endif +static inline u32 hash_32_generic(u32 val, unsigned int bits) +{ /* High bits are more random, so use them. */ - return hash >> (64 - bits); + return __hash_32(val) >> (32 - bits); } -static inline u32 hash_32(u32 val, unsigned int bits) +#ifndef HAVE_ARCH_HASH_64 +#define hash_64 hash_64_generic +#endif +static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) { - /* On some cpus multiply is faster, on others gcc will do shifts */ - u32 hash = val * GOLDEN_RATIO_PRIME_32; - - /* High bits are more random, so use them. */ - return hash >> (32 - bits); +#if BITS_PER_LONG == 64 + /* 64x64-bit multiply is efficient on all 64-bit processors */ + return val * GOLDEN_RATIO_64 >> (64 - bits); +#else + /* Hash 64 bits using only 32x32-bit multiply. */ + return hash_32((u32)val ^ __hash_32(val >> 32), bits); +#endif } -static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) +static inline u32 hash_ptr(const void *ptr, unsigned int bits) { return hash_long((unsigned long)ptr, bits); } +/* This really should be called fold32_ptr; it does no hashing to speak of. 
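(Illustration, not part of the patch above: a minimal sketch of how the reworked hash_32()/hash_64()/hash_long() interface is typically consumed to pick a bucket in a power-of-two hash table; the table and key names are hypothetical.)

#include <linux/types.h>
#include <linux/hash.h>

#define MY_TABLE_BITS 10	/* hypothetical 1024-bucket table */
static struct hlist_head my_table[1 << MY_TABLE_BITS];

static struct hlist_head *my_bucket(unsigned long key)
{
	/*
	 * hash_long() resolves to hash_32() or hash_64() to match the word
	 * size; the key is multiplied by the matching GOLDEN_RATIO constant
	 * and the top MY_TABLE_BITS bits of the product select the bucket.
	 */
	return &my_table[hash_long(key, MY_TABLE_BITS)];
}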
*/ static inline u32 hash32_ptr(const void *ptr) { unsigned long val = (unsigned long)ptr; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index d7b9e5346fba..419fb9e03447 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -28,9 +28,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb, extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); -extern bool move_huge_pmd(struct vm_area_struct *vma, - struct vm_area_struct *new_vma, - unsigned long old_addr, +extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 7d953c2542a8..c26d4638f665 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, /* arch callback */ int __init alloc_bootmem_huge_page(struct hstate *h); +void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); struct hstate *size_to_hstate(unsigned long size); @@ -352,9 +353,7 @@ extern unsigned int default_hstate_idx; static inline struct hstate *hstate_inode(struct inode *i) { - struct hugetlbfs_sb_info *hsb; - hsb = HUGETLBFS_SB(i->i_sb); - return hsb->hstate; + return HUGETLBFS_SB(i->i_sb)->hstate; } static inline struct hstate *hstate_file(struct file *f) @@ -453,12 +452,12 @@ static inline pgoff_t basepage_index(struct page *page) extern void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); -static inline int hugepage_migration_supported(struct hstate *h) +static inline bool hugepage_migration_supported(struct hstate *h) { #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION return huge_page_shift(h) == PMD_SHIFT; #else - return 0; + return false; #endif } @@ -520,7 +519,7 @@ static inline pgoff_t basepage_index(struct page *page) return page->index; } #define dissolve_free_huge_pages(s, e) do {} while (0) -#define hugepage_migration_supported(h) 0 +#define hugepage_migration_supported(h) false static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 24154c26d469..063962f6dfc6 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -93,20 +93,17 @@ hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { - return; } static inline void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { - return; } static inline void hugetlb_cgroup_file_init(void) @@ -116,7 +113,6 @@ static inline void hugetlb_cgroup_file_init(void) static inline void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) { - return; } #endif /* CONFIG_MEM_RES_CTLR_HUGETLB */ diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 2bb681fbeb35..a4e7ca0f3585 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -5,16 +5,16 @@ #include <linux/mm.h> -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { return 
!!(vma->vm_flags & VM_HUGETLB); } #else -static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { - return 0; + return false; } #endif diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index aa0fadce9308..b10954a66939 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -126,6 +126,8 @@ struct hv_ring_buffer_info { u32 ring_datasize; /* < ring_size */ u32 ring_data_startoffset; + u32 priv_write_index; + u32 priv_read_index; }; /* @@ -151,6 +153,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, *read = dsize - *write; } +static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, read; + + dsize = rbi->ring_datasize; + read_loc = rbi->ring_buffer->read_index; + write_loc = READ_ONCE(rbi->ring_buffer->write_index); + + read = write_loc >= read_loc ? (write_loc - read_loc) : + (dsize - read_loc) + write_loc; + + return read; +} + +static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = READ_ONCE(rbi->ring_buffer->read_index); + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} + /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. @@ -1091,7 +1120,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t min, resource_size_t max, resource_size_t size, resource_size_t align, bool fb_overlap_ok); - +void vmbus_free_mmio(resource_size_t start, resource_size_t size); int vmbus_cpu_number_to_vp_number(int cpu_number); u64 hv_do_hypercall(u64 control, void *input, void *output); @@ -1338,4 +1367,143 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, const uuid_le *shv_host_servie_id); +void vmbus_set_event(struct vmbus_channel *channel); + +/* Get the start of the ring buffer. */ +static inline void * +hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) +{ + return (void *)ring_info->ring_buffer->buffer; +} + +/* + * To optimize the flow management on the send-side, + * when the sender is blocked because of lack of + * sufficient space in the ring buffer, potential the + * consumer of the ring buffer can signal the producer. + * This is controlled by the following parameters: + * + * 1. pending_send_sz: This is the size in bytes that the + * producer is trying to send. + * 2. The feature bit feat_pending_send_sz set to indicate if + * the consumer of the ring will signal when the ring + * state transitions from being full to a state where + * there is room for the producer to send the pending packet. + */ + +static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +{ + u32 cur_write_sz; + u32 pending_sz; + + /* + * Issue a full memory barrier before making the signaling decision. + * Here is the reason for having this barrier: + * If the reading of the pend_sz (in this function) + * were to be reordered and read before we commit the new read + * index (in the calling function) we could + * have a problem. If the host were to set the pending_sz after we + * have sampled pending_sz and go to sleep before we commit the + * read index, we could miss sending the interrupt. Issue a full + * memory barrier to address this. 
+ */ + virt_mb(); + + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* If the other end is not blocked on write don't bother. */ + if (pending_sz == 0) + return false; + + cur_write_sz = hv_get_bytes_to_write(rbi); + + if (cur_write_sz >= pending_sz) + return true; + + return false; +} + +/* + * An API to support in-place processing of incoming VMBUS packets. + */ +#define VMBUS_PKT_TRAILER 8 + +static inline struct vmpacket_descriptor * +get_next_pkt_raw(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + struct vmpacket_descriptor *cur_desc; + u32 packetlen; + u32 dsize = ring_info->ring_datasize; + u32 delta = read_loc - ring_info->ring_buffer->read_index; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); + + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; + + if ((read_loc + sizeof(*cur_desc)) > dsize) + return NULL; + + cur_desc = ring_buffer + read_loc; + packetlen = cur_desc->len8 << 3; + + /* + * If the packet under consideration is wrapping around, + * return failure. + */ + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) + return NULL; + + return cur_desc; +} + +/* + * A helper function to step through packets "in-place" + * This API is to be called after each successful call + * get_next_pkt_raw(). + */ +static inline void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) + BUG(); + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; +} + +/* + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * while (get_next_pkt_raw() { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); + */ +static inline void commit_rd_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. 
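(Illustration, not part of the patch: the in-place consumption pattern described in the comment above, written out as a minimal consumer sketch using only the new get_next_pkt_raw()/put_pkt_raw()/commit_rd_index() helpers; my_handle_packet() is a hypothetical handler.)

#include <linux/hyperv.h>

static void my_channel_drain(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;
	bool processed = false;

	/* Walk the ring without copying packets out of it. */
	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		my_handle_packet(desc);		/* hypothetical, processes in place */
		put_pkt_raw(channel, desc);	/* advance the private read index */
		processed = true;
	}

	/* Publish the read index and signal the host only if needed. */
	if (processed)
		commit_rd_index(channel);
}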
+ */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + if (hv_need_to_signal_on_read(ring_info)) + vmbus_set_event(channel); +} + + #endif /* _HYPERV_H */ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index b5f9a007a3ab..d4c1d12f900d 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -27,22 +27,49 @@ #ifdef __KERNEL__ +#include <linux/bitops.h> + +struct i2c_mux_core { + struct i2c_adapter *parent; + struct device *dev; + bool mux_locked; + + void *priv; + + int (*select)(struct i2c_mux_core *, u32 chan_id); + int (*deselect)(struct i2c_mux_core *, u32 chan_id); + + int num_adapters; + int max_adapters; + struct i2c_adapter *adapter[0]; +}; + +struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, + struct device *dev, int max_adapters, + int sizeof_priv, u32 flags, + int (*select)(struct i2c_mux_core *, u32), + int (*deselect)(struct i2c_mux_core *, u32)); + +/* flags for i2c_mux_alloc */ +#define I2C_MUX_LOCKED BIT(0) + +static inline void *i2c_mux_priv(struct i2c_mux_core *muxc) +{ + return muxc->priv; +} + +struct i2c_adapter *i2c_root_adapter(struct device *dev); + /* - * Called to create a i2c bus on a multiplexed bus segment. - * The mux_dev and chan_id parameters are passed to the select - * and deselect callback functions to perform hardware-specific - * mux control. + * Called to create an i2c bus on a multiplexed bus segment. + * The chan_id parameter is passed to the select and deselect + * callback functions to perform hardware-specific mux control. */ -struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, - struct device *mux_dev, - void *mux_priv, u32 force_nr, u32 chan_id, - unsigned int class, - int (*select) (struct i2c_adapter *, - void *mux_dev, u32 chan_id), - int (*deselect) (struct i2c_adapter *, - void *mux_dev, u32 chan_id)); - -void i2c_del_mux_adapter(struct i2c_adapter *adap); +int i2c_mux_add_adapter(struct i2c_mux_core *muxc, + u32 force_nr, u32 chan_id, + unsigned int class); + +void i2c_mux_del_adapters(struct i2c_mux_core *muxc); #endif /* __KERNEL__ */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 200cf13b00f6..96a25ae14494 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -524,6 +524,7 @@ struct i2c_adapter { /* data fields that are valid for all devices */ struct rt_mutex bus_lock; + struct rt_mutex mux_lock; int timeout; /* in jiffies */ int retries; @@ -538,6 +539,10 @@ struct i2c_adapter { struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; + + void (*lock_bus)(struct i2c_adapter *, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) @@ -567,8 +572,44 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); /* Adapter locking functions, exported for shared pin cases */ -void i2c_lock_adapter(struct i2c_adapter *); -void i2c_unlock_adapter(struct i2c_adapter *); +#define I2C_LOCK_ROOT_ADAPTER BIT(0) +#define I2C_LOCK_SEGMENT BIT(1) + +/** + * i2c_lock_bus - Get exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT + * locks only this branch in the adapter tree + */ +static inline void +i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + 
adapter->lock_bus(adapter, flags); +} + +/** + * i2c_unlock_bus - Release exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT + * unlocks only this branch in the adapter tree + */ +static inline void +i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->unlock_bus(adapter, flags); +} + +static inline void +i2c_lock_adapter(struct i2c_adapter *adapter) +{ + i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} + +static inline void +i2c_unlock_adapter(struct i2c_adapter *adapter) +{ + i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); +} /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ @@ -654,6 +695,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) return adap->nr; } +static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) +{ + return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); +} + /** * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 2ec3ad58e8a0..70a5164f4728 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -83,10 +83,12 @@ struct iio_buffer_access_funcs { * @access: [DRIVER] buffer access functions associated with the * implementation. * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. + * @buffer_group: [INTERN] attributes of the buffer group * @scan_el_group: [DRIVER] attribute group for those attributes not * created from the iio_chan_info array. * @pollq: [INTERN] wait queue to allow for polling on the buffer. * @stufftoread: [INTERN] flag to indicate new data. + * @attrs: [INTERN] standard attributes of the buffer * @demux_list: [INTERN] list of operations required to demux the scan. * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. * @buffer_list: [INTERN] entry in the devices list of current buffers. diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 6670c3d25c58..99403b19092f 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -37,6 +37,7 @@ #define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 #define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 #define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 +#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 #define ST_SENSORS_MAX_4WAI 7 @@ -121,6 +122,9 @@ struct st_sensor_bdu { * @mask_int2: mask to enable/disable IRQ on INT2 pin. * @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. + * @addr_od: address to enable/disable Open Drain on the INT lines. + * @mask_od: mask to enable/disable Open Drain on the INT lines. + * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. @@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq { u8 mask_int2; u8 addr_ihl; u8 mask_ihl; + u8 addr_od; + u8 mask_od; + u8 addr_stat_drdy; struct { u8 en_addr; u8 en_mask; @@ -212,9 +219,12 @@ struct st_sensor_settings { * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). 
+ * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @get_irq_data_ready: Function to get the IRQ used for data ready signal. * @tf: Transfer function structure used by I/O operations. * @tb: Transfer buffers and mutex used by I/O operations. + * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. + * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. */ struct st_sensor_data { struct device *dev; @@ -233,11 +243,15 @@ struct st_sensor_data { unsigned int num_data_channels; u8 drdy_int_pin; + bool int_pin_open_drain; unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); const struct st_sensor_transfer_function *tf; struct st_sensor_transfer_buffer tb; + + bool hw_irq_trigger; + s64 hw_timestamp; }; #ifdef CONFIG_IIO_BUFFER @@ -251,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops); void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); - +int st_sensors_validate_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); #else static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) @@ -262,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) { return; } +#define st_sensors_validate_device NULL #endif int st_sensors_init_sensor(struct iio_dev *indio_dev, diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index fad58671c49e..3d672f72e7ec 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev, void iio_channel_release(struct iio_channel *chan); /** + * devm_iio_channel_get() - Resource managed version of iio_channel_get(). + * @dev: Pointer to consumer device. Device name must match + * the name of the device as provided in the iio_map + * with which the desired provider to consumer mapping + * was registered. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 'battery_voltage' + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns valid pointer for iio channel. + * + * The allocated iio channel is automatically released when the device is + * unbound. + */ +struct iio_channel *devm_iio_channel_get(struct device *dev, + const char *consumer_channel); +/** + * devm_iio_channel_release() - Resource managed version of + * iio_channel_release(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: The channel to be released. + */ +void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); + +/** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * @@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev); */ void iio_channel_release_all(struct iio_channel *chan); +/** + * devm_iio_channel_get_all() - Resource managed version of + * iio_channel_get_all(). + * @dev: Pointer to consumer device. + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns an array of iio_channel structures terminated with one with + * null iio_dev pointer. + * + * This function is used by fairly generic consumers to get all the + * channels registered as having this consumer. 
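(Illustration, not part of the patch: a minimal consumer-side sketch of the new devm_iio_channel_get() documented above. The driver, the "battery_voltage" mapping and the use of the existing iio_read_channel_raw() helper are assumptions for the example; error pointers follow the kerneldoc above.)

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/iio/consumer.h>

static int my_consumer_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;
	int raw, ret;

	/* Managed: the channel is released automatically when the driver unbinds. */
	chan = devm_iio_channel_get(&pdev->dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	dev_info(&pdev->dev, "initial raw reading: %d\n", raw);
	return 0;
}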
+ * + * The allocated iio channels are automatically released when the device is + * unbounded. + */ +struct iio_channel *devm_iio_channel_get_all(struct device *dev); + +/** + * devm_iio_channel_release_all() - Resource managed version of + * iio_channel_release_all(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: Array channel to be released. + */ +void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); + struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b2b16772c651..7c29cb0124ae 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev, } /** + * struct iio_mount_matrix - iio mounting matrix + * @rotation: 3 dimensional space rotation matrix defining sensor alignment with + * main hardware + */ +struct iio_mount_matrix { + const char *rotation[9]; +}; + +ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, + const struct iio_chan_spec *chan, char *buf); +int of_iio_read_mount_matrix(const struct device *dev, const char *propname, + struct iio_mount_matrix *matrix); + +typedef const struct iio_mount_matrix * + (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); + +/** + * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute + * @_shared: Whether the attribute is shared between all channels + * @_get: Pointer to an iio_get_mount_matrix_t accessor + */ +#define IIO_MOUNT_MATRIX(_shared, _get) \ +{ \ + .name = "mount_matrix", \ + .shared = (_shared), \ + .read = iio_show_mount_matrix, \ + .private = (uintptr_t)(_get), \ +} + +/** * struct iio_event_spec - specification for a channel event * @type: Type of the event * @dir: Direction of the event @@ -527,6 +558,8 @@ void iio_device_unregister(struct iio_dev *indio_dev); int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); +int iio_device_claim_direct_mode(struct iio_dev *indio_dev); +void iio_device_release_direct_mode(struct iio_dev *indio_dev); extern struct bus_type iio_bus_type; diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index fa2d01ef8f55..360da7d18a3d 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -41,6 +41,7 @@ struct adis_data { unsigned int diag_stat_reg; unsigned int self_test_mask; + bool self_test_no_autoclear; unsigned int startup_delay; const char * const *status_error_msgs; diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h new file mode 100644 index 000000000000..c8400959d197 --- /dev/null +++ b/include/linux/iio/magnetometer/ak8975.h @@ -0,0 +1,16 @@ +#ifndef __IIO_MAGNETOMETER_AK8975_H__ +#define __IIO_MAGNETOMETER_AK8975_H__ + +#include <linux/iio/iio.h> + +/** + * struct ak8975_platform_data - AK8975 magnetometer driver platform data + * @eoc_gpio: data ready event gpio + * @orientation: mounting matrix relative to main hardware + */ +struct ak8975_platform_data { + int eoc_gpio; + struct iio_mount_matrix orientation; +}; + +#endif diff --git a/include/linux/ima.h b/include/linux/ima.h index e6516cbbe9bf..0eb7c2e7f0d6 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -21,6 +21,7 @@ extern int 
ima_file_mmap(struct file *file, unsigned long prot); extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); +extern void ima_post_path_mknod(struct dentry *dentry); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -54,6 +55,11 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, return 0; } +static inline void ima_post_path_mknod(struct dentry *dentry) +{ + return; +} + #endif /* CONFIG_IMA */ #ifdef CONFIG_IMA_APPRAISE diff --git a/include/linux/init_task.h b/include/linux/init_task.h index f2cb8d45513d..f8834f820ec2 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -190,7 +190,7 @@ extern struct task_group root_task_group; #define INIT_TASK(tsk) \ { \ .state = 0, \ - .stack = &init_thread_info, \ + .stack = init_stack, \ .usage = ATOMIC_INIT(2), \ .flags = PF_KTHREAD, \ .prio = MAX_PRIO-20, \ diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 11d7e840d913..defcc4644ce3 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) writel(val, addr); } +static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl_relaxed(p + 1); + low = readl_relaxed(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val >> 32, addr + 4); + writel_relaxed(val, addr); +} + #ifndef readq #define readq hi_lo_readq #endif @@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) #define writeq hi_lo_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed hi_lo_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed hi_lo_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index 1a4315f97360..084461a4e5ab 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) writel(val >> 32, addr + 4); } +static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl_relaxed(p); + high = readl_relaxed(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val, addr); + writel_relaxed(val >> 32, addr + 4); +} + #ifndef readq #define readq lo_hi_readq #endif @@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) #define writeq lo_hi_writeq #endif +#ifndef readq_relaxed +#define readq_relaxed lo_hi_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed lo_hi_writeq_relaxed +#endif + #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ef7a6ecd8584..664683aedcce 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -30,6 +30,7 @@ #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. 
things like MSI doorbells */ struct iommu_ops; struct iommu_group; @@ -78,6 +79,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_ops *ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; @@ -155,8 +157,7 @@ struct iommu_dm_region { * @domain_set_windows: Set the number of windows for a domain * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping - * @pgsize_bitmap: bitmap of supported page sizes - * @priv: per-instance data private to the iommu driver + * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -198,7 +199,6 @@ struct iommu_ops { int (*of_xlate)(struct device *dev, struct of_phandle_args *args); unsigned long pgsize_bitmap; - void *priv; }; #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 0b65543dc6cf..6230064d7f95 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -26,6 +26,9 @@ struct resource { /* * IO resources have these defined flags. + * + * PCI devices expose these flags to userspace in the "resource" sysfs file, + * so don't move them. */ #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ @@ -110,6 +113,7 @@ struct resource { /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ +#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ /* * I/O Resource Descriptors diff --git a/include/linux/iova.h b/include/linux/iova.h index 92f7177db2ce..f27bb2c62fca 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -19,8 +19,21 @@ /* iova structure */ struct iova { struct rb_node node; - unsigned long pfn_hi; /* IOMMU dish out addr hi */ - unsigned long pfn_lo; /* IOMMU dish out addr lo */ + unsigned long pfn_hi; /* Highest allocated pfn */ + unsigned long pfn_lo; /* Lowest allocated pfn */ +}; + +struct iova_magazine; +struct iova_cpu_rcache; + +#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ +#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ + +struct iova_rcache { + spinlock_t lock; + unsigned long depot_size; + struct iova_magazine *depot[MAX_GLOBAL_MAGS]; + struct iova_cpu_rcache __percpu *cpu_rcaches; }; /* holds all the iova translations for a domain */ @@ -31,6 +44,7 @@ struct iova_domain { unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; + struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ }; static inline unsigned long iova_size(struct iova *iova) @@ -78,6 +92,10 @@ void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned); +void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, + unsigned long size); +unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, + unsigned long limit_pfn); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); @@ -87,5 +105,6 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain 
*iovad); struct iova *split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); +void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #endif diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 1551b5b2f4c2..f0f5d2671509 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -34,7 +34,7 @@ struct irq_bypass_consumer; /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -60,7 +60,7 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer + * @token: opaque token to match between producer and consumer (non-NULL) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h new file mode 100644 index 000000000000..c647b0547bcd --- /dev/null +++ b/include/linux/irqchip/arm-gic-common.h @@ -0,0 +1,34 @@ +/* + * include/linux/irqchip/arm-gic-common.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H +#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H + +#include <linux/types.h> +#include <linux/ioport.h> + +enum gic_type { + GIC_V2, + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* Virtual control interface */ + struct resource vctrl; +}; + +const struct gic_kvm_info *gic_get_kvm_info(void); + +#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9e6fdd33bdb2..dc493e0f0ff7 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -273,6 +273,12 @@ #define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) @@ -299,12 +305,12 @@ #define ICC_SGI1R_AFFINITY_1_SHIFT 16 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_SGI_ID_SHIFT 24 -#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) #define ICC_SGI1R_AFFINITY_2_SHIFT 32 -#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 #define ICC_SGI1R_AFFINITY_3_SHIFT 48 -#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) #include <asm/arch_gicv3.h> diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 9c940263ca23..fd051855539b 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -33,6 +33,7 @@ #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 +#define GIC_DIST_IIDR 0x008 #define GIC_DIST_IGROUP 0x080 #define GIC_DIST_ENABLE_SET 0x100 #define GIC_DIST_ENABLE_CLEAR 0x180 @@ -76,6 +77,7 @@ #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT) +#define GICH_LR_PRIORITY_SHIFT 23 #define GICH_LR_STATE (3 << 28) #define GICH_LR_PENDING_BIT (1 << 28) #define GICH_LR_ACTIVE_BIT (1 << 29) diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 80f89e4a29ac..81f930b0bca9 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -103,6 +103,7 @@ #define GIC_VPE_SWINT0_MAP_OFS 0x0054 #define GIC_VPE_SWINT1_MAP_OFS 0x0058 #define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VP_IDENT_OFS 0x0088 #define GIC_VPE_WD_CONFIG0_OFS 0x0090 #define GIC_VPE_WD_COUNT0_OFS 0x0094 #define GIC_VPE_WD_INITIAL0_OFS 0x0098 @@ -211,6 +212,10 @@ #define GIC_VPE_SMASK_FDC_SHF 6 #define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF) +/* GIC_VP_IDENT fields */ +#define GIC_VP_IDENT_VCNUM_SHF 0 +#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF) + /* GIC nomenclature for Core Interrupt Pins. */ #define GIC_CPU_INT0 0 /* Core Interrupt 2 */ #define GIC_CPU_INT1 1 /* . 
*/ @@ -278,4 +283,16 @@ static inline int gic_get_usm_range(struct resource *gic_usm_res) #endif /* CONFIG_MIPS_GIC */ +/** + * gic_read_local_vp_id() - read the local VP's VCNUM + * + * Read the VCNUM of the local VP from the GIC_VP_IDENT register and + * return it to the caller. This ID should be used to refer to the VP + * via the GIC's VP-other region, or when calculating an offset to a + * bit representing the VP in interrupt masks. + * + * Return: The VCNUM value for the local VP. + */ +extern unsigned gic_read_local_vp_id(void); + #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/isa.h b/include/linux/isa.h index b0270e3814c8..f2d0258414cf 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h @@ -6,6 +6,7 @@ #define __LINUX_ISA_H #include <linux/device.h> +#include <linux/errno.h> #include <linux/kernel.h> struct isa_driver { @@ -22,13 +23,13 @@ struct isa_driver { #define to_isa_driver(x) container_of((x), struct isa_driver, driver) -#ifdef CONFIG_ISA +#ifdef CONFIG_ISA_BUS_API int isa_register_driver(struct isa_driver *, unsigned int); void isa_unregister_driver(struct isa_driver *); #else static inline int isa_register_driver(struct isa_driver *d, unsigned int i) { - return 0; + return -ENODEV; } static inline void isa_unregister_driver(struct isa_driver *d) @@ -36,4 +37,36 @@ static inline void isa_unregister_driver(struct isa_driver *d) { } #endif +/** + * module_isa_driver() - Helper macro for registering an ISA driver + * @__isa_driver: isa_driver struct + * @__num_isa_dev: number of devices to register + * + * Helper macro for ISA drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate code. Each module may only + * use this macro once, and calling it replaces module_init and module_exit. + */ +#define module_isa_driver(__isa_driver, __num_isa_dev) \ +static int __init __isa_driver##_init(void) \ +{ \ + return isa_register_driver(&(__isa_driver), __num_isa_dev); \ +} \ +module_init(__isa_driver##_init); \ +static void __exit __isa_driver##_exit(void) \ +{ \ + isa_unregister_driver(&(__isa_driver)); \ +} \ +module_exit(__isa_driver##_exit); + +/** + * max_num_isa_dev() - Maximum possible number of ISA devices that can be registered + * @__isa_dev_ext: ISA device address extent + * + * The highest base address possible for an ISA device is 0x3FF; this results in + * 1024 possible base addresses. Dividing the number of possible base addresses + * by the address extent taken by each device results in the maximum number of + * devices on a system. + */ +#define max_num_isa_dev(__isa_dev_ext) (1024 / __isa_dev_ext) + #endif /* __LINUX_ISA_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fd1083c46c61..efb232c5f668 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -403,11 +403,19 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) /* Flags in jbd_inode->i_flags */ #define __JI_COMMIT_RUNNING 0 -/* Commit of the inode data in progress. We use this flag to protect us from +#define __JI_WRITE_DATA 1 +#define __JI_WAIT_DATA 2 + +/* + * Commit of the inode data in progress. We use this flag to protect us from * concurrent deletion of inode.
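(Illustration, not part of the patch: a hypothetical driver skeleton showing what the module_isa_driver() helper added to isa.h above replaces; the probe body and the 0x10 address extent are made up for the example, giving max_num_isa_dev(0x10) == 64.)

#include <linux/module.h>
#include <linux/isa.h>

static int my_isa_probe(struct device *dev, unsigned int id)
{
	/* Probe the hardware at the id'th possible base address. */
	return 0;
}

static struct isa_driver my_isa_driver = {
	.probe = my_isa_probe,
	.driver = {
		.name = "my_isa",
	},
};

/* Expands to the module_init()/module_exit() boilerplate shown in the hunk above. */
module_isa_driver(my_isa_driver, max_num_isa_dev(0x10));

MODULE_LICENSE("GPL");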
We cannot use reference to inode for this * since we cannot afford doing last iput() on behalf of kjournald */ #define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) +/* Write allocated dirty buffers in this inode before commit */ +#define JI_WRITE_DATA (1 << __JI_WRITE_DATA) +/* Wait for outstanding data writes for this inode before commit */ +#define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** * struct jbd_inode is the structure linking inodes in ordered mode @@ -781,9 +789,6 @@ jbd2_time_diff(unsigned long start, unsigned long end) * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the * number that will fit in j_blocksize * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history: Buffer storing the transactions statistics history - * @j_history_max: Maximum number of transactions in the statistics history - * @j_history_cur: Current number of transactions in the statistics history * @j_history_lock: Protect the transactions statistics history * @j_proc_entry: procfs entry for the jbd statistics directory * @j_stats: Overall statistics @@ -1270,7 +1275,8 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 0536524bb9eb..68904469fba1 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -117,13 +117,18 @@ struct module; #include <linux/atomic.h> +#ifdef HAVE_JUMP_LABEL + static inline int static_key_count(struct static_key *key) { - return atomic_read(&key->enabled); + /* + * -1 means the first static_key_slow_inc() is in progress. + * static_key_enabled() must return true, so return 1 here. + */ + int n = atomic_read(&key->enabled); + return n >= 0 ? 
n : 1; } -#ifdef HAVE_JUMP_LABEL - #define JUMP_TYPE_FALSE 0UL #define JUMP_TYPE_TRUE 1UL #define JUMP_TYPE_MASK 1UL @@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod); #else /* !HAVE_JUMP_LABEL */ +static inline int static_key_count(struct static_key *key) +{ + return atomic_read(&key->enabled); +} + static __always_inline void jump_label_init(void) { static_key_initialized = true; diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h new file mode 100644 index 000000000000..b7f8aced7870 --- /dev/null +++ b/include/linux/kasan-checks.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_KASAN_CHECKS_H +#define _LINUX_KASAN_CHECKS_H + +#ifdef CONFIG_KASAN +void kasan_check_read(const void *p, unsigned int size); +void kasan_check_write(const void *p, unsigned int size); +#else +static inline void kasan_check_read(const void *p, unsigned int size) { } +static inline void kasan_check_write(const void *p, unsigned int size) { } +#endif + +#endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 737371b56044..ac4b3c46a84d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -50,6 +50,8 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_destroy(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); @@ -57,13 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kfree_large(const void *ptr); -void kasan_kfree(void *ptr); +void kasan_poison_kfree(void *ptr); void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); -void kasan_slab_free(struct kmem_cache *s, void *object); +bool kasan_slab_free(struct kmem_cache *s, void *object); struct kasan_cache { int alloc_meta_offset; @@ -73,6 +75,9 @@ struct kasan_cache { int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); +size_t ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } + #else /* CONFIG_KASAN */ static inline void kasan_unpoison_shadow(const void *address, size_t size) {} @@ -88,6 +93,8 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) {} +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_destroy(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, @@ -97,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache, static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} static inline void kasan_kfree_large(const void *ptr) {} -static inline void kasan_kfree(void *ptr) {} +static inline void kasan_poison_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) {} static inline void kasan_krealloc(const void *object, size_t new_size, @@ -105,11 +112,16 @@ static inline void 
kasan_krealloc(const void *object, size_t new_size, static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) {} -static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} +static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +{ + return false; +} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} +static inline void kasan_unpoison_slab(const void *ptr) { } + #endif /* CONFIG_KASAN */ #endif /* LINUX_KASAN_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f7775e229b0..94aa10ffe156 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -53,6 +53,13 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ +} \ +) + /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be @@ -412,9 +419,9 @@ extern __printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...); extern __printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); -extern __printf(2, 3) +extern __printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...); -extern __printf(2, 0) +extern __printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); extern __printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30f089ebe0a4..96356ef012de 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -179,6 +179,7 @@ struct kernfs_open_file { /* private fields, do not use outside kernfs proper */ struct mutex mutex; + struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 2cc643c6e870..e8acb2b43dd9 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -230,8 +230,6 @@ extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); void crash_save_cpu(struct pt_regs *regs, int cpu); void crash_save_vmcoreinfo(void); -void crash_map_reserved_pages(void); -void crash_unmap_reserved_pages(void); void arch_crash_save_vmcoreinfo(void); __printf(1, 2) void vmcoreinfo_append_str(const char *fmt, ...); @@ -317,6 +315,8 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); +void arch_kexec_protect_crashkres(void); +void arch_kexec_unprotect_crashkres(void); #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 7463355a198b..eaee981c5558 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -45,7 +45,6 @@ struct key_preparsed_payload { size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ time_t expiry; /* Expiry time of key */ - bool trusted; /* True if key is trusted */ }; typedef int (*request_key_actor_t)(struct key_construction *key, diff --git a/include/linux/key.h b/include/linux/key.h index 5f5b1129dc92..722914798f37 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -173,11 +173,9 @@ struct key { #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ #define KEY_FLAG_ROOT_CAN_CLEAR 
6 /* set if key can be cleared by root without permission */ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ -#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ -#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ -#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */ -#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */ -#define KEY_FLAG_KEEP 12 /* set if key should not be removed */ +#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ +#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 10 /* set if key should not be removed */ /* the key type and key description string * - the desc is used to match a key against search criteria @@ -205,6 +203,20 @@ struct key { }; int reject_error; }; + + /* This is set on a keyring to restrict the addition of a link to a key + * to it. If this method isn't provided then it is assumed that the + * keyring is open to any addition. It is ignored for non-keyring + * keys. + * + * This is intended for use with rings of trusted keys whereby addition + * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION + * overrides this, allowing the kernel to add extra keys without + * restriction. + */ + int (*restrict_link)(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); }; extern struct key *key_alloc(struct key_type *type, @@ -212,14 +224,17 @@ extern struct key *key_alloc(struct key_type *type, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, - unsigned long flags); + unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *)); -#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ -#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ -#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ -#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ -#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */ +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ +#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); @@ -288,8 +303,15 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid const struct cred *cred, key_perm_t perm, unsigned long flags, + int (*restrict_link)(struct key *, + const struct key_type *, + const union key_payload *), struct key *dest); +extern int restrict_link_reject(struct key *keyring, + const struct key_type *type, + const union key_payload *payload); + extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5276fe0916fc..1c9c973a7dd9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -35,6 +35,10 @@ #include <asm/kvm_host.h> +#ifndef KVM_MAX_VCPU_ID +#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#endif + /* * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in @@ 
-225,6 +229,7 @@ struct kvm_vcpu { sigset_t sigset; struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; + bool valid_wakeup; #ifdef CONFIG_HAS_IOMEM int mmio_needed; @@ -407,6 +412,8 @@ struct kvm { #endif long tlbs_dirty; struct list_head devices; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; }; #define kvm_err(fmt, ...) \ @@ -447,12 +454,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu = NULL; int i; - if (id < 0 || id >= KVM_MAX_VCPUS) + if (id < 0) return NULL; - vcpu = kvm_get_vcpu(kvm, id); + if (id < KVM_MAX_VCPUS) + vcpu = kvm_get_vcpu(kvm, id); if (vcpu && vcpu->vcpu_id == id) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -651,6 +659,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); +void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -984,6 +993,11 @@ enum kvm_stat_kind { KVM_STAT_VCPU, }; +struct kvm_stat_data { + int offset; + struct kvm *kvm; +}; + struct kvm_stats_debugfs_item { const char *name; int offset; @@ -1091,6 +1105,11 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { + /* + * Ensure the rest of the request is published to kvm_check_request's + * caller. Paired with the smp_mb__after_atomic in kvm_check_request. + */ + smp_wmb(); set_bit(req, &vcpu->requests); } @@ -1098,6 +1117,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { if (test_bit(req, &vcpu->requests)) { clear_bit(req, &vcpu->requests); + + /* + * Ensure the rest of the request is visible to kvm_check_request's + * caller. Paired with the smp_wmb in kvm_make_request. + */ + smp_mb__after_atomic(); return true; } else { return false; @@ -1169,6 +1194,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, @@ -1179,4 +1205,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS +/* If we wakeup during the poll time, was it a successful poll?
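The smp_wmb()/smp_mb__after_atomic() pair added to kvm_make_request() and kvm_check_request() above gives the request bit release/acquire-like behaviour: data stored before the bit is set is guaranteed to be visible to the consumer that observes and clears it. The sketch below is not KVM code; struct req_demo and the demo_* names are invented purely to mirror that pairing in a self-contained way.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Stand-in structure, not part of KVM; it only mirrors the pattern above. */
struct req_demo {
	unsigned long requests;
	u64 payload;
};

static void demo_make_request(struct req_demo *d, int req, u64 val)
{
	d->payload = val;		/* publish the payload first */
	smp_wmb();			/* as in kvm_make_request() */
	set_bit(req, &d->requests);
}

static bool demo_check_request(struct req_demo *d, int req, u64 *val)
{
	if (!test_bit(req, &d->requests))
		return false;

	clear_bit(req, &d->requests);
	smp_mb__after_atomic();		/* as in kvm_check_request() */
	*val = d->payload;		/* guaranteed to see the maker's store */
	return true;
}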
*/ +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return vcpu->valid_wakeup; +} + +#else +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return true; +} +#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ + #endif diff --git a/include/linux/leds.h b/include/linux/leds.h index d2b13066e781..e5e7f2e80a54 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -42,15 +42,16 @@ struct led_classdev { #define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) -#define LED_BLINK_ONESHOT (1 << 17) -#define LED_BLINK_ONESHOT_STOP (1 << 18) -#define LED_BLINK_INVERT (1 << 19) -#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) -#define LED_BLINK_DISABLE (1 << 21) -#define LED_SYSFS_DISABLE (1 << 22) -#define LED_DEV_CAP_FLASH (1 << 23) -#define LED_HW_PLUGGABLE (1 << 24) -#define LED_PANIC_INDICATOR (1 << 25) +#define LED_BLINK_SW (1 << 17) +#define LED_BLINK_ONESHOT (1 << 18) +#define LED_BLINK_ONESHOT_STOP (1 << 19) +#define LED_BLINK_INVERT (1 << 20) +#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) +#define LED_BLINK_DISABLE (1 << 22) +#define LED_SYSFS_DISABLE (1 << 23) +#define LED_DEV_CAP_FLASH (1 << 24) +#define LED_HW_PLUGGABLE (1 << 25) +#define LED_PANIC_INDICATOR (1 << 26) /* Set LED brightness level * Must not sleep. Use brightness_set_blocking for drivers @@ -72,8 +73,8 @@ struct led_classdev { * and if both are zero then a sensible default should be chosen. * The call should adjust the timings in that case and if it can't * match the values specified exactly. - * Deactivate blinking again when the brightness is set to a fixed - * value via the brightness_set() callback. + * Deactivate blinking again when the brightness is set to LED_OFF + * via the brightness_set() callback. 
*/ int (*blink_set)(struct led_classdev *led_cdev, unsigned long *delay_on, diff --git a/include/linux/libata.h b/include/linux/libata.h index 2c4ebef79d0c..d15c19e331d1 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -180,6 +180,8 @@ enum { ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ + ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ + ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -191,7 +193,8 @@ enum { ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ ATA_DEV_ZAC = 9, /* ZAC device */ - ATA_DEV_NONE = 10, /* no device */ + ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */ + ATA_DEV_NONE = 11, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -727,6 +730,13 @@ struct ata_device { /* NCQ send and receive log subcommand support */ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE]; + u8 ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE]; + + /* ZAC zone configuration */ + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; /* error history */ int spdn_cnt; @@ -1523,7 +1533,8 @@ static inline unsigned int ata_class_enabled(unsigned int class) static inline unsigned int ata_class_disabled(unsigned int class) { return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || - class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP; + class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP || + class == ATA_DEV_ZAC_UNSUP; } static inline unsigned int ata_class_absent(unsigned int class) @@ -1641,6 +1652,26 @@ static inline bool ata_fpdma_dsm_supported(struct ata_device *dev) ATA_LOG_NCQ_SEND_RECV_DSM_TRIM); } +static inline bool ata_fpdma_read_log_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev) +{ + return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT); +} + static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) { qc->tf.ctl |= ATA_NIEN; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 833867b9ddc2..0c3c30cbbea5 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -27,7 +27,7 @@ enum { /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, ND_CMD_MAX_ELEM = 5, - ND_CMD_MAX_ENVELOPE = 16, + ND_CMD_MAX_ENVELOPE = 256, ND_MAX_MAPPINGS = 32, /* region flag indicating to direct-map persistent memory by default */ @@ -68,7 +68,7 @@ struct nd_mapping { struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long dsm_mask; + unsigned long cmd_mask; char *provider_name; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); @@ -130,10 +130,11 @@ struct nd_region *to_nd_region(struct device *dev); struct nd_blk_region 
*to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); +unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, - unsigned long *dsm_mask); + unsigned long cmd_mask); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 512fd000562b..7ae397669d8b 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1805,7 +1805,6 @@ struct security_hook_heads { struct list_head tun_dev_attach_queue; struct list_head tun_dev_attach; struct list_head tun_dev_open; - struct list_head skb_owned_by; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM struct list_head xfrm_policy_alloc_security; @@ -1894,5 +1893,10 @@ extern void __init yama_add_hooks(void); #else static inline void __init yama_add_hooks(void) { } #endif +#ifdef CONFIG_SECURITY_LOADPIN +void __init loadpin_add_hooks(void); +#else +static inline void loadpin_add_hooks(void) { }; +#endif #endif /* ! __LINUX_LSM_HOOKS_H */ diff --git a/include/linux/mcb.h b/include/linux/mcb.h index ed06e15a36aa..ead13d233a97 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -15,22 +15,30 @@ #include <linux/device.h> #include <linux/irqreturn.h> +#define CHAMELEON_FILENAME_LEN 12 + struct mcb_driver; struct mcb_device; /** * struct mcb_bus - MEN Chameleon Bus * - * @dev: pointer to carrier device - * @children: the child busses + * @dev: bus device + * @carrier: pointer to carrier device * @bus_nr: mcb bus number * @get_irq: callback to get IRQ number + * @revision: the FPGA's revision number + * @model: the FPGA's model number + * @filename: the FPGA's name */ struct mcb_bus { - struct list_head children; struct device dev; struct device *carrier; int bus_nr; + u8 revision; + char model; + u8 minor; + char name[CHAMELEON_FILENAME_LEN + 1]; int (*get_irq)(struct mcb_device *dev); }; #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1191d79aa495..a805474df4ab 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -415,25 +415,6 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) return mz->lru_size[lru]; } -static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - unsigned long inactive_ratio; - unsigned long inactive; - unsigned long active; - unsigned long gb; - - inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); - active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); - - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); - else - inactive_ratio = 1; - - return inactive * inactive_ratio < active; -} - void mem_cgroup_handle_over_high(void); void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, @@ -646,24 +627,12 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return true; } -static inline bool -mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - return true; -} - static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { return 0; } -static inline void 
-mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, - int increment) -{ -} - static inline unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index adbef586e696..5145620ba48a 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -182,7 +182,7 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE -extern void register_page_bootmem_info_node(struct pglist_data *pgdat); +extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); #else static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { @@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {} #ifdef CONFIG_MEMORY_HOTREMOVE -extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); +extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern void remove_memory(int nid, u64 start, u64 size); #else -static inline int is_mem_section_removable(unsigned long pfn, +static inline bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages) { - return 0; + return false; } static inline void try_offline_node(int nid) {} diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 2696c1f05ed1..4429d255c8ab 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol); extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ -static inline int vma_migratable(struct vm_area_struct *vma) +static inline bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) - return 0; + return false; #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION if (vma->vm_flags & VM_HUGETLB) - return 0; + return false; #endif /* @@ -190,8 +190,8 @@ static inline int vma_migratable(struct vm_area_struct *vma) if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) - return 0; - return 1; + return false; + return true; } extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); @@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p) { } +static inline struct mempolicy * +mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +{ + return NULL; +} + #define vma_policy(vma) NULL static inline int diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 69b6951e8fd2..b1086c936507 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,7 @@ #define _LINUX_MEMPOOL_H #include <linux/wait.h> +#include <linux/compiler.h> struct kmem_cache; @@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); -extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; extern void mempool_free(void *element, mempool_t *pool); /* diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d82e7d51372b..0be4982f08fe 100644 --- a/include/linux/mfd/axp20x.h +++ 
b/include/linux/mfd/axp20x.h @@ -20,6 +20,7 @@ enum { AXP221_ID, AXP223_ID, AXP288_ID, + AXP809_ID, NR_AXP20X_VARIANTS, }; @@ -264,6 +265,29 @@ enum { AXP22X_REG_ID_MAX, }; +enum { + AXP809_DCDC1 = 0, + AXP809_DCDC2, + AXP809_DCDC3, + AXP809_DCDC4, + AXP809_DCDC5, + AXP809_DC1SW, + AXP809_DC5LDO, + AXP809_ALDO1, + AXP809_ALDO2, + AXP809_ALDO3, + AXP809_ELDO1, + AXP809_ELDO2, + AXP809_ELDO3, + AXP809_DLDO1, + AXP809_DLDO2, + AXP809_RTC_LDO, + AXP809_LDO_IO0, + AXP809_LDO_IO1, + AXP809_SW, + AXP809_REG_ID_MAX, +}; + /* IRQs */ enum { AXP152_IRQ_LDO0IN_CONNECT = 1, @@ -390,6 +414,41 @@ enum axp288_irqs { AXP288_IRQ_BC_USB_CHNG, }; +enum axp809_irqs { + AXP809_IRQ_ACIN_OVER_V = 1, + AXP809_IRQ_ACIN_PLUGIN, + AXP809_IRQ_ACIN_REMOVAL, + AXP809_IRQ_VBUS_OVER_V, + AXP809_IRQ_VBUS_PLUGIN, + AXP809_IRQ_VBUS_REMOVAL, + AXP809_IRQ_VBUS_V_LOW, + AXP809_IRQ_BATT_PLUGIN, + AXP809_IRQ_BATT_REMOVAL, + AXP809_IRQ_BATT_ENT_ACT_MODE, + AXP809_IRQ_BATT_EXIT_ACT_MODE, + AXP809_IRQ_CHARG, + AXP809_IRQ_CHARG_DONE, + AXP809_IRQ_BATT_CHG_TEMP_HIGH, + AXP809_IRQ_BATT_CHG_TEMP_HIGH_END, + AXP809_IRQ_BATT_CHG_TEMP_LOW, + AXP809_IRQ_BATT_CHG_TEMP_LOW_END, + AXP809_IRQ_BATT_ACT_TEMP_HIGH, + AXP809_IRQ_BATT_ACT_TEMP_HIGH_END, + AXP809_IRQ_BATT_ACT_TEMP_LOW, + AXP809_IRQ_BATT_ACT_TEMP_LOW_END, + AXP809_IRQ_DIE_TEMP_HIGH, + AXP809_IRQ_LOW_PWR_LVL1, + AXP809_IRQ_LOW_PWR_LVL2, + AXP809_IRQ_TIMER, + AXP809_IRQ_PEK_RIS_EDGE, + AXP809_IRQ_PEK_FAL_EDGE, + AXP809_IRQ_PEK_SHORT, + AXP809_IRQ_PEK_LONG, + AXP809_IRQ_PEK_OVER_OFF, + AXP809_IRQ_GPIO1_INPUT, + AXP809_IRQ_GPIO0_INPUT, +}; + #define AXP288_TS_ADC_H 0x58 #define AXP288_TS_ADC_L 0x59 #define AXP288_GP_ADC_H 0x5a diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 9837f1e8c94c..99c0395fe1f9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -131,4 +131,8 @@ static inline int mfd_add_hotplug_devices(struct device *parent, extern void mfd_remove_devices(struct device *parent); +extern int devm_mfd_add_devices(struct device *dev, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base, struct irq_domain *irq_domain); #endif diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index a677c2bd485c..64184d27e3cd 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -50,9 +50,11 @@ enum { EC_MSG_TX_TRAILER_BYTES, EC_MSG_RX_PROTO_BYTES = 3, - /* Max length of messages */ - EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + /* Max length of messages for proto 2*/ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, }; /* diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h new file mode 100644 index 000000000000..dbbe9a644622 --- /dev/null +++ b/include/linux/mfd/hi655x-pmic.h @@ -0,0 +1,55 @@ +/* + * Device driver for regulators in hi655x IC + * + * Copyright (c) 2016 Hisilicon. + * + * Authors: + * Chen Feng <puck.chen@hisilicon.com> + * Fei Wang <w.f@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
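devm_mfd_add_devices(), declared in the mfd/core.h hunk above, is the device-managed counterpart of mfd_add_devices(): the sub-devices are torn down automatically when the parent driver unbinds. A minimal probe-path sketch follows; the foo_* names and the cell list are hypothetical.

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* Hypothetical child cells; a real driver lists its actual sub-functions. */
static const struct mfd_cell foo_pmic_cells[] = {
	{ .name = "foo-regulator" },
	{ .name = "foo-rtc" },
};

static int foo_pmic_probe(struct platform_device *pdev)
{
	/*
	 * No mfd_remove_devices() is needed in .remove(): devres removes
	 * the children when &pdev->dev goes away.
	 */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
				    foo_pmic_cells, ARRAY_SIZE(foo_pmic_cells),
				    NULL, 0, NULL);
}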
+ */ + +#ifndef __HI655X_PMIC_H +#define __HI655X_PMIC_H + +/* Hi655x registers are mapped to memory bus in 4 bytes stride */ +#define HI655X_STRIDE 4 +#define HI655X_BUS_ADDR(x) ((x) << 2) + +#define HI655X_BITS 8 + +#define HI655X_NR_IRQ 32 + +#define HI655X_IRQ_STAT_BASE (0x003 << 2) +#define HI655X_IRQ_MASK_BASE (0x007 << 2) +#define HI655X_ANA_IRQM_BASE (0x1b5 << 2) +#define HI655X_IRQ_ARRAY 4 +#define HI655X_IRQ_MASK 0xFF +#define HI655X_IRQ_CLR 0xFF +#define HI655X_VER_REG 0x00 + +#define PMU_VER_START 0x10 +#define PMU_VER_END 0x38 + +#define RESERVE_INT BIT(7) +#define PWRON_D20R_INT BIT(6) +#define PWRON_D20F_INT BIT(5) +#define PWRON_D4SR_INT BIT(4) +#define VSYS_6P0_D200UR_INT BIT(3) +#define VSYS_UV_D3R_INT BIT(2) +#define VSYS_2P5_R_INT BIT(1) +#define OTMP_D1R_INT BIT(0) + +struct hi655x_pmic { + struct resource *res; + struct device *dev; + struct regmap *regmap; + int gpio; + unsigned int ver; + struct regmap_irq_chip_data *irq_data; +}; + +#endif diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h new file mode 100644 index 000000000000..3ca0af07fc78 --- /dev/null +++ b/include/linux/mfd/max77620.h @@ -0,0 +1,346 @@ +/* + * Defining registers address and its bit definitions of MAX77620 and MAX20024 + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _MFD_MAX77620_H_ +#define _MFD_MAX77620_H_ + +#include <linux/types.h> + +/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */ +#define MAX77620_REG_CNFGGLBL1 0x00 +#define MAX77620_REG_CNFGGLBL2 0x01 +#define MAX77620_REG_CNFGGLBL3 0x02 +#define MAX77620_REG_CNFG1_32K 0x03 +#define MAX77620_REG_CNFGBBC 0x04 +#define MAX77620_REG_IRQTOP 0x05 +#define MAX77620_REG_INTLBT 0x06 +#define MAX77620_REG_IRQSD 0x07 +#define MAX77620_REG_IRQ_LVL2_L0_7 0x08 +#define MAX77620_REG_IRQ_LVL2_L8 0x09 +#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A +#define MAX77620_REG_ONOFFIRQ 0x0B +#define MAX77620_REG_NVERC 0x0C +#define MAX77620_REG_IRQTOPM 0x0D +#define MAX77620_REG_INTENLBT 0x0E +#define MAX77620_REG_IRQMASKSD 0x0F +#define MAX77620_REG_IRQ_MSK_L0_7 0x10 +#define MAX77620_REG_IRQ_MSK_L8 0x11 +#define MAX77620_REG_ONOFFIRQM 0x12 +#define MAX77620_REG_STATLBT 0x13 +#define MAX77620_REG_STATSD 0x14 +#define MAX77620_REG_ONOFFSTAT 0x15 + +/* SD and LDO Registers */ +#define MAX77620_REG_SD0 0x16 +#define MAX77620_REG_SD1 0x17 +#define MAX77620_REG_SD2 0x18 +#define MAX77620_REG_SD3 0x19 +#define MAX77620_REG_SD4 0x1A +#define MAX77620_REG_DVSSD0 0x1B +#define MAX77620_REG_DVSSD1 0x1C +#define MAX77620_REG_SD0_CFG 0x1D +#define MAX77620_REG_SD1_CFG 0x1E +#define MAX77620_REG_SD2_CFG 0x1F +#define MAX77620_REG_SD3_CFG 0x20 +#define MAX77620_REG_SD4_CFG 0x21 +#define MAX77620_REG_SD_CFG2 0x22 +#define MAX77620_REG_LDO0_CFG 0x23 +#define MAX77620_REG_LDO0_CFG2 0x24 +#define MAX77620_REG_LDO1_CFG 0x25 +#define MAX77620_REG_LDO1_CFG2 0x26 +#define MAX77620_REG_LDO2_CFG 0x27 +#define MAX77620_REG_LDO2_CFG2 0x28 +#define MAX77620_REG_LDO3_CFG 0x29 +#define MAX77620_REG_LDO3_CFG2 0x2A +#define MAX77620_REG_LDO4_CFG 0x2B +#define MAX77620_REG_LDO4_CFG2 0x2C +#define MAX77620_REG_LDO5_CFG 0x2D +#define MAX77620_REG_LDO5_CFG2 0x2E +#define MAX77620_REG_LDO6_CFG 0x2F +#define MAX77620_REG_LDO6_CFG2 0x30 +#define MAX77620_REG_LDO7_CFG 0x31 +#define MAX77620_REG_LDO7_CFG2 0x32 +#define 
MAX77620_REG_LDO8_CFG 0x33 +#define MAX77620_REG_LDO8_CFG2 0x34 +#define MAX77620_REG_LDO_CFG3 0x35 + +#define MAX77620_LDO_SLEW_RATE_MASK 0x1 + +/* LDO Configuration 3 */ +#define MAX77620_TRACK4_MASK BIT(5) +#define MAX77620_TRACK4_SHIFT 5 + +/* Voltage */ +#define MAX77620_SDX_VOLT_MASK 0xFF +#define MAX77620_SD0_VOLT_MASK 0x3F +#define MAX77620_SD1_VOLT_MASK 0x7F +#define MAX77620_LDO_VOLT_MASK 0x3F + +#define MAX77620_REG_GPIO0 0x36 +#define MAX77620_REG_GPIO1 0x37 +#define MAX77620_REG_GPIO2 0x38 +#define MAX77620_REG_GPIO3 0x39 +#define MAX77620_REG_GPIO4 0x3A +#define MAX77620_REG_GPIO5 0x3B +#define MAX77620_REG_GPIO6 0x3C +#define MAX77620_REG_GPIO7 0x3D +#define MAX77620_REG_PUE_GPIO 0x3E +#define MAX77620_REG_PDE_GPIO 0x3F +#define MAX77620_REG_AME_GPIO 0x40 +#define MAX77620_REG_ONOFFCNFG1 0x41 +#define MAX77620_REG_ONOFFCNFG2 0x42 + +/* FPS Registers */ +#define MAX77620_REG_FPS_CFG0 0x43 +#define MAX77620_REG_FPS_CFG1 0x44 +#define MAX77620_REG_FPS_CFG2 0x45 +#define MAX77620_REG_FPS_LDO0 0x46 +#define MAX77620_REG_FPS_LDO1 0x47 +#define MAX77620_REG_FPS_LDO2 0x48 +#define MAX77620_REG_FPS_LDO3 0x49 +#define MAX77620_REG_FPS_LDO4 0x4A +#define MAX77620_REG_FPS_LDO5 0x4B +#define MAX77620_REG_FPS_LDO6 0x4C +#define MAX77620_REG_FPS_LDO7 0x4D +#define MAX77620_REG_FPS_LDO8 0x4E +#define MAX77620_REG_FPS_SD0 0x4F +#define MAX77620_REG_FPS_SD1 0x50 +#define MAX77620_REG_FPS_SD2 0x51 +#define MAX77620_REG_FPS_SD3 0x52 +#define MAX77620_REG_FPS_SD4 0x53 +#define MAX77620_REG_FPS_NONE 0 + +#define MAX77620_FPS_SRC_MASK 0xC0 +#define MAX77620_FPS_SRC_SHIFT 6 +#define MAX77620_FPS_PU_PERIOD_MASK 0x38 +#define MAX77620_FPS_PU_PERIOD_SHIFT 3 +#define MAX77620_FPS_PD_PERIOD_MASK 0x07 +#define MAX77620_FPS_PD_PERIOD_SHIFT 0 +#define MAX77620_FPS_TIME_PERIOD_MASK 0x38 +#define MAX77620_FPS_TIME_PERIOD_SHIFT 3 +#define MAX77620_FPS_EN_SRC_MASK 0x06 +#define MAX77620_FPS_EN_SRC_SHIFT 1 +#define MAX77620_FPS_ENFPS_SW_MASK 0x01 +#define MAX77620_FPS_ENFPS_SW 0x01 + +/* Minimum and maximum FPS period time (in microseconds) are + * different for MAX77620 and Max20024. 
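As a usage sketch for the FPS mask/shift pairs above: drivers typically program these fields through regmap. The function name, the choice of LDO3 and FPS source 1, and the rmap handle are illustrative assumptions; the MAX77620_FPS_SRC_1 enumerator comes from the max77620_fps_src enum further down in this header.

#include <linux/mfd/max77620.h>
#include <linux/regmap.h>

/* Route LDO3 power sequencing to FPS source 1 (illustrative only). */
static int foo_set_ldo3_fps_src(struct regmap *rmap)
{
	return regmap_update_bits(rmap, MAX77620_REG_FPS_LDO3,
				  MAX77620_FPS_SRC_MASK,
				  MAX77620_FPS_SRC_1 << MAX77620_FPS_SRC_SHIFT);
}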
+ */ +#define MAX77620_FPS_PERIOD_MIN_US 40 +#define MAX20024_FPS_PERIOD_MIN_US 20 + +#define MAX77620_FPS_PERIOD_MAX_US 2560 +#define MAX20024_FPS_PERIOD_MAX_US 5120 + +#define MAX77620_REG_FPS_GPIO1 0x54 +#define MAX77620_REG_FPS_GPIO2 0x55 +#define MAX77620_REG_FPS_GPIO3 0x56 +#define MAX77620_REG_FPS_RSO 0x57 +#define MAX77620_REG_CID0 0x58 +#define MAX77620_REG_CID1 0x59 +#define MAX77620_REG_CID2 0x5A +#define MAX77620_REG_CID3 0x5B +#define MAX77620_REG_CID4 0x5C +#define MAX77620_REG_CID5 0x5D + +#define MAX77620_REG_DVSSD4 0x5E +#define MAX20024_REG_MAX_ADD 0x70 + +#define MAX77620_CID_DIDM_MASK 0xF0 +#define MAX77620_CID_DIDM_SHIFT 4 + +/* CNCG2SD */ +#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1) +#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2) + +/* Device Identification Metal */ +#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF) +/* Device Indentification OTP */ +#define MAX77620_CID5_DIDO(n) ((n) & 0xF) + +/* SD CNFG1 */ +#define MAX77620_SD_SR_MASK 0xC0 +#define MAX77620_SD_SR_SHIFT 6 +#define MAX77620_SD_POWER_MODE_MASK 0x30 +#define MAX77620_SD_POWER_MODE_SHIFT 4 +#define MAX77620_SD_CFG1_ADE_MASK BIT(3) +#define MAX77620_SD_CFG1_ADE_DISABLE 0 +#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3) +#define MAX77620_SD_FPWM_MASK 0x04 +#define MAX77620_SD_FPWM_SHIFT 2 +#define MAX77620_SD_FSRADE_MASK 0x01 +#define MAX77620_SD_FSRADE_SHIFT 0 +#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) +#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 +#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) +#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) +#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 +#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) + +/* LDO_CNFG2 */ +#define MAX77620_LDO_POWER_MODE_MASK 0xC0 +#define MAX77620_LDO_POWER_MODE_SHIFT 6 +#define MAX77620_LDO_CFG2_ADE_MASK BIT(1) +#define MAX77620_LDO_CFG2_ADE_DISABLE 0 +#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) +#define MAX77620_LDO_CFG2_SS_MASK BIT(0) +#define MAX77620_LDO_CFG2_SS_FAST BIT(0) +#define MAX77620_LDO_CFG2_SS_SLOW 0 + +#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7) +#define MAX77620_IRQ_TOP_SD_MASK BIT(6) +#define MAX77620_IRQ_TOP_LDO_MASK BIT(5) +#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4) +#define MAX77620_IRQ_TOP_RTC_MASK BIT(3) +#define MAX77620_IRQ_TOP_32K_MASK BIT(2) +#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1) + +#define MAX77620_IRQ_LBM_MASK BIT(3) +#define MAX77620_IRQ_TJALRM1_MASK BIT(2) +#define MAX77620_IRQ_TJALRM2_MASK BIT(1) + +#define MAX77620_PWR_I2C_ADDR 0x3c +#define MAX77620_RTC_I2C_ADDR 0x68 + +#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0) +#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0) +#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0 +#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1) +#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1) +#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0 +#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0 +#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4) +#define MAX77620_CNFG_GPIO_INT_FALLING BIT(4) +#define MAX77620_CNFG_GPIO_INT_RISING BIT(5) +#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6) +#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6) +#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6) +#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6) +#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6) + +#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0) +#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1) +#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2) +#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3) +#define 
MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4) +#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5) +#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6) +#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7) + +#define MAX77620_CNFG1_32K_OUT0_EN BIT(2) + +#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7) +#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38 +#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3 +#define MAX77620_ONOFFCNFG1_SLPEN BIT(2) +#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1) +#define MAX20024_ONOFFCNFG1_CLRSE 0x18 + +#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7) +#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6) +#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5) +#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2) +#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0) + +#define MAX77620_GLBLM_MASK BIT(0) + +#define MAX77620_WDTC_MASK 0x3 +#define MAX77620_WDTOFFC BIT(4) +#define MAX77620_WDTSLPC BIT(3) +#define MAX77620_WDTEN BIT(2) + +#define MAX77620_TWD_MASK 0x3 +#define MAX77620_TWD_2s 0x0 +#define MAX77620_TWD_16s 0x1 +#define MAX77620_TWD_64s 0x2 +#define MAX77620_TWD_128s 0x3 + +#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7) +#define MAX77620_CNFGGLBL1_MPPLD BIT(6) +#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4)) +#define MAX77620_CNFGGLBL1_LBDAC 0x0E +#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0) + +/* CNFG BBC registers */ +#define MAX77620_CNFGBBC_ENABLE BIT(0) +#define MAX77620_CNFGBBC_CURRENT_MASK 0x06 +#define MAX77620_CNFGBBC_CURRENT_SHIFT 1 +#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18 +#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3 +#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5) +#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0 +#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6 + +#define MAX77620_FPS_COUNT 3 + +/* Interrupts */ +enum { + MAX77620_IRQ_TOP_GLBL, /* Low-Battery */ + MAX77620_IRQ_TOP_SD, /* SD power fail */ + MAX77620_IRQ_TOP_LDO, /* LDO power fail */ + MAX77620_IRQ_TOP_GPIO, /* TOP GPIO internal int to MAX77620 */ + MAX77620_IRQ_TOP_RTC, /* RTC */ + MAX77620_IRQ_TOP_32K, /* 32kHz oscillator */ + MAX77620_IRQ_TOP_ONOFF, /* ON/OFF oscillator */ + MAX77620_IRQ_LBT_MBATLOW, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM1, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM2, /* Thermal alarm status, > 140C */ +}; + +/* GPIOs */ +enum { + MAX77620_GPIO0, + MAX77620_GPIO1, + MAX77620_GPIO2, + MAX77620_GPIO3, + MAX77620_GPIO4, + MAX77620_GPIO5, + MAX77620_GPIO6, + MAX77620_GPIO7, + MAX77620_GPIO_NR, +}; + +/* FPS Source */ +enum max77620_fps_src { + MAX77620_FPS_SRC_0, + MAX77620_FPS_SRC_1, + MAX77620_FPS_SRC_2, + MAX77620_FPS_SRC_NONE, + MAX77620_FPS_SRC_DEF, +}; + +enum max77620_chip_id { + MAX77620, + MAX20024, +}; + +struct max77620_chip { + struct device *dev; + struct regmap *rmap; + + int chip_irq; + int irq_base; + + /* chip id */ + enum max77620_chip_id chip_id; + + bool sleep_enable; + bool enable_global_lpm; + int shutdown_fps_period[MAX77620_FPS_COUNT]; + int suspend_fps_period[MAX77620_FPS_COUNT]; + + struct regmap_irq_chip_data *top_irq_data; + struct regmap_irq_chip_data *gpio_irq_data; +}; + +#endif /* _MFD_MAX77620_H_ */ diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index 1088149be0c9..40a76b97b7ab 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -16,6 +16,7 @@ #define __LINUX_MFD_SYSCON_H__ #include <linux/err.h> +#include <linux/errno.h> struct device_node; diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h index 9352adc95de6..76f30f940c70 100644 --- a/include/linux/mfd/syscon/exynos5-pmu.h +++ 
b/include/linux/mfd/syscon/exynos5-pmu.h @@ -38,6 +38,9 @@ /* Exynos5433 specific register definitions */ #define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728) +#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710) +#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714) +#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718) #define EXYNOS5_PHY_ENABLE BIT(0) diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 238c8db953eb..c8e0164c5423 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -95,6 +95,7 @@ #define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0) #define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30) +#define IMX6Q_GPR1_PCIE_SW_RST BIT(29) #define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28) #define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27) #define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26) @@ -447,5 +448,11 @@ #define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18) #define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17) #define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17) +#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19) +#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20) +#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21) +#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19) +#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \ + IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR) #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index 8f9fc3d26e6d..8e95cd87cd74 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -134,6 +134,7 @@ #define TWL6040_HFDACENA (1 << 0) #define TWL6040_HFPGAENA (1 << 1) #define TWL6040_HFDRVENA (1 << 4) +#define TWL6040_HFSWENA (1 << 6) /* VIBCTLL/R (0x18/0x1A) fields */ diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h index 2de565b94d0c..4ee908f5b834 100644 --- a/include/linux/mfd/wm8400-private.h +++ b/include/linux/mfd/wm8400-private.h @@ -923,7 +923,6 @@ struct wm8400 { #define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ #define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ -u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg); int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index b2c9fada8eac..2be976dd4966 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -53,6 +53,11 @@ struct mlx5_core_cq { unsigned arm_sn; struct mlx5_rsc_debug *dbg; int pid; + struct { + struct list_head list; + void (*comp)(struct mlx5_core_cq *); + void *priv; + } tasklet_ctx; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 035abdf62cfe..73a48479892d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out { u8 rsvd[8]; }; -#define MLX5_CMD_OP_MAX 0x920 - enum { VPORT_STATE_DOWN = 0x0, VPORT_STATE_UP = 0x1, @@ -1369,6 +1367,12 @@ enum mlx5_cap_type { #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) + +#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) diff --git 
a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 07b504f7eb84..80776d0c52dc 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -42,6 +42,7 @@ #include <linux/vmalloc.h> #include <linux/radix-tree.h> #include <linux/workqueue.h> +#include <linux/interrupt.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> @@ -312,6 +313,14 @@ struct mlx5_buf { u8 page_shift; }; +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; @@ -325,6 +334,7 @@ struct mlx5_eq { struct list_head list; int index; struct mlx5_rsc_debug *dbg; + struct mlx5_eq_tasklet tasklet_ctx; }; struct mlx5_core_psv { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9a05cd7e5890..e955a2859009 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -205,7 +205,8 @@ enum { MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, - MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, + MLX5_CMD_OP_MAX }; struct mlx5_ifc_flow_table_fields_supported_bits { @@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x1b]; + u8 reserved_at_5[0x19]; + u8 nic_vport_node_guid_modify[0x1]; + u8 nic_vport_port_guid_modify[0x1]; u8 reserved_at_20[0x7e0]; }; @@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_at_0[0x19]; + u8 reserved_at_0[0x16]; + u8 node_guid[0x1]; + u8 port_guid[0x1]; + u8 reserved_at_18[0x1]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 64221027bf1f..ab310819ac36 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -172,6 +172,7 @@ enum { enum { MLX5_FENCE_MODE_NONE = 0 << 5, MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_FENCE = 2 << 5, MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, }; @@ -460,10 +461,9 @@ struct mlx5_core_qp { }; struct mlx5_qp_path { - u8 fl; + u8 fl_free_ar; u8 rsvd3; - u8 free_ar; - u8 pkey_index; + __be16 pkey_index; u8 rsvd0; u8 grh_mlid; __be16 rlid; @@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in { __be32 optparam; u8 rsvd0[4]; struct mlx5_qp_context ctx; + u8 rsvd2[16]; }; struct mlx5_modify_qp_mbox_out { diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 301da4a5e6bf..6c16c198f680 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); +int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, + u32 vport, u64 node_guid); int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, u16 *qkey_viol_cntr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, diff --git a/include/linux/mm.h b/include/linux/mm.h index 727f799757ab..ece042dfe23c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -303,6 +303,12 @@ 
struct vm_fault { * is set (which is also implied by * VM_FAULT_ERROR). */ + void *entry; /* ->fault handler can alternatively + * return locked DAX entry. In that + * case handler should return + * VM_FAULT_DAX_LOCKED and fill in + * entry here. + */ /* for ->map_pages() only */ pgoff_t max_pgoff; /* map pages for offset from pgoff till * max_pgoff inclusive */ @@ -447,14 +453,14 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. */ -static inline int is_vmalloc_addr(const void *x) +static inline bool is_vmalloc_addr(const void *x) { #ifdef CONFIG_MMU unsigned long addr = (unsigned long)x; return addr >= VMALLOC_START && addr < VMALLOC_END; #else - return 0; + return false; #endif } #ifdef CONFIG_MMU @@ -475,8 +481,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) static inline int compound_mapcount(struct page *page) { - if (!PageCompound(page)) - return 0; + VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); return atomic_read(compound_mapcount_ptr(page)) + 1; } @@ -734,7 +739,7 @@ static inline void get_page(struct page *page) page = compound_head(page); /* * Getting a normal page or the head of a compound page - * requires to already have an elevated page->_count. + * requires to already have an elevated page->_refcount. */ VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); page_ref_inc(page); @@ -850,10 +855,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { - int cpupid = (1 << LAST_CPUPID_SHIFT) - 1; - - page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); - page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; + page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ #else /* !CONFIG_NUMA_BALANCING */ @@ -1032,26 +1034,7 @@ static inline pgoff_t page_file_index(struct page *page) return page->index; } -/* - * Return true if this page is mapped into pagetables. - * For compound page it returns true if any subpage of compound page is mapped. - */ -static inline bool page_mapped(struct page *page) -{ - int i; - if (likely(!PageCompound(page))) - return atomic_read(&page->_mapcount) >= 0; - page = compound_head(page); - if (atomic_read(compound_mapcount_ptr(page)) >= 0) - return true; - if (PageHuge(page)) - return false; - for (i = 0; i < hpage_nr_pages(page); i++) { - if (atomic_read(&page[i]._mapcount) >= 0) - return true; - } - return false; -} +bool page_mapped(struct page *page); /* * Return true only if the page has been allocated with @@ -1099,6 +1082,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ +#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ @@ -1786,7 +1770,7 @@ extern void free_highmem_page(struct page *page); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(unsigned long start, unsigned long end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. 
*/ static inline void __free_reserved_page(struct page *page) @@ -2034,9 +2018,9 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} #endif /* These take the mm semaphore themselves */ -extern unsigned long vm_brk(unsigned long, unsigned long); +extern int __must_check vm_brk(unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); -extern unsigned long vm_mmap(struct file *, unsigned long, +extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); @@ -2409,6 +2393,9 @@ static inline bool page_is_guard(struct page *page) return false; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return false; + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 712e8c37a200..5bd29ba4f174 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page) return !PageSwapBacked(page); } +static __always_inline void __update_lru_size(struct lruvec *lruvec, + enum lru_list lru, int nr_pages) +{ + __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); +} + +static __always_inline void update_lru_size(struct lruvec *lruvec, + enum lru_list lru, int nr_pages) +{ +#ifdef CONFIG_MEMCG + mem_cgroup_update_lru_size(lruvec, lru, nr_pages); +#else + __update_lru_size(lruvec, lru, nr_pages); +#endif +} + static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, nr_pages); + update_lru_size(lruvec, lru, hpage_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); } static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - int nr_pages = hpage_nr_pages(page); - mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); list_del(&page->lru); - __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages); + update_lru_size(lruvec, lru, -hpage_nr_pages(page)); } /** diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c2d75b4fa86c..ca3e517980a0 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -12,6 +12,7 @@ #include <linux/cpumask.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> +#include <linux/workqueue.h> #include <asm/page.h> #include <asm/mmu.h> @@ -73,9 +74,9 @@ struct page { unsigned long counters; #else /* - * Keep _count separate from slub cmpxchg_double data. - * As the rest of the double word is protected by - * slab_lock but _count is not. + * Keep _refcount separate from slub cmpxchg_double + * data. As the rest of the double word is protected by + * slab_lock but _refcount is not. */ unsigned counters; #endif @@ -97,7 +98,11 @@ struct page { }; int units; /* SLOB */ }; - atomic_t _count; /* Usage count, see below. */ + /* + * Usage count, *USE WRAPPER FUNCTION* + * when manual accounting. See page_ref.h + */ + atomic_t _refcount; }; unsigned int active; /* SLAB */ }; @@ -248,7 +253,7 @@ struct page_frag_cache { __u32 offset; #endif /* we maintain a pagecount bias, so that we dont dirty cache line - * containing page->_count every time we allocate a fragment. + * containing page->_refcount every time we allocate a fragment. 
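The _count to _refcount rename above is paired with a "use wrapper function" note; a short sketch of what that means in practice, assuming the page_ref_*() helpers from <linux/page_ref.h> (the function names here are made up). Going through the wrappers rather than calling atomic_*() on the field directly keeps the page_ref tracepoints accurate.

#include <linux/mm.h>
#include <linux/page_ref.h>

static void grab_extra_reference(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);	/* rather than atomic_inc(&page->_refcount) */
}

static bool drop_reference(struct page *page)
{
	/* true when the reference count drops to zero */
	return page_ref_dec_and_test(page);
}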
*/ unsigned int pagecnt_bias; bool pfmemalloc; @@ -509,6 +514,9 @@ struct mm_struct { #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif +#ifdef CONFIG_MMU + struct work_struct async_put_work; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 85800b48241f..45cde8cd39f2 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -329,6 +329,7 @@ struct mmc_host { unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ + unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ @@ -526,4 +527,7 @@ static inline void mmc_retune_recheck(struct mmc_host *host) host->retune_now = 1; } +void mmc_retune_pause(struct mmc_host *host); +void mmc_retune_unpause(struct mmc_host *host); + #endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 83430f2ea757..0d126aeb3ec0 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -36,6 +36,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c60df9257cc7..02069c23486d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -85,13 +85,6 @@ extern int page_group_by_mobility_disabled; get_pfnblock_flags_mask(page, page_to_pfn(page), \ PB_migrate_end, MIGRATETYPE_MASK) -static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) -{ - BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); - return get_pfnblock_flags_mask(page, pfn, PB_migrate_end, - MIGRATETYPE_MASK); -} - struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; @@ -746,8 +739,12 @@ static inline bool is_dev_zone(const struct zone *zone) extern struct mutex zonelists_mutex; void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, unsigned int alloc_flags, + long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, int alloc_flags); + unsigned long mark, int classzone_idx, + unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); enum memmap_context { @@ -828,10 +825,7 @@ static inline int is_highmem_idx(enum zone_type idx) static inline int is_highmem(struct zone *zone) { #ifdef CONFIG_HIGHMEM - int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; - return zone_off == ZONE_HIGHMEM * sizeof(*zone) || - (zone_off == ZONE_MOVABLE * sizeof(*zone) && - zone_movable_is_highmem()); + return is_highmem_idx(zone_idx(zone)); #else return 0; #endif @@ -922,6 +916,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) #endif /* CONFIG_NUMA */ } +struct zoneref *__next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes); + /** * next_zones_zonelist - Returns the next zone at or below 
highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point * @z - The cursor used as a starting point for the search @@ -934,9 +932,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) * being examined. It should be advanced by one before calling * next_zones_zonelist again. */ -struct zoneref *next_zones_zonelist(struct zoneref *z, +static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, - nodemask_t *nodes); + nodemask_t *nodes) +{ + if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) + return z; + return __next_zones_zonelist(z, highest_zoneidx, nodes); +} /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist @@ -952,13 +955,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z, */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, - nodemask_t *nodes, - struct zone **zone) + nodemask_t *nodes) { - struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs, + return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes); - *zone = zonelist_zone(z); - return z; } /** @@ -973,10 +973,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, * within a given nodemask */ #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ - for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ + for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask), \ + zone = zonelist_zone(z)) + +#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (zone = z->zone; \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask), \ - zone = zonelist_zone(z)) \ + zone = zonelist_zone(z)) + /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index @@ -1056,7 +1063,7 @@ struct mem_section { unsigned long *pageblock_flags; #ifdef CONFIG_PAGE_EXTENSION /* - * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use * section. (see page_ext.h about this.) */ struct page_ext *page_ext; diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h index c8be32e9fc49..ad3c3488073c 100644 --- a/include/linux/mtd/fsmc.h +++ b/include/linux/mtd/fsmc.h @@ -103,24 +103,6 @@ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) -/* - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 - * and it has to be read consecutively and immediately after the 512 - * byte data block for hardware to generate the error bit offsets - * Managing the ecc bytes in the following way is easier. 
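Returning to the mmzone.h changes above: first_zones_zonelist() no longer fills in a struct zone ** out-parameter and next_zones_zonelist() gained an inline fast path, so callers now derive the zone from the returned zoneref themselves. A minimal caller sketch, with an invented function name:

#include <linux/mmzone.h>

static struct zone *pick_first_zone(struct zonelist *zonelist,
				    enum zone_type highest_zoneidx,
				    nodemask_t *nodemask)
{
	struct zoneref *z;

	z = first_zones_zonelist(zonelist, highest_zoneidx, nodemask);
	return zonelist_zone(z);	/* NULL when no zone qualifies */
}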
This way is - * similar to oobfree structure maintained already in u-boot nand driver - */ -#define MAX_ECCPLACE_ENTRIES 32 - -struct fsmc_nand_eccplace { - uint8_t offset; - uint8_t length; -}; - -struct fsmc_eccplace { - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; -}; - struct fsmc_nand_timings { uint8_t tclr; uint8_t tar; diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 5e0eb7ccabd4..3aa56e3104bb 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -122,18 +122,13 @@ #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 -# ifdef map_bankwidth -# undef map_bankwidth -# define map_bankwidth(map) ((map)->bankwidth) -# undef map_bankwidth_is_large -# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) -# undef map_words -# define map_words(map) map_calc_words(map) -# else -# define map_bankwidth(map) 32 -# define map_bankwidth_is_large(map) (1) -# define map_words(map) map_calc_words(map) -# endif +/* always use indirect access for 256-bit to preserve kernel stack */ +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 32 diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index ef9fea4fc400..29a170612203 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -96,16 +96,35 @@ struct mtd_oob_ops { #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 +/** + * struct mtd_oob_region - oob region definition + * @offset: region offset + * @length: region length + * + * This structure describes a region of the OOB area, and is used + * to retrieve ECC or free bytes sections. + * Each section is defined by an offset within the OOB area and a + * length. + */ +struct mtd_oob_region { + u32 offset; + u32 length; +}; + /* - * Internal ECC layout control structure. For historical reasons, there is a - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained - * for export to user-space via the ECCGETLAYOUT ioctl. - * nand_ecclayout should be expandable in the future simply by the above macros. + * struct mtd_ooblayout_ops - NAND OOB layout operations + * @ecc: function returning an ECC region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * ECC sections. + * @free: function returning a free region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * free sections. */ -struct nand_ecclayout { - __u32 eccbytes; - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; +struct mtd_ooblayout_ops { + int (*ecc)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); + int (*free)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); }; struct module; /* only needed for owner field in mtd_info */ @@ -166,8 +185,8 @@ struct mtd_info { const char *name; int index; - /* ECC layout structure pointer - read only! */ - struct nand_ecclayout *ecclayout; + /* OOB layout description */ + const struct mtd_ooblayout_ops *ooblayout; /* the ecc step size. 
*/ unsigned int ecc_step_size; @@ -253,6 +272,30 @@ struct mtd_info { int usecount; }; +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, + int *section, + struct mtd_oob_region *oobregion); +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); + +static inline void mtd_set_ooblayout(struct mtd_info *mtd, + const struct mtd_ooblayout_ops *ooblayout) +{ + mtd->ooblayout = ooblayout; +} + static inline void mtd_set_of_node(struct mtd_info *mtd, struct device_node *np) { diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 56574ba36555..fbe8e164a4ee 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,9 +116,14 @@ typedef enum { NAND_ECC_HW, NAND_ECC_HW_SYNDROME, NAND_ECC_HW_OOB_FIRST, - NAND_ECC_SOFT_BCH, } nand_ecc_modes_t; +enum nand_ecc_algo { + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, +}; + /* * Constants for Hardware ECC */ @@ -458,6 +463,7 @@ struct nand_hw_control { /** * struct nand_ecc_ctrl - Control structure for ECC * @mode: ECC mode + * @algo: ECC algorithm * @steps: number of ECC steps per page * @size: data bytes per ECC step * @bytes: ECC bytes per step @@ -466,7 +472,6 @@ struct nand_hw_control { * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators * @options: ECC specific options (see NAND_ECC_XXX flags defined above) - * @layout: ECC layout control struct pointer * @priv: pointer to private ECC control data * @hwctl: function to control hardware ECC generator. 
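A note on the nand.h changes in this section: NAND_ECC_SOFT_BCH disappears from nand_ecc_modes_t, and the Hamming/BCH choice moves into the new algo field of struct nand_ecc_ctrl. A hedged sketch of the intended replacement, with an invented helper (the conversion of real drivers is not part of this header-only diff):

#include <linux/mtd/nand.h>

/* Sketch: what used to be ecc.mode = NAND_ECC_SOFT_BCH is now two fields. */
static void foo_select_soft_bch(struct nand_chip *chip)
{
        chip->ecc.mode = NAND_ECC_SOFT;
        chip->ecc.algo = NAND_ECC_BCH;
}
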
Must only * be provided if an hardware ECC is available @@ -508,6 +513,7 @@ struct nand_hw_control { */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; + enum nand_ecc_algo algo; int steps; int size; int bytes; @@ -516,7 +522,6 @@ struct nand_ecc_ctrl { int prepad; int postpad; unsigned int options; - struct nand_ecclayout *layout; void *priv; void (*hwctl)(struct mtd_info *mtd, int mode); int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, @@ -740,6 +745,9 @@ struct nand_chip { void *priv; }; +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; + static inline void nand_set_flash_node(struct nand_chip *chip, struct device_node *np) { @@ -1070,4 +1078,18 @@ int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, int threshold); + +/* Default write_oob implementation */ +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default write_oob syndrome implementation */ +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); + +/* Default read_oob implementation */ +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default read_oob syndrome implementation */ +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 4596503c9da9..0aaa98b219a4 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -80,7 +80,6 @@ struct onenand_bufferram { * @page_buf: [INTERN] page main data buffer * @oob_buf: [INTERN] page oob data buffer * @subpagesize: [INTERN] holds the subpagesize - * @ecclayout: [REPLACEABLE] the default ecc placement scheme * @bbm: [REPLACEABLE] pointer to Bad Block Management * @priv: [OPTIONAL] pointer to private chip date */ @@ -134,7 +133,6 @@ struct onenand_chip { #endif int subpagesize; - struct nand_ecclayout *ecclayout; void *bbm; diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 25f4d2a845c1..65e91d0fa981 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -14,7 +14,7 @@ struct sharpsl_nand_platform_data { struct nand_bbt_descr *badblock_pattern; - struct nand_ecclayout *ecc_layout; + const struct mtd_ooblayout_ops *ecc_layout; struct mtd_partition *partitions; unsigned int nr_partitions; }; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 3c36113a88e1..7f041bd88b82 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -21,6 +21,7 @@ * Sometimes these are the same as CFI IDs, but sometimes they aren't. 
*/ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL #define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX diff --git a/include/linux/namei.h b/include/linux/namei.h index ec5ec2818a28..d3d0398f2a1b 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_ROOT 0x2000 #define LOOKUP_EMPTY 0x4000 +extern int path_pts(struct path *path); + extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); static inline int user_path_at(int dfd, const char __user *name, unsigned flags, diff --git a/include/linux/nd.h b/include/linux/nd.h index 5489ab756d1a..aee2761d294c 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -15,6 +15,7 @@ #include <linux/fs.h> #include <linux/ndctl.h> #include <linux/device.h> +#include <linux/badblocks.h> enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev) } /** - * struct nd_namespace_io - infrastructure for loading an nd_pmem instance + * struct nd_namespace_io - device representation of a persistent memory range * @dev: namespace device created by the nd region driver * @res: struct resource conversion of a NFIT SPA table + * @size: cached resource_size(@res) for fast path size checks + * @addr: virtual address to access the namespace range + * @bb: badblocks list for the namespace range */ struct nd_namespace_io { struct nd_namespace_common common; struct resource res; + resource_size_t size; + void __pmem *addr; + struct badblocks bb; }; /** @@ -82,6 +89,7 @@ struct nd_namespace_pmem { * @uuid: namespace name supplied in the dimm label * @id: ida allocated id * @lbasize: blk namespaces have a native sector size when btt not present + * @size: sum of all the resource ranges allocated to this namespace * @num_resources: number of dpa extents to claim * @res: discontiguous dpa extents for given dimm */ @@ -91,6 +99,7 @@ struct nd_namespace_blk { u8 *uuid; int id; unsigned long lbasize; + resource_size_t size; int num_resources; struct resource **res; }; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index bc8736266749..aa7b2400f98c 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -44,8 +44,8 @@ enum { NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ - NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */ - NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ + NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */ + NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ NETIF_F_GSO_PARTIAL_BIT, /* ... 
Only segment inner-most L4 @@ -121,8 +121,8 @@ enum { #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) -#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) -#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) +#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4) +#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) #define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) @@ -200,8 +200,8 @@ enum { #define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ - NETIF_F_GSO_IPIP | \ - NETIF_F_GSO_SIT | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c148edfe4965..f45929ce8157 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4006,8 +4006,8 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 011433478a14..bfed6b367350 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -50,12 +50,27 @@ struct nfs4_label { typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; -struct nfs_stateid4 { - __be32 seqid; - char other[NFS4_STATEID_OTHER_SIZE]; -} __attribute__ ((packed)); +struct nfs4_stateid_struct { + union { + char data[NFS4_STATEID_SIZE]; + struct { + __be32 seqid; + char other[NFS4_STATEID_OTHER_SIZE]; + } __attribute__ ((packed)); + }; + + enum { + NFS4_INVALID_STATEID_TYPE = 0, + NFS4_SPECIAL_STATEID_TYPE, + NFS4_OPEN_STATEID_TYPE, + NFS4_LOCK_STATEID_TYPE, + NFS4_DELEGATION_STATEID_TYPE, + NFS4_LAYOUT_STATEID_TYPE, + NFS4_PNFS_DS_STATEID_TYPE, + } type; +}; -typedef struct nfs_stateid4 nfs4_stateid; +typedef struct nfs4_stateid_struct nfs4_stateid; enum nfs_opnum4 { OP_ACCESS = 3, @@ -504,6 +519,7 @@ enum { NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, NFSPROC4_CLNT_CLONE, + NFSPROC4_CLNT_COPY, }; /* nfs41 types */ @@ -621,7 +637,9 @@ enum pnfs_update_layout_reason { PNFS_UPDATE_LAYOUT_IO_TEST_FAIL, PNFS_UPDATE_LAYOUT_FOUND_CACHED, PNFS_UPDATE_LAYOUT_RETURN, + PNFS_UPDATE_LAYOUT_RETRY, PNFS_UPDATE_LAYOUT_BLOCKED, + PNFS_UPDATE_LAYOUT_INVALID_OPEN, PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, }; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 7fcc13c8cf1f..14a762d2734d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -246,5 +246,6 @@ struct nfs_server { #define NFS_CAP_DEALLOCATE (1U << 21) #define NFS_CAP_LAYOUTSTATS (1U << 22) #define NFS_CAP_CLONE (1U << 23) +#define NFS_CAP_COPY (1U << 24) #endif diff --git a/include/linux/nfs_xdr.h 
b/include/linux/nfs_xdr.h index ee8491dadbf3..c304a11b5b1a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -233,7 +233,6 @@ struct nfs4_layoutget_args { struct inode *inode; struct nfs_open_context *ctx; nfs4_stateid stateid; - unsigned long timestamp; struct nfs4_layoutdriver_data layout; }; @@ -251,7 +250,6 @@ struct nfs4_layoutget { struct nfs4_layoutget_res res; struct rpc_cred *cred; gfp_t gfp_flags; - long timeout; }; struct nfs4_getdeviceinfo_args { @@ -1343,6 +1341,32 @@ struct nfs42_falloc_res { const struct nfs_server *falloc_server; }; +struct nfs42_copy_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *src_fh; + nfs4_stateid src_stateid; + u64 src_pos; + + struct nfs_fh *dst_fh; + nfs4_stateid dst_stateid; + u64 dst_pos; + + u64 count; +}; + +struct nfs42_write_res { + u64 count; + struct nfs_writeverf verifier; +}; + +struct nfs42_copy_res { + struct nfs4_sequence_res seq_res; + struct nfs42_write_res write_res; + bool consecutive; + bool synchronous; +}; + struct nfs42_seek_args { struct nfs4_sequence_args seq_args; @@ -1431,7 +1455,7 @@ struct nfs_commit_completion_ops { }; struct nfs_commit_info { - spinlock_t *lock; /* inode->i_lock */ + struct inode *inode; /* Needed for inode->i_lock */ struct nfs_mds_commit_info *mds; struct pnfs_ds_commit_info *ds; struct nfs_direct_req *dreq; /* O_DIRECT request */ diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index e9fcf90b270d..5988dd57ba66 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * - * You should have received a copy of the GNU Lesser General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net> - * Ryusuke Konishi <ryusuke@osrg.net> + * Written by Koji Sato and Ryusuke Konishi. */ /* * linux/include/linux/ext2_fs.h @@ -132,10 +127,14 @@ struct nilfs_super_root { #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ -#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order - semantics also for data */ -#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during - mount-time recovery */ +#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* + * Apply strict in-order + * semantics also for data + */ +#define NILFS_MOUNT_NORECOVERY 0x4000 /* + * Disable write access during + * mount-time recovery + */ #define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */ @@ -147,16 +146,20 @@ struct nilfs_super_block { __le16 s_minor_rev_level; /* minor revision level */ __le16 s_magic; /* Magic signature */ - __le16 s_bytes; /* Bytes count of CRC calculation - for this structure. s_reserved - is excluded. */ + __le16 s_bytes; /* + * Bytes count of CRC calculation + * for this structure. s_reserved + * is excluded. 
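On the nfs_xdr.h change above that stores the inode in struct nfs_commit_info instead of a cached pointer to inode->i_lock: call sites that used to take cinfo->lock now lock the inode directly. A sketch of the expected pattern (the actual users live in fs/nfs and are outside this header-only diff; the helper name is invented):

#include <linux/fs.h>
#include <linux/nfs_xdr.h>

static void foo_with_commit_lock(struct nfs_commit_info *cinfo)
{
        /* was: spin_lock(cinfo->lock); */
        spin_lock(&cinfo->inode->i_lock);
        /* ... update the commit lists hanging off cinfo->mds ... */
        spin_unlock(&cinfo->inode->i_lock);
}
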
+ */ __le16 s_flags; /* flags */ __le32 s_crc_seed; /* Seed value of CRC calculation */ /*10*/ __le32 s_sum; /* Check sum of super block */ - __le32 s_log_block_size; /* Block size represented as follows - blocksize = - 1 << (s_log_block_size + 10) */ + __le32 s_log_block_size; /* + * Block size represented as follows + * blocksize = + * 1 << (s_log_block_size + 10) + */ __le64 s_nsegments; /* Number of segments in filesystem */ /*20*/ __le64 s_dev_size; /* block device size in bytes */ __le64 s_first_data_block; /* 1st seg disk block number */ @@ -168,8 +171,10 @@ struct nilfs_super_block { __le64 s_last_seq; /* seq. number of seg written last */ /*50*/ __le64 s_free_blocks_count; /* Free blocks count */ - __le64 s_ctime; /* Creation time (execution time of - newfs) */ + __le64 s_ctime; /* + * Creation time (execution time of + * newfs) + */ /*60*/ __le64 s_mtime; /* Mount time */ __le64 s_wtime; /* Write time */ /*70*/ __le16 s_mnt_count; /* Mount count */ @@ -193,8 +198,10 @@ struct nilfs_super_block { /*A8*/ char s_volume_name[80]; /* volume name */ /*F8*/ __le32 s_c_interval; /* Commit interval of segment */ - __le32 s_c_block_max; /* Threshold of data amount for - the segment construction */ + __le32 s_c_block_max; /* + * Threshold of data amount for + * the segment construction + */ /*100*/ __le64 s_feature_compat; /* Compatible feature set */ __le64 s_feature_compat_ro; /* Read-only compatible feature set */ __le64 s_feature_incompat; /* Incompatible feature set */ @@ -247,12 +254,18 @@ struct nilfs_super_block { #define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ -#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in - a full segment */ -#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in - a partial segment */ -#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved - segments */ +#define NILFS_SEG_MIN_BLOCKS 16 /* + * Minimum number of blocks in + * a full segment + */ +#define NILFS_PSEG_MIN_BLOCKS 2 /* + * Minimum number of blocks in + * a partial segment + */ +#define NILFS_MIN_NRSVSEGS 8 /* + * Minimum number of reserved + * segments + */ /* * We call DAT, cpfile, and sufile root metadata files. Inodes of @@ -327,9 +340,9 @@ enum { ~NILFS_DIR_ROUND) #define NILFS_MAX_REC_LEN ((1<<16)-1) -static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) +static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) { - unsigned len = le16_to_cpu(dlen); + unsigned int len = le16_to_cpu(dlen); #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == NILFS_MAX_REC_LEN) @@ -338,7 +351,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) return len; } -static inline __le16 nilfs_rec_len_to_disk(unsigned len) +static inline __le16 nilfs_rec_len_to_disk(unsigned int len) { #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == (1 << 16)) @@ -518,9 +531,11 @@ struct nilfs_checkpoint { __le64 cp_inodes_count; __le64 cp_blocks_count; - /* Do not change the byte offset of ifile inode. - To keep the compatibility of the disk format, - additional fields should be added behind cp_ifile_inode. */ + /* + * Do not change the byte offset of ifile inode. + * To keep the compatibility of the disk format, + * additional fields should be added behind cp_ifile_inode. 
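A quick worked example for the s_log_block_size formula quoted above, assuming a hypothetical superblock pointer sbp: the stored value is the block-size exponent minus 10, so 0 means 1 KiB blocks and 2 means 4 KiB blocks.

/* s_log_block_size == 0 -> 1024-byte blocks, == 2 -> 4096-byte blocks */
unsigned int blocksize = 1U << (le32_to_cpu(sbp->s_log_block_size) + 10);
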
+ */ struct nilfs_inode cp_ifile_inode; }; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 6e85889cf9ab..f746e44d4046 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -43,8 +43,10 @@ * * int first_node(mask) Number lowest set bit, or MAX_NUMNODES * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES + * int next_node_in(node, mask) Next node past 'node', or wrap to first, + * or MAX_NUMNODES * int first_unset_node(mask) First node not set in mask, or - * MAX_NUMNODES. + * MAX_NUMNODES * * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set * NODE_MASK_ALL Initializer - all bits set @@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp) return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } +/* + * Find the next present node in src, starting after node n, wrapping around to + * the first node in src if needed. Returns MAX_NUMNODES if src is empty. + */ +#define next_node_in(n, src) __next_node_in((n), &(src)) +int __next_node_in(int node, const nodemask_t *srcp); + static inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index a4fcc90b0f20..cd93416d762e 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -14,6 +14,10 @@ struct nvmem_device; struct nvmem_cell_info; +typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, + void *val, size_t bytes); +typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, + void *val, size_t bytes); struct nvmem_config { struct device *dev; @@ -24,6 +28,12 @@ struct nvmem_config { int ncells; bool read_only; bool root_only; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; /* To be only used by old driver/misc/eeprom drivers */ bool compat; struct device *base_dev; diff --git a/include/linux/of.h b/include/linux/of.h index 77ddace575e8..74eb28cadbef 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -75,6 +75,23 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_phandle_iterator { + /* Common iterator information */ + const char *cells_name; + int cell_count; + const struct device_node *parent; + + /* List size information */ + const __be32 *list_end; + const __be32 *phandle_end; + + /* Current position state */ + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + struct of_reconfig_data { struct device_node *dn; struct property *prop; @@ -334,6 +351,18 @@ extern int of_parse_phandle_with_fixed_args(const struct device_node *np, extern int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name); +/* phandle iterator functions */ +extern int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count); + +extern int of_phandle_iterator_next(struct of_phandle_iterator *it); +extern int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size); + extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); @@ -585,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np, return NULL; } -static inline int 
of_parse_phandle_with_args(struct device_node *np, +static inline int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, @@ -608,6 +637,27 @@ static inline int of_count_phandle_with_args(struct device_node *np, return -ENOSYS; } +static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size) +{ + return 0; +} + static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; @@ -877,6 +927,12 @@ static inline int of_property_read_s32(const struct device_node *np, return of_property_read_u32(np, propname, (u32*) out_value); } +#define of_for_each_phandle(it, err, np, ln, cn, cc) \ + for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ + err = of_phandle_iterator_next(it); \ + err == 0; \ + err = of_phandle_iterator_next(it)) + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 01c0a556448b..37864734ca50 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -47,10 +47,6 @@ void __iomem *of_io_request_and_map(struct device_node *device, extern const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags); -extern int pci_register_io_range(phys_addr_t addr, resource_size_t size); -extern unsigned long pci_address_to_pio(phys_addr_t addr); -extern phys_addr_t pci_pio_to_address(unsigned long pio); - extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); extern struct of_pci_range *of_pci_range_parser_one( @@ -86,11 +82,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index, return NULL; } -static inline phys_addr_t pci_pio_to_address(unsigned long pio) -{ - return 0; -} - static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 2fbe8682a66f..901ec01c9fba 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -37,8 +37,9 @@ extern bool of_fdt_is_big_endian(const void *blob, unsigned long node); extern int of_fdt_match(const void *blob, unsigned long node, const char *const *compat); -extern void of_fdt_unflatten_tree(const unsigned long *blob, - struct device_node **mynodes); +extern void *of_fdt_unflatten_tree(const unsigned long *blob, + struct device_node *dad, + struct device_node **mynodes); /* TBD: Temporary export of fdt globals - remove when code fully merged */ extern int __initdata dt_root_addr_cells; diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index f8bcd0e21a26..bb3a5a2cd570 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -15,6 +15,7 @@ #define __LINUX_OF_GRAPH_H #include <linux/types.h> +#include <linux/errno.h> /** * struct of_endpoint - the OF graph endpoint data structure diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index ffbe4707d4aa..bd02b44902d0 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -12,7 +12,7 @@ extern int 
of_get_dma_window(struct device_node *dn, const char *prefix, size_t *size); extern void of_iommu_init(void); -extern struct iommu_ops *of_iommu_configure(struct device *dev, +extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np); #else @@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, } static inline void of_iommu_init(void) { } -static inline struct iommu_ops *of_iommu_configure(struct device *dev, +static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np) { return NULL; @@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev, #endif /* CONFIG_OF_IOMMU */ -void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); -struct iommu_ops *of_iommu_get_ops(struct device_node *np); +void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); +const struct iommu_ops *of_iommu_get_ops(struct device_node *np); extern struct of_device_id __iommu_of_table; diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h deleted file mode 100644 index e266caa36402..000000000000 --- a/include/linux/of_mtd.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> - * - * OF helpers for mtd. - * - * This file is released under the GPLv2 - */ - -#ifndef __LINUX_OF_MTD_H -#define __LINUX_OF_MTD_H - -#ifdef CONFIG_OF_MTD - -#include <linux/of.h> -int of_get_nand_ecc_mode(struct device_node *np); -int of_get_nand_ecc_step_size(struct device_node *np); -int of_get_nand_ecc_strength(struct device_node *np); -int of_get_nand_bus_width(struct device_node *np); -bool of_get_nand_on_flash_bbt(struct device_node *np); - -#else /* CONFIG_OF_MTD */ - -static inline int of_get_nand_ecc_mode(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_step_size(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_ecc_strength(struct device_node *np) -{ - return -ENOSYS; -} - -static inline int of_get_nand_bus_width(struct device_node *np) -{ - return -ENOSYS; -} - -static inline bool of_get_nand_on_flash_bbt(struct device_node *np) -{ - return false; -} - -#endif /* CONFIG_OF_MTD */ - -#endif /* __LINUX_OF_MTD_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index f6e9e85164e8..b969e9443962 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -8,7 +8,7 @@ struct pci_dev; struct of_phandle_args; struct device_node; -#ifdef CONFIG_OF +#ifdef CONFIG_OF_PCI int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index ad2f67054372..c201060e0c6d 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); int of_reserved_mem_device_init(struct device *dev); void of_reserved_mem_device_release(struct device *dev); +int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, + phys_addr_t align, + phys_addr_t start, + phys_addr_t end, + bool nomap, + phys_addr_t *res_base); + void fdt_init_reserved_mem(void); void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size); diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index 
d833eb4dd446..9e9d79e8efa5 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -7,161 +7,53 @@ * option) any later version. */ -/* Maximum Number of Chip Selects */ -#define GPMC_CS_NUM 8 +#include <linux/platform_data/gpmc-omap.h> #define GPMC_CONFIG_WP 0x00000005 -#define GPMC_IRQ_FIFOEVENTENABLE 0x01 -#define GPMC_IRQ_COUNT_EVENT 0x02 - -#define GPMC_BURST_4 4 /* 4 word burst */ -#define GPMC_BURST_8 8 /* 8 word burst */ -#define GPMC_BURST_16 16 /* 16 word burst */ -#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ -#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ -#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ -#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ - -/* bool type time settings */ -struct gpmc_bool_timings { - bool cycle2cyclediffcsen; - bool cycle2cyclesamecsen; - bool we_extra_delay; - bool oe_extra_delay; - bool adv_extra_delay; - bool cs_extra_delay; - bool time_para_granularity; -}; +/* IRQ numbers in GPMC IRQ domain for legacy boot use */ +#define GPMC_IRQ_FIFOEVENTENABLE 0 +#define GPMC_IRQ_COUNT_EVENT 1 -/* - * Note that all values in this struct are in nanoseconds except sync_clk - * (which is in picoseconds), while the register values are in gpmc_fck cycles. +/** + * gpmc_nand_ops - Interface between NAND and GPMC + * @nand_write_buffer_empty: get the NAND write buffer empty status. */ -struct gpmc_timings { - /* Minimum clock period for synchronous mode (in picoseconds) */ - u32 sync_clk; - - /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ - u32 cs_on; /* Assertion time */ - u32 cs_rd_off; /* Read deassertion time */ - u32 cs_wr_off; /* Write deassertion time */ - - /* ADV signal timings corresponding to GPMC_CONFIG3 */ - u32 adv_on; /* Assertion time */ - u32 adv_rd_off; /* Read deassertion time */ - u32 adv_wr_off; /* Write deassertion time */ - u32 adv_aad_mux_on; /* ADV assertion time for AAD */ - u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ - u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ - - /* WE signals timings corresponding to GPMC_CONFIG4 */ - u32 we_on; /* WE assertion time */ - u32 we_off; /* WE deassertion time */ - - /* OE signals timings corresponding to GPMC_CONFIG4 */ - u32 oe_on; /* OE assertion time */ - u32 oe_off; /* OE deassertion time */ - u32 oe_aad_mux_on; /* OE assertion time for AAD */ - u32 oe_aad_mux_off; /* OE deassertion time for AAD */ - - /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ - u32 page_burst_access; /* Multiple access word delay */ - u32 access; /* Start-cycle to first data valid delay */ - u32 rd_cycle; /* Total read cycle time */ - u32 wr_cycle; /* Total write cycle time */ - - u32 bus_turnaround; - u32 cycle2cycle_delay; - - u32 wait_monitoring; - u32 clk_activation; - - /* The following are only on OMAP3430 */ - u32 wr_access; /* WRACCESSTIME */ - u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ - - struct gpmc_bool_timings bool_timings; +struct gpmc_nand_ops { + bool (*nand_writebuffer_empty)(void); }; -/* Device timings in picoseconds */ -struct gpmc_device_timings { - u32 t_ceasu; /* address setup to CS valid */ - u32 t_avdasu; /* address setup to ADV valid */ - /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is - * of tusb using these timings even for sync whilst - * ideally for adv_rd/(wr)_off it should have considered - * t_avdh instead. 
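Looking back at the of.h hunk a little further up: the new of_phandle_iterator and the of_for_each_phandle() helper let callers walk a phandle list without repeated of_parse_phandle_with_args() calls. A hedged usage sketch (the "foo-list" and "#foo-cells" property names and the helper are invented; the iterator reports -ENOENT when the list is exhausted):

#include <linux/of.h>

static int foo_count_phandles(const struct device_node *np)
{
        struct of_phandle_iterator it;
        int count = 0, err;

        of_for_each_phandle(&it, err, np, "foo-list", "#foo-cells", 0)
                count++;

        return err == -ENOENT ? count : err;
}
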
This indirectly necessitates r/w - * variations of t_avdp as it is possible to have one - * sync & other async - */ - u32 t_avdp_r; /* ADV low time (what about t_cer ?) */ - u32 t_avdp_w; - u32 t_aavdh; /* address hold time */ - u32 t_oeasu; /* address setup to OE valid */ - u32 t_aa; /* access time from ADV assertion */ - u32 t_iaa; /* initial access time */ - u32 t_oe; /* access time from OE assertion */ - u32 t_ce; /* access time from CS asertion */ - u32 t_rd_cycle; /* read cycle time */ - u32 t_cez_r; /* read CS deassertion to high Z */ - u32 t_cez_w; /* write CS deassertion to high Z */ - u32 t_oez; /* OE deassertion to high Z */ - u32 t_weasu; /* address setup to WE valid */ - u32 t_wpl; /* write assertion time */ - u32 t_wph; /* write deassertion time */ - u32 t_wr_cycle; /* write cycle time */ - - u32 clk; - u32 t_bacc; /* burst access valid clock to output delay */ - u32 t_ces; /* CS setup time to clk */ - u32 t_avds; /* ADV setup time to clk */ - u32 t_avdh; /* ADV hold time from clk */ - u32 t_ach; /* address hold time from clk */ - u32 t_rdyo; /* clk to ready valid */ - - u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ - u32 t_ce_avd; /* CS on to ADV on delay */ - - /* XXX: check the possibility of combining - * cyc_aavhd_oe & cyc_aavdh_we - */ - u8 cyc_aavdh_oe;/* read address hold time in cycles */ - u8 cyc_aavdh_we;/* write address hold time in cycles */ - u8 cyc_oe; /* access time from OE assertion in cycles */ - u8 cyc_wpl; /* write deassertion time in cycles */ - u32 cyc_iaa; /* initial access time in cycles */ - - /* extra delays */ - bool ce_xdelay; - bool avd_xdelay; - bool oe_xdelay; - bool we_xdelay; -}; +struct gpmc_nand_regs; -struct gpmc_settings { - bool burst_wrap; /* enables wrap bursting */ - bool burst_read; /* enables read page/burst mode */ - bool burst_write; /* enables write page/burst mode */ - bool device_nand; /* device is NAND */ - bool sync_read; /* enables synchronous reads */ - bool sync_write; /* enables synchronous writes */ - bool wait_on_read; /* monitor wait on reads */ - bool wait_on_write; /* monitor wait on writes */ - u32 burst_len; /* page/burst length */ - u32 device_width; /* device bus width (8 or 16 bit) */ - u32 mux_add_data; /* multiplex address & data */ - u32 wait_pin; /* wait-pin to be used */ -}; +#if IS_ENABLED(CONFIG_OMAP_GPMC) +struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs); +#else +static inline gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs) +{ + return NULL; +} +#endif /* CONFIG_OMAP_GPMC */ + +/*--------------------------------*/ + +/* deprecated APIs */ +#if IS_ENABLED(CONFIG_OMAP_GPMC) +void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +#else +static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) +{ +} +#endif /* CONFIG_OMAP_GPMC */ +/*--------------------------------*/ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct gpmc_settings *gpmc_s, struct gpmc_device_timings *dev_t); -struct gpmc_nand_regs; struct device_node; -extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); extern int gpmc_get_client_irq(unsigned irq_config); extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 587bbdd31f5a..c726bd833761 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -21,8 +21,6 @@ struct mbox_client; struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, const 
char *chan_name); -void omap_mbox_save_ctx(struct mbox_chan *chan); -void omap_mbox_restore_ctx(struct mbox_chan *chan); void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); diff --git a/include/linux/oom.h b/include/linux/oom.h index 628a43242a34..83469522690a 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -50,28 +50,33 @@ enum oom_scan_t { OOM_SCAN_SELECT, /* always select this thread first */ }; -/* Thread is the potential origin of an oom condition; kill first on oom */ -#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) - extern struct mutex oom_lock; static inline void set_current_oom_origin(void) { - current->signal->oom_flags |= OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = true; } static inline void clear_current_oom_origin(void) { - current->signal->oom_flags &= ~OOM_FLAG_ORIGIN; + current->signal->oom_flag_origin = false; } static inline bool oom_task_origin(const struct task_struct *p) { - return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); + return p->signal->oom_flag_origin; } extern void mark_oom_victim(struct task_struct *tsk); +#ifdef CONFIG_MMU +extern void try_oom_reaper(struct task_struct *tsk); +#else +static inline void try_oom_reaper(struct task_struct *tsk) +{ +} +#endif + extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); @@ -102,13 +107,24 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p); static inline bool task_will_free_mem(struct task_struct *task) { + struct signal_struct *sig = task->signal; + /* * A coredumping process may sleep for an extended period in exit_mm(), * so the oom killer cannot assume that the process will promptly exit * and release memory. 
*/ - return (task->flags & PF_EXITING) && - !(task->signal->flags & SIGNAL_GROUP_COREDUMP); + if (sig->flags & SIGNAL_GROUP_COREDUMP) + return false; + + if (!(task->flags & PF_EXITING)) + return false; + + /* Make sure that the whole thread group is going down */ + if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT)) + return false; + + return true; } /* sysctls */ diff --git a/include/linux/padata.h b/include/linux/padata.h index 438694650471..113ee626a4dc 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst, extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, - cpumask_var_t cbcpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); extern int padata_register_cpumask_notifier(struct padata_instance *pinst, diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b052aa7b5b7..e5a32445f930 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -371,10 +371,15 @@ PAGEFLAG(Idle, idle, PF_ANY) #define PAGE_MAPPING_KSM 2 #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) +static __always_inline int PageAnonHead(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + static __always_inline int PageAnon(struct page *page) { page = compound_head(page); - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; + return PageAnonHead(page); } #ifdef CONFIG_KSM @@ -474,7 +479,7 @@ static inline void ClearPageCompound(struct page *page) } #endif -#define PG_head_mask ((1L << PG_head)) +#define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); @@ -665,7 +670,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) } #ifdef CONFIG_MMU -#define __PG_MLOCKED (1 << PG_mlocked) +#define __PG_MLOCKED (1UL << PG_mlocked) #else #define __PG_MLOCKED 0 #endif @@ -675,11 +680,11 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * these flags set. It they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ - (1 << PG_lru | 1 << PG_locked | \ - 1 << PG_private | 1 << PG_private_2 | \ - 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED) + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. @@ -690,10 +695,10 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent from reusing the page. 
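A brief illustration of why the page-flags masks above move from 1 to 1UL (plain C behaviour, not code from the patch): the literal 1 has type int, so once a flag bit reaches position 31 the shift overflows the 32-bit type and the value widened into an unsigned long mask comes out wrong on 64-bit builds.

unsigned long bad  = 1 << 31;   /* int-width shift: undefined in ISO C and
                                   typically widens to 0xffffffff80000000 */
unsigned long good = 1UL << 31; /* 0x0000000080000000, as intended */
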
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ - (1 << PG_private | 1 << PG_private_2) + (1UL << PG_private | 1UL << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { - return test_and_clear_bit(PAGE_EXT_YOUNG, - &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index e596d5d9540e..8b5e0a9f2431 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v) static inline int page_ref_count(struct page *page) { - return atomic_read(&page->_count); + return atomic_read(&page->_refcount); } static inline int page_count(struct page *page) { - return atomic_read(&compound_head(page)->_count); + return atomic_read(&compound_head(page)->_refcount); } static inline void set_page_count(struct page *page, int v) { - atomic_set(&page->_count, v); + atomic_set(&page->_refcount, v); if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) __page_ref_set(page, v); } @@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page) static inline void page_ref_add(struct page *page, int nr) { - atomic_add(nr, &page->_count); + atomic_add(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, nr); } static inline void page_ref_sub(struct page *page, int nr) { - atomic_sub(nr, &page->_count); + atomic_sub(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -nr); } static inline void page_ref_inc(struct page *page) { - 
atomic_inc(&page->_count); + atomic_inc(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, 1); } static inline void page_ref_dec(struct page *page) { - atomic_dec(&page->_count); + atomic_dec(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) __page_ref_mod(page, -1); } static inline int page_ref_sub_and_test(struct page *page, int nr) { - int ret = atomic_sub_and_test(nr, &page->_count); + int ret = atomic_sub_and_test(nr, &page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -nr, ret); @@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr) static inline int page_ref_dec_and_test(struct page *page) { - int ret = atomic_dec_and_test(&page->_count); + int ret = atomic_dec_and_test(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) __page_ref_mod_and_test(page, -1, ret); @@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page) static inline int page_ref_dec_return(struct page *page) { - int ret = atomic_dec_return(&page->_count); + int ret = atomic_dec_return(&page->_refcount); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) __page_ref_mod_and_return(page, -1, ret); @@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page) static inline int page_ref_add_unless(struct page *page, int nr, int u) { - int ret = atomic_add_unless(&page->_count, nr, u); + int ret = atomic_add_unless(&page->_refcount, nr, u); if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); @@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u) static inline int page_ref_freeze(struct page *page, int count) { - int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count); + int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) __page_ref_freeze(page, count, ret); @@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - atomic_set(&page->_count, count); + atomic_set(&page->_refcount, count); if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7e1ab155c67c..97354102794d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -90,12 +90,12 @@ void release_pages(struct page **pages, int nr, bool cold); /* * speculatively take a reference to a page. - * If the page is free (_count == 0), then _count is untouched, and 0 - * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * If the page is free (_refcount == 0), then _refcount is untouched, and 0 + * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. * * This function must be called inside the same rcu_read_lock() section as has * been used to lookup the page in the pagecache radix-tree (or page table): - * this allows allocators to use a synchronize_rcu() to stabilize _count. + * this allows allocators to use a synchronize_rcu() to stabilize _refcount. * * Unless an RCU grace period has passed, the count of all pages coming out * of the allocator must be considered unstable. 
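The pagemap.h comment here describes the speculative-get protocol in prose; a compressed sketch of the lookup side it refers to (simplified, with the retry loop and slot re-check omitted, and an invented helper name):

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static struct page *foo_lookup(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        rcu_read_lock();
        page = radix_tree_lookup(&mapping->page_tree, index);
        /* _refcount may be 0 here if the page is being freed; back off. */
        if (page && !page_cache_get_speculative(page))
                page = NULL;            /* lost the race, caller retries */
        rcu_read_unlock();

        return page;
}
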
page_count may return higher @@ -111,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold); * 2. conditionally increment refcount * 3. check the page is still in pagecache (if no, goto 1) * - * Remove-side that cares about stability of _count (eg. reclaim) has the + * Remove-side that cares about stability of _refcount (eg. reclaim) has the * following (with tree_lock held for write): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. remove page from pagecache @@ -518,33 +518,27 @@ void page_endio(struct page *page, int rw, int err); extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); /* - * Fault a userspace page into pagetables. Return non-zero on a fault. - * - * This assumes that two userspace pages are always sufficient. + * Fault one or two userspace pages into pagetables. + * Return -EINVAL if more than two pages would be needed. + * Return non-zero on a fault. */ static inline int fault_in_pages_writeable(char __user *uaddr, int size) { - int ret; + int span, ret; if (unlikely(size == 0)) return 0; + span = offset_in_page(uaddr) + size; + if (span > 2 * PAGE_SIZE) + return -EINVAL; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ ret = __put_user(0, uaddr); - if (ret == 0) { - char __user *end = uaddr + size - 1; - - /* - * If the page was already mapped, this will get a cache miss - * for sure, so try to avoid doing it. - */ - if (((unsigned long)uaddr & PAGE_MASK) != - ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); - } + if (ret == 0 && span > PAGE_SIZE) + ret = __put_user(0, uaddr + size - 1); return ret; } diff --git a/include/linux/pci.h b/include/linux/pci.h index 932ec74909c6..b67e4df20801 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -166,8 +166,6 @@ enum pci_dev_flags { PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), /* Flag for quirk use to store if quirk-specific ACS is enabled */ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), - /* Flag to indicate the device uses dma_alias_devfn */ - PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), /* Do not use bus resets for device */ @@ -273,7 +271,7 @@ struct pci_dev { u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ - u8 dma_alias_devfn;/* devfn of DMA alias, if any */ + unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ struct pci_driver *driver; /* which driver has allocated this device */ u64 dma_mask; /* Mask of the bits of bus address this @@ -1165,6 +1163,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); +int pci_register_io_range(phys_addr_t addr, resource_size_t size); +unsigned long pci_address_to_pio(phys_addr_t addr); +phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) @@ -1481,6 +1482,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } + static inline void 
pci_block_cfg_access(struct pci_dev *dev) { } static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) { return 0; } @@ -1664,7 +1667,7 @@ enum pci_fixup_pass { #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); -void pci_dev_specific_enable_acs(struct pci_dev *dev); +int pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } @@ -1673,7 +1676,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, { return -ENOTTY; } -static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { } +static inline int pci_dev_specific_enable_acs(struct pci_dev *dev) +{ + return -ENOTTY; +} #endif void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); @@ -1989,6 +1995,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) } #endif +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); +bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 247da8c95860..c58752fe16c4 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2604,6 +2604,24 @@ #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 +#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ +#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a +#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 +#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index 4f1089f2cc98..afcd130ab3a9 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -21,6 +21,8 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) +#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */ +#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) struct pcie_device { int irq; /* Service IRQ/MSI/MSI-X Vector */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4bc6dafb703e..56939d3f6e53 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -129,7 +129,4 @@ extern 
phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) -/* To avoid include hell, as printk can not declare this, we declare it here */ -DECLARE_PER_CPU(printk_func_t, printk_func); - #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 44f33834ad78..1a827cecd62f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -61,6 +61,14 @@ struct perf_callchain_entry { __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ }; +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short contexts; + bool contexts_maxed; +}; + struct perf_raw_record { u32 size; void *data; @@ -1061,20 +1069,36 @@ extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); -extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); -extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, - bool crosstask, bool add_mark); + u32 max_stack, bool crosstask, bool add_mark); extern int get_callchain_buffers(void); extern void put_callchain_buffers(void); extern int sysctl_perf_event_max_stack; +extern int sysctl_perf_event_max_contexts_per_stack; + +static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->contexts; + return 0; + } else { + ctx->contexts_maxed = true; + return -1; /* no more room, stop walking the stack */ + } +} -static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) { - if (entry->nr < sysctl_perf_event_max_stack) { + if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { + struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; + ++ctx->nr; return 0; } else { return -1; /* no more room, stop walking the stack */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 9ba59fcba549..a42e57da270d 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -144,6 +144,12 @@ struct pinctrl_desc { extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); extern void pinctrl_unregister(struct pinctrl_dev *pctldev); +extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, + struct pinctrl_desc *pctldesc, + void *driver_data); +extern void devm_pinctrl_unregister(struct device *dev, + struct pinctrl_dev *pctldev); + extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index dc9a13e5acda..be830b141d83 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h @@ -26,7 +26,7 @@ * * An example in pseudo code for a 
setup() callback: * - * void get_mac_addr(struct mvmem_device *nvmem, void *context) + * void get_mac_addr(struct nvmem_device *nvmem, void *context) * { * u8 *mac_addr = ethernet_pdata->mac_addr; * off_t offset = context; diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 03b6095d3b18..d15d8ba8cc24 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -21,15 +21,15 @@ * @dma_dev: required DMA master device * @src_id: src request line * @dst_id: dst request line - * @src_master: src master for transfers on allocated channel. - * @dst_master: dest master for transfers on allocated channel. + * @m_master: memory master for transfers on allocated channel + * @p_master: peripheral master for transfers on allocated channel */ struct dw_dma_slave { struct device *dma_dev; u8 src_id; u8 dst_id; - u8 src_master; - u8 dst_master; + u8 m_master; + u8 p_master; }; /** @@ -43,7 +43,7 @@ struct dw_dma_slave { * @block_size: Maximum block size supported by the controller * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master - * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) + * (in bytes, power of 2) */ struct dw_dma_platform_data { unsigned int nr_channels; @@ -55,7 +55,7 @@ struct dw_dma_platform_data { #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ unsigned char chan_priority; - unsigned short block_size; + unsigned int block_size; unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h new file mode 100644 index 000000000000..67ccdb0e1606 --- /dev/null +++ b/include/linux/platform_data/gpmc-omap.h @@ -0,0 +1,172 @@ +/* + * OMAP GPMC Platform data + * + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Roger Quadros <rogerq@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _GPMC_OMAP_H_ +#define _GPMC_OMAP_H_ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. 
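To make the unit convention above concrete: a driver has to turn these nanosecond values into gpmc_fck cycles before programming the registers. A minimal conversion sketch follows; the helper name and the simplified math are illustrative only and are not taken from this patch.

#include <linux/kernel.h>

/* Editorial sketch: round a nanosecond timing up to whole gpmc_fck cycles,
 * given the fclk period in picoseconds. Overflow handling is omitted. */
static unsigned int example_gpmc_ns_to_ticks(unsigned int time_ns,
					     unsigned int fclk_period_ps)
{
	return DIV_ROUND_UP(time_ns * 1000U, fclk_period_ps);
}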
+ */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + u32 adv_aad_mux_on; /* ADV assertion time for AAD */ + u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ + u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + u32 oe_aad_mux_on; /* OE assertion time for AAD */ + u32 oe_aad_mux_off; /* OE deassertion time for AAD */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS asertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavhd_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +/* Data for each chip select */ +struct gpmc_omap_cs_data { + bool valid; /* data is valid */ + bool is_nand; /* device within this CS is NAND */ + struct gpmc_settings *settings; + struct gpmc_device_timings *device_timings; + struct gpmc_timings *gpmc_timings; + struct platform_device *pdev; /* device within this CS region */ + unsigned int pdata_size; +}; + +struct gpmc_omap_platform_data { + struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; +}; + +#endif /* _GPMC_OMAP_H */ diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index ad3aa7b95f35..554b59801aa8 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -16,13 +16,16 @@ /** * struct inv_mpu6050_platform_data - Platform data for the mpu driver - * @orientation: Orientation matrix of the chip + * @orientation: Orientation matrix of the chip (deprecated in favor of + * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to 
* work on this platform. The orientation matricies are 3x3 rotation matricies * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. + * + * Deprecated in favor of mounting matrix retrieved from device-tree. */ struct inv_mpu6050_platform_data { __s8 orientation[9]; diff --git a/include/linux/platform_data/mailbox-omap.h b/include/linux/platform_data/mailbox-omap.h deleted file mode 100644 index 4631dbb4255e..000000000000 --- a/include/linux/platform_data/mailbox-omap.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * mailbox-omap.h - * - * Copyright (C) 2013 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _PLAT_MAILBOX_H -#define _PLAT_MAILBOX_H - -/* Interrupt register configuration types */ -#define MBOX_INTR_CFG_TYPE1 (0) -#define MBOX_INTR_CFG_TYPE2 (1) - -/** - * struct omap_mbox_dev_info - OMAP mailbox device attribute info - * @name: name of the mailbox device - * @tx_id: mailbox queue id used for transmitting messages - * @rx_id: mailbox queue id on which messages are received - * @irq_id: irq identifier number to use from the hwmod data - * @usr_id: mailbox user id for identifying the interrupt into - * the MPU interrupt controller. - */ -struct omap_mbox_dev_info { - const char *name; - u32 tx_id; - u32 rx_id; - u32 irq_id; - u32 usr_id; -}; - -/** - * struct omap_mbox_pdata - OMAP mailbox platform data - * @intr_type: type of interrupt configuration registers used - while programming mailbox queue interrupts - * @num_users: number of users (processor devices) that the mailbox - * h/w block can interrupt - * @num_fifos: number of h/w fifos within the mailbox h/w block - * @info_cnt: number of mailbox devices for the platform - * @info: array of mailbox device attributes - */ -struct omap_mbox_pdata { - u32 intr_type; - u32 num_users; - u32 num_fifos; - u32 info_cnt; - struct omap_mbox_dev_info *info; -}; - -#endif /* _PLAT_MAILBOX_H */ diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 090bbab0130a..17d57a18bac5 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -45,7 +45,6 @@ enum omap_ecc { }; struct gpmc_nand_regs { - void __iomem *gpmc_status; void __iomem *gpmc_nand_command; void __iomem *gpmc_nand_address; void __iomem *gpmc_nand_data; @@ -64,21 +63,24 @@ struct gpmc_nand_regs { void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; + /* Deprecated. 
Do not use */ + void __iomem *gpmc_status; }; struct omap_nand_platform_data { int cs; struct mtd_partition *parts; int nr_parts; - bool dev_ready; bool flash_bbt; enum nand_io xfer_type; int devsize; enum omap_ecc ecc_opt; - struct gpmc_nand_regs reg; - /* for passing the partitions */ - struct device_node *of_node; struct device_node *elm_of_node; + + /* deprecated */ + struct gpmc_nand_regs reg; + struct device_node *of_node; + bool dev_ready; }; #endif diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 753839187ba0..79b0e4cdb814 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -16,9 +16,11 @@ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * @open_drain: set the interrupt line to be open drain if possible. */ struct st_sensors_platform_data { u8 drdy_int_pin; + bool open_drain; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/poll.h b/include/linux/poll.h index 9fb4f40d9a26..37b057b63b46 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); -extern u64 select_estimate_accuracy(struct timespec *tv); +extern u64 select_estimate_accuracy(struct timespec64 *tv); static inline int poll_schedule(struct poll_wqueues *pwq, int state) @@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset) #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) -extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); +extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time); extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, - struct timespec *end_time); + struct timespec64 *end_time); extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec *end_time); + fd_set __user *exp, struct timespec64 *end_time); -extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); +extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, + long nsec); #endif /* _LINUX_POLL_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 9ccbdf2c1453..f4da695fd615 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -122,7 +122,19 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif -typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); +#ifdef CONFIG_PRINTK_NMI +extern void printk_nmi_init(void); +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_flush(void); +extern void printk_nmi_flush_on_panic(void); +#else +static inline void printk_nmi_init(void) { } +static inline void printk_nmi_enter(void) { } +static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_flush(void) { } +static inline void printk_nmi_flush_on_panic(void) { } +#endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) diff --git a/include/linux/pwm.h b/include/linux/pwm.h index b78d27c42629..908b67c847cd 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -5,59 +5,7 @@ #include <linux/mutex.h> #include <linux/of.h> -struct pwm_device; struct seq_file; - -#if IS_ENABLED(CONFIG_PWM) -/* - * pwm_request - request a PWM device - */ -struct pwm_device *pwm_request(int pwm_id, const char *label); - -/* - * pwm_free - free a PWM device - */ -void pwm_free(struct pwm_device *pwm); - -/* - * pwm_config - change a PWM device configuration - */ -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); - -/* - * pwm_enable - start a PWM output toggling - */ -int pwm_enable(struct pwm_device *pwm); - -/* - * pwm_disable - stop a PWM output toggling - */ -void pwm_disable(struct pwm_device *pwm); -#else -static inline struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - return ERR_PTR(-ENODEV); -} - -static inline void pwm_free(struct pwm_device *pwm) -{ -} - -static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - return -EINVAL; -} - -static inline int pwm_enable(struct pwm_device *pwm) -{ - return -EINVAL; -} - -static inline void pwm_disable(struct pwm_device *pwm) -{ -} -#endif - struct pwm_chip; /** @@ -94,8 +42,21 @@ struct pwm_args { enum { PWMF_REQUESTED = 1 << 0, - PWMF_ENABLED = 1 << 1, - PWMF_EXPORTED = 1 << 2, + PWMF_EXPORTED = 1 << 1, +}; + +/* + * struct pwm_state - state of a PWM channel + * @period: PWM period (in nanoseconds) + * @duty_cycle: PWM duty cycle (in nanoseconds) + * @polarity: PWM polarity + * @enabled: PWM enabled status + */ +struct pwm_state { + unsigned int period; + unsigned int duty_cycle; + enum pwm_polarity polarity; + bool enabled; }; /** @@ -106,11 +67,8 @@ enum { * @pwm: global index of the PWM device * @chip: PWM chip providing this PWM device * @chip_data: chip-private data associated with the PWM device - * @lock: used to serialize accesses to the PWM device where necessary - * @period: period of the PWM signal (in nanoseconds) - * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) - * @polarity: polarity of the PWM signal * @args: PWM arguments + * @state: curent PWM channel state */ struct pwm_device { const char *label; @@ -119,50 +77,68 @@ struct pwm_device { unsigned int pwm; struct pwm_chip *chip; void *chip_data; - struct mutex lock; - - unsigned int period; - unsigned int duty_cycle; - enum pwm_polarity polarity; struct pwm_args args; + struct pwm_state state; }; +/** + * pwm_get_state() - retrieve the current PWM state + * @pwm: PWM device + * @state: state to fill with the current PWM state + */ +static inline void pwm_get_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + *state = pwm->state; +} + static inline bool pwm_is_enabled(const struct pwm_device *pwm) { - return test_bit(PWMF_ENABLED, &pwm->flags); + struct pwm_state state; + + pwm_get_state(pwm, 
&state); + + return state.enabled; } static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) { if (pwm) - pwm->period = period; + pwm->state.period = period; } static inline unsigned int pwm_get_period(const struct pwm_device *pwm) { - return pwm ? pwm->period : 0; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.period; } static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) { if (pwm) - pwm->duty_cycle = duty; + pwm->state.duty_cycle = duty; } static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) { - return pwm ? pwm->duty_cycle : 0; -} + struct pwm_state state; -/* - * pwm_set_polarity - configure the polarity of a PWM signal - */ -int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); + pwm_get_state(pwm, &state); + + return state.duty_cycle; +} static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) { - return pwm ? pwm->polarity : PWM_POLARITY_NORMAL; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.polarity; } static inline void pwm_get_args(const struct pwm_device *pwm, @@ -171,12 +147,6 @@ static inline void pwm_get_args(const struct pwm_device *pwm, *args = pwm->args; } -static inline void pwm_apply_args(struct pwm_device *pwm) -{ - pwm_set_period(pwm, pwm->args.period); - pwm_set_polarity(pwm, pwm->args.polarity); -} - /** * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM @@ -185,6 +155,13 @@ static inline void pwm_apply_args(struct pwm_device *pwm) * @set_polarity: configure the polarity of this PWM * @enable: enable PWM output toggling * @disable: disable PWM output toggling + * @apply: atomically apply a new PWM config. The state argument + * should be adjusted with the real hardware config (if the + * approximate the period or duty_cycle value, state should + * reflect it) + * @get_state: get the current PWM state. This function is only + * called once per PWM device when the PWM chip is + * registered. * @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs */ @@ -197,6 +174,10 @@ struct pwm_ops { enum pwm_polarity polarity); int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); + void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); #ifdef CONFIG_DEBUG_FS void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); #endif @@ -232,6 +213,118 @@ struct pwm_chip { }; #if IS_ENABLED(CONFIG_PWM) +/* PWM user APIs */ +struct pwm_device *pwm_request(int pwm_id, const char *label); +void pwm_free(struct pwm_device *pwm); +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); +int pwm_adjust_config(struct pwm_device *pwm); + +/** + * pwm_config() - change a PWM device configuration + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + * + * Returns: 0 on success or a negative error code on failure. 
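For orientation, here is a minimal consumer-side sketch of the atomic PWM interface introduced above (pwm_get_state()/pwm_apply_state()); it mirrors what the pwm_config() helper below does. The function name and the 1 ms period / 50% duty cycle are arbitrary example values, not part of this patch.

#include <linux/pwm.h>

/* Editorial sketch, not part of the patch. */
static int example_pwm_setup(struct pwm_device *pwm)
{
	struct pwm_state state;

	/* Start from the current channel state... */
	pwm_get_state(pwm, &state);

	/* ...modify it... */
	state.period = 1000000;		/* 1 ms, in nanoseconds */
	state.duty_cycle = 500000;	/* 50% duty cycle */
	state.enabled = true;

	/* ...and apply everything in one atomic update. */
	return pwm_apply_state(pwm, &state);
}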
+ */ +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + if (duty_ns < 0 || period_ns < 0) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.duty_cycle == duty_ns && state.period == period_ns) + return 0; + + state.duty_cycle = duty_ns; + state.period = period_ns; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_set_polarity() - configure the polarity of a PWM signal + * @pwm: PWM device + * @polarity: new polarity of the PWM signal + * + * Note that the polarity cannot be configured while the PWM device is + * enabled. + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.polarity == polarity) + return 0; + + /* + * Changing the polarity of a running PWM without adjusting the + * dutycycle/period value is a bit risky (can introduce glitches). + * Return -EBUSY in this case. + * Note that this is allowed when using pwm_apply_state() because + * the user specifies all the parameters. + */ + if (state.enabled) + return -EBUSY; + + state.polarity = polarity; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_enable() - start a PWM output toggling + * @pwm: PWM device + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_enable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.enabled) + return 0; + + state.enabled = true; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_disable() - stop a PWM output toggling + * @pwm: PWM device + */ +static inline void pwm_disable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return; + + pwm_get_state(pwm, &state); + if (!state.enabled) + return; + + state.enabled = false; + pwm_apply_state(pwm, &state); +} + + +/* PWM provider APIs */ int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); @@ -257,6 +350,47 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm); bool pwm_can_sleep(struct pwm_device *pwm); #else +static inline struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_free(struct pwm_device *pwm) +{ +} + +static inline int pwm_apply_state(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return -ENOTSUPP; +} + +static inline int pwm_adjust_config(struct pwm_device *pwm) +{ + return -ENOTSUPP; +} + +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + return -EINVAL; +} + +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + return -ENOTSUPP; +} + +static inline int pwm_enable(struct pwm_device *pwm) +{ + return -EINVAL; +} + +static inline void pwm_disable(struct pwm_device *pwm) +{ +} + static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) { return -EINVAL; @@ -328,6 +462,34 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm) } #endif +static inline void pwm_apply_args(struct pwm_device *pwm) +{ + /* + * PWM users calling pwm_apply_args() expect to have a fresh config + * where the polarity and period are set according to pwm_args info. + * The problem is, polarity can only be changed when the PWM is + * disabled. 
+ * + * PWM drivers supporting hardware readout may declare the PWM device + * as enabled, and prevent polarity setting, which changes from the + * existing behavior, where all PWM devices are declared as disabled + * at startup (even if they are actually enabled), thus authorizing + * polarity setting. + * + * Instead of setting ->enabled to false, we call pwm_disable() + * before pwm_set_polarity() to ensure that everything is configured + * as expected, and the PWM is really disabled when the user request + * it. + * + * Note that PWM users requiring a smooth handover between the + * bootloader and the kernel (like critical regulators controlled by + * PWM devices) will have to switch to the atomic API and avoid calling + * pwm_apply_args(). + */ + pwm_disable(pwm); + pwm_set_polarity(pwm, pwm->args.polarity); +} + struct pwm_lookup { struct list_head list; const char *provider; diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 51a97ac8bfbf..cb4b7e8cee81 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -29,51 +29,45 @@ #include <linux/rcupdate.h> /* - * An indirect pointer (root->rnode pointing to a radix_tree_node, rather - * than a data item) is signalled by the low bit set in the root->rnode - * pointer. - * - * In this case root->height is > 0, but the indirect pointer tests are - * needed for RCU lookups (because root->height is unreliable). The only - * time callers need worry about this is when doing a lookup_slot under - * RCU. - * - * Indirect pointer in fact is also used to tag the last pointer of a node - * when it is shrunk, before we rcu free the node. See shrink code for - * details. + * The bottom two bits of the slot determine how the remaining bits in the + * slot are interpreted: + * + * 00 - data pointer + * 01 - internal entry + * 10 - exceptional entry + * 11 - locked exceptional entry + * + * The internal entry may be a pointer to the next level in the tree, a + * sibling entry, or an indicator that the entry in this slot has been moved + * to another location in the tree and the lookup should be restarted. While + * NULL fits the 'data pointer' pattern, it means that there is no entry in + * the tree for this index (no matter what level of the tree it is found at). + * This means that you cannot store NULL in the tree as a value for the index. */ -#define RADIX_TREE_INDIRECT_PTR 1 +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_INTERNAL_NODE 1UL + /* - * A common use of the radix tree is to store pointers to struct pages; - * but shmem/tmpfs needs also to store swap entries in the same tree: - * those are marked as exceptional entries to distinguish them. + * Most users of the radix tree store pointers but shmem/tmpfs stores swap + * entries in the same tree. They are marked as exceptional entries to + * distinguish them from pointers to struct page. * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. */ #define RADIX_TREE_EXCEPTIONAL_ENTRY 2 #define RADIX_TREE_EXCEPTIONAL_SHIFT 2 -#define RADIX_DAX_MASK 0xf -#define RADIX_DAX_SHIFT 4 -#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY) -#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK) -#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) -#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ - RADIX_DAX_SHIFT | (pmd ? 
RADIX_DAX_PMD : RADIX_DAX_PTE))) - -static inline int radix_tree_is_indirect_ptr(void *ptr) +static inline bool radix_tree_is_internal_node(void *ptr) { - return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR); + return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == + RADIX_TREE_INTERNAL_NODE; } /*** radix-tree API starts here ***/ #define RADIX_TREE_MAX_TAGS 3 -#ifdef __KERNEL__ +#ifndef RADIX_TREE_MAP_SHIFT #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) -#else -#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ #endif #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) @@ -86,16 +80,13 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* Height component in node->path */ -#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1) -#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1) - /* Internally used bits of node->count */ #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) struct radix_tree_node { - unsigned int path; /* Offset in parent & height from the bottom */ + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ unsigned int count; union { struct { @@ -115,13 +106,11 @@ struct radix_tree_node { /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ struct radix_tree_root { - unsigned int height; gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; }; #define RADIX_TREE_INIT(mask) { \ - .height = 0, \ .gfp_mask = (mask), \ .rnode = NULL, \ } @@ -131,11 +120,15 @@ struct radix_tree_root { #define INIT_RADIX_TREE(root, mask) \ do { \ - (root)->height = 0; \ (root)->gfp_mask = (mask); \ (root)->rnode = NULL; \ } while (0) +static inline bool radix_tree_empty(struct radix_tree_root *root) +{ + return root->rnode == NULL; +} + /** * Radix-tree synchronization * @@ -231,7 +224,7 @@ static inline void *radix_tree_deref_slot_protected(void **pslot, */ static inline int radix_tree_deref_retry(void *arg) { - return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); + return unlikely(radix_tree_is_internal_node(arg)); } /** @@ -252,8 +245,7 @@ static inline int radix_tree_exceptional_entry(void *arg) */ static inline int radix_tree_exception(void *arg) { - return unlikely((unsigned long)arg & - (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY)); + return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } /** @@ -266,7 +258,7 @@ static inline int radix_tree_exception(void *arg) */ static inline void radix_tree_replace_slot(void **pslot, void *item) { - BUG_ON(radix_tree_is_indirect_ptr(item)); + BUG_ON(radix_tree_is_internal_node(item)); rcu_assign_pointer(*pslot, item); } @@ -288,9 +280,12 @@ bool __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items); +struct radix_tree_node *radix_tree_replace_clear_tags( + struct radix_tree_root *root, + unsigned long index, void *entry); +unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, + void **results, unsigned long first_index, + unsigned int max_items); unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root 
*root, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); @@ -327,8 +322,9 @@ static inline void radix_tree_preload_end(void) * struct radix_tree_iter - radix tree iterator state * * @index: index of current slot - * @next_index: next-to-last index for this chunk + * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating + * @shift: shift for the node that holds our slots * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is @@ -341,8 +337,20 @@ struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif }; +static inline unsigned int iter_shift(struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ @@ -402,6 +410,12 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) return NULL; } +static inline unsigned long +__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) +{ + return iter->index + (slots << iter_shift(iter)); +} + /** * radix_tree_iter_next - resume iterating when the chunk may be invalid * @iter: iterator state @@ -413,7 +427,7 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline __must_check void **radix_tree_iter_next(struct radix_tree_iter *iter) { - iter->next_index = iter->index + 1; + iter->next_index = __radix_tree_iter_add(iter, 1); iter->tags = 0; return NULL; } @@ -427,7 +441,12 @@ void **radix_tree_iter_next(struct radix_tree_iter *iter) static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return iter->next_index - iter->index; + return (iter->next_index - iter->index) >> iter_shift(iter); +} + +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } /** @@ -445,24 +464,49 @@ static __always_inline void ** radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { + void *canon = slot; + iter->tags >>= 1; + if (unlikely(!iter->tags)) + return NULL; + while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(slot[1])) { + if (entry_to_node(slot[1]) == canon) { + iter->tags >>= 1; + iter->index = __radix_tree_iter_add(iter, 1); + slot++; + continue; + } + iter->next_index = __radix_tree_iter_add(iter, 1); + return NULL; + } if (likely(iter->tags & 1ul)) { - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); return slot + 1; } - if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { + if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); iter->tags >>= offset; - iter->index += offset + 1; + iter->index = __radix_tree_iter_add(iter, offset + 1); return slot + offset + 1; } } else { - long size = radix_tree_chunk_size(iter); + long count = radix_tree_chunk_size(iter); + void *canon = slot; - while (--size > 0) { + while (--count > 0) { slot++; - iter->index++; + iter->index = __radix_tree_iter_add(iter, 1); + + if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && + radix_tree_is_internal_node(*slot)) { + if (entry_to_node(*slot) == canon) + continue; + 
iter->next_index = iter->index; + break; + } + if (likely(*slot)) return slot; if (flags & RADIX_TREE_ITER_CONTIG) { @@ -476,34 +520,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) } /** - * radix_tree_for_each_chunk - iterate over chunks - * - * @slot: the void** variable for pointer to chunk first slot - * @root: the struct radix_tree_root pointer - * @iter: the struct radix_tree_iter pointer - * @start: iteration starting index - * @flags: RADIX_TREE_ITER_* and tag index - * - * Locks can be released and reacquired between iterations. - */ -#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ - for (slot = radix_tree_iter_init(iter, start) ; \ - (slot = radix_tree_next_chunk(root, iter, flags)) ;) - -/** - * radix_tree_for_each_chunk_slot - iterate over slots in one chunk - * - * @slot: the void** variable, at the beginning points to chunk first slot - * @iter: the struct radix_tree_iter pointer - * @flags: RADIX_TREE_ITER_*, should be constant - * - * This macro is designed to be nested inside radix_tree_for_each_chunk(). - * @slot points to the radix tree slot, @iter->index contains its index. - */ -#define radix_tree_for_each_chunk_slot(slot, iter, flags) \ - for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) - -/** * radix_tree_for_each_slot - iterate over non-empty slots * * @slot: the void** variable for pointer to slot diff --git a/include/linux/random.h b/include/linux/random.h index 9c29122037f9..e47e533742b5 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,6 @@ extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -void generate_random_uuid(unsigned char uuid_out[16]); extern int random_int_secret_init(void); #ifndef MODULE diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 5a0b64cf68b4..b0f305e77b7f 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class; extern struct lock_class_key reservation_seqcount_class; extern const char reservation_seqcount_string[]; +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; struct fence __rcu *shared[]; }; +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + * @staged: staged copy of shared fences for RCU updates + */ struct reservation_object { struct ww_mutex lock; seqcount_t seq; @@ -68,6 +83,10 @@ struct reservation_object { #define reservation_object_assert_held(obj) \ lockdep_assert_held(&(obj)->lock.base) +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ static inline void reservation_object_init(struct reservation_object *obj) { @@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj) obj->staged = NULL; } +/** + * reservation_object_fini - destroys a reservation object + * @obj: the 
reservation object + */ static inline void reservation_object_fini(struct reservation_object *obj) { @@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj) ww_mutex_destroy(&obj->lock); } +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. + */ static inline struct reservation_object_list * reservation_object_get_list(struct reservation_object *obj) { @@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj) reservation_object_held(obj)); } +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. The obj->lock must be held. + * + * RETURNS + * The exclusive fence or NULL + */ static inline struct fence * reservation_object_get_excl(struct reservation_object *obj) { @@ -120,6 +162,35 @@ reservation_object_get_excl(struct reservation_object *obj) reservation_object_held(obj)); } +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. + * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct fence *fence; + unsigned seq; +retry: + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + fence = rcu_dereference(obj->fence_excl); + if (read_seqcount_retry(&obj->seq, seq)) { + rcu_read_unlock(); + goto retry; + } + fence = fence_get(fence); + rcu_read_unlock(); + return fence; +} + int reservation_object_reserve_shared(struct reservation_object *obj); void reservation_object_add_shared_fence(struct reservation_object *obj, struct fence *fence); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index d1c12d160ace..d37fbb34d06f 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -156,6 +156,7 @@ extern void downgrade_write(struct rw_semaphore *sem); */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); # define down_write_nest_lock(sem, nest_lock) \ @@ -176,6 +177,7 @@ extern void up_read_non_owner(struct rw_semaphore *sem); # define down_read_nested(sem, subclass) down_read(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) +# define down_write_killable_nested(sem, subclass) down_write_killable(sem) # define down_read_non_owner(sem) down_read(sem) # define up_read_non_owner(sem) up_read(sem) #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 31bd0d97d178..253538f29ade 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -521,6 +521,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_REAPED 21 /* mm has been already reaped */ #define MMF_INIT_MASK 
(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -668,6 +669,7 @@ struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; + atomic_t oom_victims; /* # of TIF_MEDIE threads in this thread group */ struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ @@ -792,7 +794,11 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif - oom_flags_t oom_flags; + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ @@ -1533,6 +1539,7 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; unsigned :0; /* force alignment to the next boundary */ /* unserialized, strictly 'current' */ @@ -2248,6 +2255,7 @@ static inline void memalloc_noio_restore(unsigned int flags) #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ @@ -2271,6 +2279,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) + /* * task->jobctl flags */ @@ -2721,14 +2732,26 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct * mm) +static inline void mmdrop(struct mm_struct *mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. 
Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* @@ -2757,7 +2780,14 @@ static inline int copy_thread_tls( } #endif extern void flush_thread(void); -extern void exit_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif extern void exit_files(struct task_struct *); extern void __cleanup_sighand(struct sighand_struct *); @@ -2977,7 +3007,7 @@ static inline int object_is_on_stack(void *obj) return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } -extern void thread_info_cache_init(void); +extern void thread_stack_cache_init(void); #ifdef CONFIG_DEBUG_STACK_USAGE static inline unsigned long stack_not_used(struct task_struct *p) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index dacb5e711994..de1f64318fc4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -765,6 +765,8 @@ struct sctp_info { __u8 sctpi_s_disable_fragments; __u8 sctpi_s_v4mapped; __u8 sctpi_s_frag_interleave; + __u32 sctpi_s_type; + __u32 __reserved3; }; struct sctp_infox { diff --git a/include/linux/selection.h b/include/linux/selection.h index 85193aa8c1e3..8e4624efdb6f 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -24,10 +24,10 @@ extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); extern int console_blanked; -extern unsigned char color_table[]; -extern int default_red[]; -extern int default_grn[]; -extern int default_blu[]; +extern const unsigned char color_table[]; +extern unsigned char default_red[]; +extern unsigned char default_grn[]; +extern unsigned char default_blu[]; extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); extern u16 screen_glyph(struct vc_data *vc, int offset); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e0582106ef4f..ead97654c4e9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) static inline int raw_read_seqcount_latch(seqcount_t *s) { - return lockless_dereference(s->sequence); + int seq = READ_ONCE(s->sequence); + /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ + smp_read_barrier_depends(); + return seq; } /** @@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * unsigned seq, idx; * * do { - * seq = lockless_dereference(latch->seq); + * seq = raw_read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 434879759725..48ec7651989b 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -36,6 +36,7 @@ struct plat_serial8250_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned old); @@ -148,6 +149,7 @@ extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int 
serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cbfcf38e220d..a3d7c0d4a03e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -123,6 +123,7 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); @@ -281,6 +282,8 @@ struct uart_state { enum uart_pm_state pm_state; struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; struct uart_port *uart_port; }; diff --git a/include/linux/signal.h b/include/linux/signal.h index 3fbe81444d31..b63f63eaa39c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -28,6 +28,21 @@ struct sigpending { sigset_t signal; }; +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif + /* * Define some primitives to manipulate sigset_t. */ @@ -385,7 +400,9 @@ int unhandled_signal(struct task_struct *tsk, int sig); #else #define rt_sigmask(sig) sigmask(sig) #endif -#define siginmask(sig, mask) (rt_sigmask(sig) & (mask)) + +#define siginmask(sig, mask) \ + ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) @@ -406,14 +423,10 @@ int unhandled_signal(struct task_struct *tsk, int sig); rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK)) +#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) +#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) +#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) +#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) #define sig_user_defined(t, signr) \ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c413c588a24f..ee38a4127475 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -471,9 +471,9 @@ enum { SKB_GSO_GRE_CSUM = 1 << 8, - SKB_GSO_IPIP = 1 << 9, + SKB_GSO_IPXIP4 = 1 << 9, - SKB_GSO_SIT = 1 << 10, + SKB_GSO_IPXIP6 = 1 << 10, SKB_GSO_UDP_TUNNEL = 1 << 11, @@ -2467,7 +2467,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, static inline struct page *dev_alloc_pages(unsigned int order) { - return __dev_alloc_pages(GFP_ATOMIC, order); + return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); } /** @@ -2485,7 +2485,7 @@ static inline struct page *__dev_alloc_page(gfp_t gfp_mask) static inline struct page *dev_alloc_page(void) { - return __dev_alloc_page(GFP_ATOMIC); + return 
dev_alloc_pages(0); } /** diff --git a/include/linux/slab.h b/include/linux/slab.h index 508bd827e6dc..aeb3e6d00a66 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size) } #endif /* !CONFIG_SLOB */ -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; -void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); /* @@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; -void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) { @@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f #endif #ifdef CONFIG_TRACING -extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; +extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_slab_alignment; + int node, size_t size) __assume_slab_alignment __malloc; #else static __always_inline void * kmem_cache_alloc_node_trace(struct kmem_cache *s, @@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #ifdef CONFIG_TRACING -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #else static __always_inline void * kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 9edbbf352340..8694f7a5d92b 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -80,6 +80,10 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif +#ifdef CONFIG_SLAB_FREELIST_RANDOM + void *random_seq; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 665cd0cd18b8..d1faa019c02a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -111,22 +111,6 @@ static inline void sysfs_slab_remove(struct kmem_cache *s) } #endif - -/** - * virt_to_obj - returns address of the beginning of object. 
- * @s: object's kmem_cache - * @slab_page: address of slab page - * @x: address within object memory range - * - * Returns address of the beginning of object - */ -static inline void *virt_to_obj(struct kmem_cache *s, - const void *slab_page, - const void *x) -{ - return (void *)x - ((x - slab_page) % s->size); -} - void object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 857a9a1d82b5..1f03483f61e5 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -372,6 +372,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @unprepare_message: undo any work done by prepare_message(). * @spi_flash_read: to support spi-controller hardwares that provide * accelerated interface to read from flash devices. + * @flash_read_supported: spi device supports flash read * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). @@ -529,6 +530,7 @@ struct spi_master { struct spi_message *message); int (*spi_flash_read)(struct spi_device *spi, struct spi_flash_read_message *msg); + bool (*flash_read_supported)(struct spi_device *spi); /* * These hooks are for drivers that use a generic implementation @@ -1158,7 +1160,9 @@ struct spi_flash_read_message { /* SPI core interface for flash read support */ static inline bool spi_flash_read_supported(struct spi_device *spi) { - return spi->master->spi_flash_read ? true : false; + return spi->master->spi_flash_read && + (!spi->master->flash_read_supported || + spi->master->flash_read_supported(spi)); } int spi_flash_read(struct spi_device *spi, diff --git a/include/linux/stm.h b/include/linux/stm.h index 1a79ed8e43da..8369d8a8cabd 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -50,6 +50,8 @@ struct stm_device; * @sw_end: last STP master available to software * @sw_nchannels: number of STP channels per master * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @hw_override: masters in the STP stream will not match the ones + * assigned by software, but are up to the STM hardware * @packet: callback that sends an STP packet * @mmio_addr: mmap callback, optional * @link: called when a new stm_source gets linked to us, optional @@ -85,6 +87,7 @@ struct stm_data { unsigned int sw_end; unsigned int sw_nchannels; unsigned int sw_mmiosz; + unsigned int hw_override; ssize_t (*packet)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, diff --git a/include/linux/string.h b/include/linux/string.h index d3993a79a325..26b6f6a66f83 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new); extern void kfree_const(const void *x); -extern char *kstrdup(const char *s, gfp_t gfp); +extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index dabe643eb5fa..5ce9538f290e 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -3,6 +3,8 @@ #include <linux/types.h> +struct file; + /* Descriptions of the types of units to * print in */ enum string_size_units { @@ -68,4 +70,8 @@ static inline int 
string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } +char *kstrdup_quotable(const char *src, gfp_t gfp); +char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); +char *kstrdup_quotable_file(struct file *file, gfp_t gfp); + #endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h new file mode 100644 index 000000000000..451771d9b9c0 --- /dev/null +++ b/include/linux/stringhash.h @@ -0,0 +1,76 @@ +#ifndef __LINUX_STRINGHASH_H +#define __LINUX_STRINGHASH_H + +#include <linux/compiler.h> /* For __pure */ +#include <linux/types.h> /* For u32, u64 */ + +/* + * Routines for hashing strings of bytes to a 32-bit hash value. + * + * These hash functions are NOT GUARANTEED STABLE between kernel + * versions, architectures, or even repeated boots of the same kernel. + * (E.g. they may depend on boot-time hardware detection or be + * deliberately randomized.) + * + * They are also not intended to be secure against collisions caused by + * malicious inputs; much slower hash functions are required for that. + * + * They are optimized for pathname components, meaning short strings. + * Even if a majority of files have longer names, the dynamic profile of + * pathname components skews short due to short directory names. + * (E.g. /usr/lib/libsesquipedalianism.so.3.141.) + */ + +/* + * Version 1: one byte at a time. Example of use: + * + * unsigned long hash = init_name_hash; + * while (*p) + * hash = partial_name_hash(tolower(*p++), hash); + * hash = end_name_hash(hash); + * + * Although this is designed for bytes, fs/hfsplus/unicode.c + * abuses it to hash 16-bit values. + */ + +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash() 0 + +/* partial hash update function. Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits) + */ +static inline unsigned long end_name_hash(unsigned long hash) +{ + return (unsigned int)hash; +} + +/* + * Version 2: One word (32 or 64 bits) at a time. + * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h> + * exists, which describes major Linux platforms like x86 and ARM), then + * this computes a different hash function much faster. + * + * If not set, this falls back to a wrapper around the preceding. + */ +extern unsigned int __pure full_name_hash(const char *, unsigned int); + +/* + * A hash_len is a u64 with the hash of a string in the low + * half and the length in the high half. 
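[Editor's sketch, not part of the patch, of how the hash_len packing below is meant to be consumed; it assumes only the helpers declared in this header, and example_hash_name is a hypothetical function name:]

static u64 example_hash_name(const char *name)
{
	u64 hl = hashlen_string(name);		/* hash and length in one pass */
	u32 hash = hashlen_hash(hl);		/* low 32 bits: the hash */
	u32 len = hashlen_len(hl);		/* high 32 bits: the length */

	return hashlen_create(hash, len);	/* repacks to the same value as hl */
}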
+ */ +#define hashlen_hash(hashlen) ((u32)(hashlen)) +#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +extern u64 __pure hashlen_string(const char *name); + +#endif /* __LINUX_STRINGHASH_H */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 6a241a277249..899791573a40 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -127,7 +127,7 @@ struct rpc_authops { void (*destroy)(struct rpc_auth *); struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); - struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int); + struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); int (*list_pseudoflavors)(rpc_authflavor_t *, int); rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); int (*flavor2info)(rpc_authflavor_t, @@ -167,6 +167,7 @@ void rpc_destroy_authunix(void); struct rpc_cred * rpc_lookup_cred(void); struct rpc_cred * rpc_lookup_cred_nonblock(void); +struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t); struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); int rpcauth_register(const struct rpc_authops *); int rpcauth_unregister(const struct rpc_authops *); @@ -178,7 +179,7 @@ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t, int rpcauth_get_gssinfo(rpc_authflavor_t, struct rpcsec_gss_info *); int rpcauth_list_flavors(rpc_authflavor_t *, int); -struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); +struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); @@ -201,9 +202,28 @@ char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline struct rpc_cred * get_rpccred(struct rpc_cred *cred) { - atomic_inc(&cred->cr_count); + if (cred != NULL) + atomic_inc(&cred->cr_count); return cred; } +/** + * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer + * @cred: cred of which to take a reference + * + * In some cases, we may have a pointer to a credential to which we + * want to take a reference, but don't already have one. Because these + * objects are freed using RCU, we can access the cr_count while its + * on its way to destruction and only take a reference if it's not already + * zero. 
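[Editor's sketch, not part of the patch, of the calling pattern this helper is designed for; the RCU-managed credential pointer passed in is hypothetical:]

static struct rpc_cred *example_get_cred(struct rpc_cred __rcu **credp)
{
	struct rpc_cred *cred;

	rcu_read_lock();
	cred = rcu_dereference(*credp);		/* hypothetical RCU-managed pointer */
	if (cred)
		cred = get_rpccred_rcu(cred);	/* NULL if cr_count already hit zero */
	rcu_read_unlock();

	return cred;				/* caller does put_rpccred() when done */
}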
+ */ +static inline struct rpc_cred * +get_rpccred_rcu(struct rpc_cred *cred) +{ + if (atomic_inc_not_zero(&cred->cr_count)) + return cred; + return NULL; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 9a7ddbaf116e..b6810c92b8bb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -137,8 +137,6 @@ struct rpc_create_args { #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) struct rpc_clnt *rpc_create(struct rpc_create_args *args); -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, - struct rpc_xprt *xprt); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, const struct rpc_program *, u32); struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); @@ -176,6 +174,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); +size_t rpc_max_bc_payload(struct rpc_clnt *); unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 807371357160..59cbf16eaeb5 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -158,9 +158,9 @@ typedef __be32 rpc_fraghdr; /* * Note that RFC 1833 does not put any size restrictions on the - * netid string, but all currently defined netid's fit in 4 bytes. + * netid string, but all currently defined netid's fit in 5 bytes. */ -#define RPCBIND_MAXNETIDLEN (4u) +#define RPCBIND_MAXNETIDLEN (5u) /* * Universal addresses are introduced in RFC 1833 and further spelled diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3081339968c3..d6917b896d3a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct xdr_buf *rcvbuf); /* svc_rdma_marshal.c */ -extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *); +extern int svc_rdma_xdr_decode_req(struct xdr_buf *); extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, struct rpcrdma_msg *, enum rpcrdma_errcode, __be32 *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index b7dabc4baafd..79ba50856707 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -84,6 +84,7 @@ struct svc_xprt { struct net *xpt_net; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ }; static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index c00f53a4ccdd..91d5a5d6f52b 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -16,6 +16,7 @@ #include <linux/sunrpc/cache.h> #include <linux/sunrpc/gss_api.h> #include <linux/hash.h> +#include <linux/stringhash.h> #include <linux/cred.h> struct svc_cred { @@ -165,41 +166,18 @@ extern int svcauth_unix_set_client(struct svc_rqst *rqstp); extern int unix_gid_cache_create(struct net *net); extern void unix_gid_cache_destroy(struct net *net); -static inline unsigned long hash_str(char *name, int bits) +/* + * The <stringhash.h> functions are good enough that we don't need to + * use hash_32() on them; 
just extracting the high bits is enough. + */ +static inline unsigned long hash_str(char const *name, int bits) { - unsigned long hash = 0; - unsigned long l = 0; - int len = 0; - unsigned char c; - do { - if (unlikely(!(c = *name++))) { - c = (char)len; len = -1; - } - l = (l << 8) | c; - len++; - if ((len & (BITS_PER_LONG/8-1))==0) - hash = hash_long(hash^l, BITS_PER_LONG); - } while (len); - return hash >> (BITS_PER_LONG - bits); + return hashlen_hash(hashlen_string(name)) >> (32 - bits); } -static inline unsigned long hash_mem(char *buf, int length, int bits) +static inline unsigned long hash_mem(char const *buf, int length, int bits) { - unsigned long hash = 0; - unsigned long l = 0; - int len = 0; - unsigned char c; - do { - if (len == length) { - c = (char)len; len = -1; - } else - c = *buf++; - l = (l << 8) | c; - len++; - if ((len & (BITS_PER_LONG/8-1))==0) - hash = hash_long(hash^l, BITS_PER_LONG); - } while (len); - return hash >> (BITS_PER_LONG - bits); + return full_name_hash(buf, length) >> (32 - bits); } #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index fb0d212e0d3a..5e3e1b63dbb3 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -142,6 +142,7 @@ struct rpc_xprt_ops { int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); int (*bc_up)(struct svc_serv *serv, struct net *net); + size_t (*bc_maxpayload)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); @@ -296,6 +297,7 @@ struct xprt_create { size_t addrlen; const char *servername; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *bc_xps; unsigned int flags; }; diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 767190b01363..39267dc3486a 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -52,7 +52,9 @@ #define RPCRDMA_DEF_SLOT_TABLE (128U) #define RPCRDMA_MAX_SLOT_TABLE (256U) -#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ +#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ +#define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */ +#define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */ /* Memory registration strategies, by number. * This is part of a kernel / user space API. Do not remove. */ diff --git a/include/linux/swap.h b/include/linux/swap.h index ad220359f1b0..0af2bb2028fd 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -316,6 +316,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ +extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h new file mode 100644 index 000000000000..c6ffe8b0725c --- /dev/null +++ b/include/linux/sync_file.h @@ -0,0 +1,57 @@ +/* + * include/linux/sync_file.h + * + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_SYNC_FILE_H +#define _LINUX_SYNC_FILE_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/fence.h> + +struct sync_file_cb { + struct fence_cb cb; + struct fence *fence; + struct sync_file *sync_file; +}; + +/** + * struct sync_file - sync file to export to the userspace + * @file: file representing this fence + * @kref: reference count on fence. + * @name: name of sync_file. Useful for debugging + * @sync_file_list: membership in global file list + * @num_fences: number of sync_pts in the fence + * @wq: wait queue for fence signaling + * @status: 0: signaled, >0:active, <0: error + * @cbs: sync_pts callback information + */ +struct sync_file { + struct file *file; + struct kref kref; + char name[32]; +#ifdef CONFIG_DEBUG_FS + struct list_head sync_file_list; +#endif + int num_fences; + + wait_queue_head_t wq; + atomic_t status; + + struct sync_file_cb cbs[]; +}; + +struct sync_file *sync_file_create(struct fence *fence); + +#endif /* _LINUX_SYNC_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d795472c54d8..d02239022bd0 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -371,10 +371,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, size_t sigsetsize); asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t __user *uinfo); -asmlinkage long sys_kill(int pid, int sig); -asmlinkage long sys_tgkill(int tgid, int pid, int sig); -asmlinkage long sys_tkill(int pid, int sig); -asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo); +asmlinkage long sys_kill(pid_t pid, int sig); +asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); +asmlinkage long sys_tkill(pid_t pid, int sig); +asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); asmlinkage long sys_sgetmask(void); asmlinkage long sys_ssetmask(int newmask); asmlinkage long sys_signal(int sig, __sighandler_t handler); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 1b8a5a7876ce..ee517bef0db0 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -335,11 +335,14 @@ struct thermal_genl_event { * @get_trend: a pointer to a function that reads the sensor temperature trend. * @set_emul_temp: a pointer to a function that sets sensor emulated * temperature. + * @set_trip_temp: a pointer to a function that sets the trip temperature on + * hardware. 
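[Editor's sketch, not from the patch, of a sensor driver wiring up the new optional callback; my_get_temp and my_set_trip_temp are hypothetical driver functions defined elsewhere in such a driver:]

static int my_get_temp(void *data, int *temp);			/* hypothetical */
static int my_set_trip_temp(void *data, int trip, int temp);	/* hypothetical */

static const struct thermal_zone_of_device_ops my_sensor_ops = {
	.get_temp	= my_get_temp,
	.set_trip_temp	= my_set_trip_temp,	/* program the trip point into hardware */
};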
*/ struct thermal_zone_of_device_ops { int (*get_temp)(void *, int *); int (*get_trend)(void *, long *); int (*set_emul_temp)(void *, int); + int (*set_trip_temp)(void *, int, int); }; /** diff --git a/include/linux/time64.h b/include/linux/time64.h index 367d5af899e8..7e5d2fa9ac46 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec * # define timespec64_equal timespec_equal # define timespec64_compare timespec_compare # define set_normalized_timespec64 set_normalized_timespec -# define timespec64_add_safe timespec_add_safe # define timespec64_add timespec_add # define timespec64_sub timespec_sub # define timespec64_valid timespec_valid @@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); -/* - * timespec64_add_safe assumes both values are positive and checks for - * overflow. It will return TIME_T_MAX if the returned value would be - * smaller then either of the arguments. - */ -extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, - const struct timespec64 rhs); - - static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) { @@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) #endif +/* + * timespec64_add_safe assumes both values are positive and checks for + * overflow. It will return TIME64_MAX in case of overflow. + */ +extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, + const struct timespec64 rhs); + #endif /* _LINUX_TIME64_H */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 37dbacf84849..816b7543f81b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv, struct timespec64 ts64; if (!tv) + return do_sys_settimeofday64(NULL, tz); + + if (!timespec_valid(tv)) return -EINVAL; ts64 = timespec_to_timespec64(*tv); diff --git a/include/linux/timer.h b/include/linux/timer.h index 61aa61dc410c..20ac746f3eb3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -145,6 +145,8 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), 0) +#define setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) #define setup_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), 0) #define setup_deferrable_timer_on_stack(timer, fn, data) \ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 222f6aa0418f..be007610ceb0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -154,21 +154,6 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc); -struct ring_buffer_event * -trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, - int type, unsigned long len, - unsigned long flags, int pc); -void trace_buffer_unlock_commit(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc); -void trace_buffer_unlock_commit_regs(struct trace_array *tr, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned 
long flags, int pc, - struct pt_regs *regs); -void trace_current_buffer_discard_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event); void tracing_record_cmdline(struct task_struct *tsk); @@ -229,7 +214,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_WAS_ENABLED_BIT, - TRACE_EVENT_FL_USE_CALL_FILTER_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, @@ -244,7 +228,6 @@ enum { * WAS_ENABLED - Set and stays set when an event was ever enabled * (used for module unloading, if a module event is enabled, * it is best to clear the buffers that used it). - * USE_CALL_FILTER - For trace internal events, don't use file filter * TRACEPOINT - Event is a tracepoint * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe @@ -255,7 +238,6 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), - TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), @@ -407,16 +389,12 @@ enum event_trigger_type { ETT_SNAPSHOT = (1 << 1), ETT_STACKTRACE = (1 << 2), ETT_EVENT_ENABLE = (1 << 3), + ETT_EVENT_HIST = (1 << 4), + ETT_HIST_ENABLE = (1 << 5), }; extern int filter_match_preds(struct event_filter *filter, void *rec); -extern int filter_check_discard(struct trace_event_file *file, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); -extern int call_filter_check_discard(struct trace_event_call *call, void *rec, - struct ring_buffer *buffer, - struct ring_buffer_event *event); extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, void *rec); extern void event_triggers_post_call(struct trace_event_file *file, @@ -450,100 +428,6 @@ trace_trigger_soft_disabled(struct trace_event_file *file) return false; } -/* - * Helper function for event_trigger_unlock_commit{_regs}(). - * If there are event triggers attached to this event that requires - * filtering against its fields, then they wil be called as the - * entry already holds the field information of the current event. - * - * It also checks if the event should be discarded or not. - * It is to be discarded if the event is soft disabled and the - * event was only recorded to process triggers, or if the event - * filter is active and this event did not match the filters. - * - * Returns true if the event is discarded, false otherwise. 
- */ -static inline bool -__event_trigger_test_discard(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, - enum event_trigger_type *tt) -{ - unsigned long eflags = file->flags; - - if (eflags & EVENT_FILE_FL_TRIGGER_COND) - *tt = event_triggers_call(file, entry); - - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags)) - ring_buffer_discard_commit(buffer, event); - else if (!filter_check_discard(file, entry, buffer, event)) - return false; - - return true; -} - -/** - * event_trigger_unlock_commit - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. - */ -static inline void -event_trigger_unlock_commit(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - -/** - * event_trigger_unlock_commit_regs - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event - * @buffer: The ring buffer that the event is being written to - * @event: The event meta data in the ring buffer - * @entry: The event itself - * @irq_flags: The state of the interrupts at the start of the event - * @pc: The state of the preempt count at the start of the event. - * - * This is a helper function to handle triggers that require data - * from the event itself. It also tests the event against filters and - * if the event is soft disabled and should be discarded. - * - * Same as event_trigger_unlock_commit() but calls - * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). 
- */ -static inline void -event_trigger_unlock_commit_regs(struct trace_event_file *file, - struct ring_buffer *buffer, - struct ring_buffer_event *event, - void *entry, unsigned long irq_flags, int pc, - struct pt_regs *regs) -{ - enum event_trigger_type tt = ETT_NONE; - - if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(file->tr, buffer, event, - irq_flags, pc, regs); - - if (tt) - event_triggers_post_call(file, tt, entry); -} - #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); #else diff --git a/include/linux/tty.h b/include/linux/tty.h index 3b09f235db66..40144f382516 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -228,7 +228,8 @@ struct tty_port { int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned long flags; /* TTY flags ASY_*/ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ unsigned char console:1, /* port is a console */ low_latency:1; /* optional: tune for latency */ struct mutex mutex; /* Locking */ @@ -242,6 +243,18 @@ struct tty_port { struct kref kref; /* Ref counter */ }; +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ + /* * Where all of the state associated with a tty is kept while the tty * is open. 
Since the termios state should be kept even if the tty @@ -338,7 +351,6 @@ struct tty_file_private { #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ -#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ @@ -360,6 +372,16 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val) smp_mb(); } +static inline bool tty_io_error(struct tty_struct *tty) +{ + return test_bit(TTY_IO_ERROR, &tty->flags); +} + +static inline bool tty_throttled(struct tty_struct *tty) +{ + return test_bit(TTY_THROTTLED, &tty->flags); +} + #ifdef CONFIG_TTY extern void console_init(void); extern void tty_kref_put(struct tty_struct *tty); @@ -371,6 +393,7 @@ extern void proc_clear_tty(struct task_struct *p); extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); +extern const char *tty_name(const struct tty_struct *tty); #else static inline void console_init(void) { } @@ -391,6 +414,8 @@ static inline struct tty_struct *get_current_tty(void) /* tty_io.c */ static inline int __init tty_init(void) { return 0; } +static inline const char *tty_name(const struct tty_struct *tty) +{ return "(none)"; } #endif extern struct ktermios tty_std_termios; @@ -415,7 +440,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern const char *tty_name(const struct tty_struct *tty); extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); @@ -457,6 +481,7 @@ extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); extern bool tty_buffer_restart_work(struct tty_port *port); extern bool tty_buffer_cancel_work(struct tty_port *port); +extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, @@ -537,7 +562,67 @@ static inline struct tty_port *tty_port_get(struct tty_port *port) /* If the cts flow control is enabled, return true. 
*/ static inline bool tty_port_cts_enabled(struct tty_port *port) { - return port->flags & ASYNC_CTS_FLOW; + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CTS_FLOW, &port->iflags); + else + clear_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline bool tty_port_active(struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_ACTIVE, &port->iflags); + else + clear_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline bool tty_port_check_carrier(struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_CHECK_CD, &port->iflags); + else + clear_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline bool tty_port_suspended(struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_SUSPENDED, &port->iflags); + else + clear_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline bool tty_port_initialized(struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + if (val) + set_bit(TTY_PORT_INITIALIZED, &port->iflags); + else + clear_bit(TTY_PORT_INITIALIZED, &port->iflags); } extern struct tty_struct *tty_port_tty_get(struct tty_port *port); diff --git a/include/linux/types.h b/include/linux/types.h index 70dd3dfde631..baf718324f4a 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -156,7 +156,6 @@ typedef u32 dma_addr_t; typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ fmode_t; -typedef unsigned __bitwise__ oom_flags_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; diff --git a/include/linux/usb.h b/include/linux/usb.h index 6a9a0c28415d..eba1f10e8cfd 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -374,13 +374,12 @@ struct usb_bus { int devnum_next; /* Next open device number in * round-robin allocation */ + struct mutex devnum_next_mutex; /* devnum_next mutex */ struct usb_devmap devmap; /* device address allocation map */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ - struct mutex usb_address0_mutex; /* unaddressed device mutex */ - int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? @@ -720,7 +719,7 @@ extern void usb_enable_ltm(struct usb_device *udev); static inline bool usb_device_supports_ltm(struct usb_device *udev) { - if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) + if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) return false; return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT; } @@ -1069,7 +1068,7 @@ struct usbdrv_wrap { * for interfaces bound to this driver. * @soft_unbind: if set to 1, the USB core will not kill URBs and disable * endpoints before calling the driver's disconnect method. 
- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs + * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs * to initiate lower power link state transitions when an idle timeout * occurs. Device-initiated USB 3.0 link PM will still be allowed. * @@ -1569,7 +1568,7 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * Initializes a interrupt urb with the proper information needed to submit * it to a device. * - * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond). @@ -1595,7 +1594,7 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->complete = complete_fn; urb->context = context; - if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) { + if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = clamp(interval, 1, 16); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 5d4e151c49bf..fefe8b06a63d 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, * this driver will be bound to any available UDC. * @pending: UDC core private data used for deferred probe of this driver. + * @match_existing_only: If udc is not found, return an error and don't add this + * gadget driver to list of pending driver * * Devices are disabled till a gadget driver successfully bind()s, which * means the driver will handle setup() requests needed to enumerate (and @@ -1097,6 +1099,7 @@ struct usb_gadget_driver { char *udc_name; struct list_head pending; + unsigned match_existing_only:1; }; @@ -1223,9 +1226,13 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget, /* utility to simplify map/unmap of usb_requests to/from DMA */ +extern int usb_gadget_map_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); +extern void usb_gadget_unmap_request_by_dev(struct device *dev, + struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index b98f831dcda3..66fc13705ab7 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -181,6 +181,7 @@ struct usb_hcd { * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. 
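[Editor's aside on the logarithmic interval encoding described in the usb_fill_int_urb() comment above; illustration only, with bInterval standing in for an endpoint descriptor value:]

static unsigned int example_int_ep_period_us(unsigned int bInterval)
{
	/* HS/SS(+): period is 2^(bInterval-1) microframes of 125 us each */
	unsigned int interval = clamp_t(unsigned int, bInterval, 1, 16);

	return 125u << (interval - 1);	/* e.g. bInterval 4 -> 1000 us */
}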
*/ + struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 0b3da40a525e..d315c8907869 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -142,10 +142,11 @@ enum musb_vbus_id_status { }; #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) -void musb_mailbox(enum musb_vbus_id_status status); +int musb_mailbox(enum musb_vbus_id_status status); #else -static inline void musb_mailbox(enum musb_vbus_id_status status) +static inline int musb_mailbox(enum musb_vbus_id_status status) { + return 0; } #endif diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 24198e16f849..7a0350535cb1 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -72,37 +72,113 @@ enum otg_fsm_timer { NUM_OTG_FSM_TIMERS, }; -/* OTG state machine according to the OTG spec */ +/** + * struct otg_fsm - OTG state machine according to the OTG spec + * + * OTG hardware Inputs + * + * Common inputs for A and B device + * @id: TRUE for B-device, FALSE for A-device. + * @adp_change: TRUE when current ADP measurement (n) value, compared to the + * ADP measurement taken at n-2, differs by more than CADP_THR + * @power_up: TRUE when the OTG device first powers up its USB system and + * ADP measurement taken if ADP capable + * + * A-Device state inputs + * @a_srp_det: TRUE if the A-device detects SRP + * @a_vbus_vld: TRUE when VBUS voltage is in regulation + * @b_conn: TRUE if the A-device detects connection from the B-device + * @a_bus_resume: TRUE when the B-device detects that the A-device is signaling + * a resume (K state) + * B-Device state inputs + * @a_bus_suspend: TRUE when the B-device detects that the A-device has put the + * bus into suspend + * @a_conn: TRUE if the B-device detects a connection from the A-device + * @b_se0_srp: TRUE when the line has been at SE0 for more than the minimum + * time before generating SRP + * @b_ssend_srp: TRUE when the VBUS has been below VOTG_SESS_VLD for more than + * the minimum time before generating SRP + * @b_sess_vld: TRUE when the B-device detects that the voltage on VBUS is + * above VOTG_SESS_VLD + * @test_device: TRUE when the B-device switches to B-Host and detects an OTG + * test device. This must be set by host/hub driver + * + * Application inputs (A-Device) + * @a_bus_drop: TRUE when A-device application needs to power down the bus + * @a_bus_req: TRUE when A-device application wants to use the bus. + * FALSE to suspend the bus + * + * Application inputs (B-Device) + * @b_bus_req: TRUE during the time that the Application running on the + * B-device wants to use the bus + * + * Auxilary inputs (OTG v1.3 only. Obsolete now.) + * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD + * @b_bus_suspend: TRUE when the A-device detects that the B-device has put + * the bus into suspend + * @b_bus_resume: TRUE when the A-device detects that the B-device is signaling + * resume on the bus + * + * OTG Output status. Read only for users. 
Updated by OTG FSM helpers defined + * in this file + * + * Outputs for Both A and B device + * @drv_vbus: TRUE when A-device is driving VBUS + * @loc_conn: TRUE when the local device has signaled that it is connected + * to the bus + * @loc_sof: TRUE when the local device is generating activity on the bus + * @adp_prb: TRUE when the local device is in the process of doing + * ADP probing + * + * Outputs for B-device state + * @adp_sns: TRUE when the B-device is in the process of carrying out + * ADP sensing + * @data_pulse: TRUE when the B-device is performing data line pulsing + * + * Internal Variables + * + * a_set_b_hnp_en: TRUE when the A-device has successfully set the + * b_hnp_enable bit in the B-device. + * Unused as OTG fsm uses otg->host->b_hnp_enable instead + * b_srp_done: TRUE when the B-device has completed initiating SRP + * b_hnp_enable: TRUE when the B-device has accepted the + * SetFeature(b_hnp_enable) B-device. + * Unused as OTG fsm uses otg->gadget->b_hnp_enable instead + * a_clr_err: Asserted (by application ?) to clear a_vbus_err due to an + * overcurrent condition and causes the A-device to transition + * to a_wait_vfall + */ struct otg_fsm { /* Input */ int id; int adp_change; int power_up; - int test_device; - int a_bus_drop; - int a_bus_req; int a_srp_det; int a_vbus_vld; int b_conn; int a_bus_resume; int a_bus_suspend; int a_conn; - int b_bus_req; int b_se0_srp; int b_ssend_srp; int b_sess_vld; + int test_device; + int a_bus_drop; + int a_bus_req; + int b_bus_req; + /* Auxilary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; /* Output */ - int data_pulse; int drv_vbus; int loc_conn; int loc_sof; int adp_prb; int adp_sns; + int data_pulse; /* Internal variables */ int a_set_b_hnp_en; @@ -110,7 +186,7 @@ struct otg_fsm { int b_hnp_enable; int a_clr_err; - /* Informative variables */ + /* Informative variables. All unused as of now */ int a_bus_drop_inf; int a_bus_req_inf; int a_clr_err_inf; @@ -134,6 +210,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool state_changed; }; struct otg_fsm_ops { diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 6df2509033d7..2d095fc60204 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -1,7 +1,7 @@ /* * UUID/GUID definition * - * Copyright (C) 2010, Intel Corp. + * Copyright (C) 2010, 2016 Intel Corp. * Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or @@ -12,16 +12,17 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_UUID_H_ #define _LINUX_UUID_H_ #include <uapi/linux/uuid.h> +/* + * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") + * not including trailing NUL. 
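[Editor's sketch, not part of the patch, combining the string-length constant with the parsing helpers added below; the function name is hypothetical and the error convention assumes the usual 0-on-success return:]

static int example_parse_uuid(const char *s)
{
	char buf[UUID_STRING_LEN + 1];	/* +1 for the trailing NUL */
	uuid_le u;

	if (!uuid_is_valid(s))
		return -EINVAL;
	if (uuid_le_to_bin(s, &u))
		return -EINVAL;

	snprintf(buf, sizeof(buf), "%pUl", &u);	/* round-trip back to text */
	return 0;
}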
+ */ +#define UUID_STRING_LEN 36 static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) { @@ -33,7 +34,17 @@ static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) return memcmp(&u1, &u2, sizeof(uuid_be)); } +void generate_random_uuid(unsigned char uuid[16]); + extern void uuid_le_gen(uuid_le *u); extern void uuid_be_gen(uuid_be *u); +bool __must_check uuid_is_valid(const char *uuid); + +extern const u8 uuid_le_index[16]; +extern const u8 uuid_be_index[16]; + +int uuid_le_to_bin(const char *uuid, uuid_le *u); +int uuid_be_to_bin(const char *uuid, uuid_be *u); + #endif diff --git a/include/linux/verification.h b/include/linux/verification.h new file mode 100644 index 000000000000..a10549a6c7cd --- /dev/null +++ b/include/linux/verification.h @@ -0,0 +1,49 @@ +/* Signature verification + * + * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_VERIFICATION_H +#define _LINUX_VERIFICATION_H + +/* + * The use to which an asymmetric key is being put. + */ +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE, + VERIFYING_FIRMWARE_SIGNATURE, + VERIFYING_KEXEC_PE_SIGNATURE, + VERIFYING_KEY_SIGNATURE, + VERIFYING_KEY_SELF_SIGNATURE, + VERIFYING_UNSPECIFIED_SIGNATURE, + NR__KEY_BEING_USED_FOR +}; +extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION + +struct key; + +extern int verify_pkcs7_signature(const void *data, size_t len, + const void *raw_pkcs7, size_t pkcs7_len, + struct key *trusted_keys, + enum key_being_used_for usage, + int (*view_content)(void *ctx, + const void *data, size_t len, + size_t asn1hdrlen), + void *ctx); + +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +extern int verify_pefile_signature(const void *pebuf, unsigned pelen, + struct key *trusted_keys, + enum key_being_used_for usage); +#endif + +#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ +#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h deleted file mode 100644 index da2049b5161c..000000000000 --- a/include/linux/verify_pefile.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Signed PE file verification - * - * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#ifndef _LINUX_VERIFY_PEFILE_H -#define _LINUX_VERIFY_PEFILE_H - -#include <crypto/public_key.h> - -extern int verify_pefile_signature(const void *pebuf, unsigned pelen, - struct key *trusted_keyring, - enum key_being_used_for usage, - bool *_trusted); - -#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index d1f1d338af20..3d9d786a943c 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -4,10 +4,12 @@ #include <linux/spinlock.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/llist.h> #include <asm/page.h> /* pgprot_t */ #include <linux/rbtree.h> struct vm_area_struct; /* vma defining user mapping in mm_types.h */ +struct notifier_block; /* in notifier.h */ /* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ @@ -44,7 +46,7 @@ struct vmap_area { unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ - struct list_head purge_list; /* "lazy purge" list */ + struct llist_node purge_list; /* "lazy purge" list */ struct vm_struct *vm; struct rcu_head rcu_head; }; @@ -187,4 +189,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) #define VMALLOC_TOTAL 0UL #endif +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 73fae8c4a5fb..d2da8e053210 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, #ifdef CONFIG_NUMA extern unsigned long node_page_state(int node, enum zone_stat_item item); -extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #else #define node_page_state(node, item) global_page_state(item) -#define zone_statistics(_zl, _z, gfp) do { } while (0) #endif /* CONFIG_NUMA */ @@ -193,6 +191,10 @@ void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); +struct ctl_table; +int vmstat_refresh(struct ctl_table *, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); int calculate_pressure_threshold(struct zone *zone); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 1cc4c578deb9..94079bab9243 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -33,8 +33,8 @@ struct xattr_handler { struct inode *inode, const char *name, void *buffer, size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, - const char *name, const void *buffer, size_t size, - int flags); + struct inode *inode, const char *name, const void *buffer, + size_t size, int flags); }; const char *xattr_full_name(const struct xattr_handler *, const char *); @@ -54,7 +54,8 @@ int vfs_removexattr(struct dentry *, const char *); ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); +int generic_setxattr(struct dentry *dentry, struct inode *inode, + const char *name, const void *value, size_t size, int flags); int generic_removexattr(struct dentry *dentry, const char *name); ssize_t vfs_getxattr_alloc(struct dentry 
*dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 34eb16098a33..57a8e98f2708 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,10 +41,10 @@ struct zs_pool_stats { struct zs_pool; -struct zs_pool *zs_create_pool(const char *name, gfp_t flags); +struct zs_pool *zs_create_pool(const char *name); void zs_destroy_pool(struct zs_pool *pool); -unsigned long zs_malloc(struct zs_pool *pool, size_t size); +unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); void *zs_map_object(struct zs_pool *pool, unsigned long handle, diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 98a938aabdfb..7a8d6037a4bb 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -1,7 +1,7 @@ /* * v4l2-mc.h - Media Controller V4L2 types and prototypes * - * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> + * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org> * Copyright (C) 2006-2010 Nokia Corporation * Copyright (c) 2016 Intel Corporation. * diff --git a/include/misc/cxl.h b/include/misc/cxl.h index 7d5e2613c7b8..56560c5781b4 100644 --- a/include/misc/cxl.h +++ b/include/misc/cxl.h @@ -127,6 +127,14 @@ int cxl_afu_reset(struct cxl_context *ctx); void cxl_set_master(struct cxl_context *ctx); /* + * Sets the context to use real mode memory accesses to operate with + * translation disabled. Note that this only makes sense for kernel contexts + * under bare metal, and will not work with virtualisation. May only be + * performed on stopped contexts. + */ +int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode); + +/* * Map and unmap the AFU Problem Space area. The amount and location mapped * depends on if this context is a master or slave. 
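[Editor's note on the zsmalloc API change in the hunk above: the allocation mask now travels with each zs_malloc() call instead of being fixed at pool creation. A hedged sketch; the pool name, size, and function name are arbitrary:]

static int example_zs_user(void)
{
	struct zs_pool *pool = zs_create_pool("example");	/* no gfp at create time */
	unsigned long handle;

	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 64, GFP_KERNEL);	/* gfp supplied per allocation */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}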
*/ diff --git a/include/net/act_api.h b/include/net/act_api.h index 2cd9e9bb059a..9a9a8edc138f 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -192,7 +192,7 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, #else /* CONFIG_NET_CLS_ACT */ #define tc_no_actions(_exts) true -#define tc_for_each_action(_a, _exts) while (0) +#define tc_for_each_action(_a, _exts) while ((void)(_a), 0) #define tcf_action_stats_update(a, bytes, packets, lastuse) #endif /* CONFIG_NET_CLS_ACT */ diff --git a/include/net/compat.h b/include/net/compat.h index 48103cf94e97..13de0ccaa059 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); +struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, unsigned int); asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, diff --git a/include/net/fou.h b/include/net/fou.h index 19b8a0c62a98..f5cc6910a27e 100644 --- a/include/net/fou.h +++ b/include/net/fou.h @@ -9,11 +9,11 @@ #include <net/udp.h> size_t fou_encap_hlen(struct ip_tunnel_encap *e); -static size_t gue_encap_hlen(struct ip_tunnel_encap *e); +size_t gue_encap_hlen(struct ip_tunnel_encap *e); -int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, - u8 *protocol, struct flowi4 *fl4); -int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, - u8 *protocol, struct flowi4 *fl4); +int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, __be16 *sport, int type); +int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, __be16 *sport, int type); #endif diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 109e3ee9108c..5d683428fced 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -39,6 +39,11 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); +struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb); +int inet_gro_complete(struct sk_buff *skb, int nhoff); +struct sk_buff *inet_gso_segment(struct sk_buff *skb, + netdev_features_t features); + static inline void inet_ctl_sock_destroy(struct sock *sk) { if (sk) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index fb9e0153f4f2..43a5a0e4524c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -52,10 +52,70 @@ struct ip6_tnl { __u32 o_seqno; /* The last output seqno */ int hlen; /* tun_hlen + encap_hlen */ int tun_hlen; /* Precalculated header length */ + int encap_hlen; /* Encap header length (FOU,GUE) */ + struct ip_tunnel_encap encap; int mlink; +}; +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi6 *fl6); }; +#ifdef CONFIG_INET + +extern const struct ip6_tnl_encap_ops __rcu * + ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; + +int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops, + unsigned int num); +int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops, + unsigned int num); +int ip6_tnl_encap_setup(struct ip6_tnl *t, + struct ip_tunnel_encap *ipencap); + +static inline int ip6_encap_hlen(struct 
ip_tunnel_encap *e) +{ + const struct ip6_tnl_encap_ops *ops; + int hlen = -EINVAL; + + if (e->type == TUNNEL_ENCAP_NONE) + return 0; + + if (e->type >= MAX_IPTUN_ENCAP_OPS) + return -EINVAL; + + rcu_read_lock(); + ops = rcu_dereference(ip6tun_encaps[e->type]); + if (likely(ops && ops->encap_hlen)) + hlen = ops->encap_hlen(e); + rcu_read_unlock(); + + return hlen; +} + +static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t, + u8 *protocol, struct flowi6 *fl6) +{ + const struct ip6_tnl_encap_ops *ops; + int ret = -EINVAL; + + if (t->encap.type == TUNNEL_ENCAP_NONE) + return 0; + + if (t->encap.type >= MAX_IPTUN_ENCAP_OPS) + return -EINVAL; + + rcu_read_lock(); + ops = rcu_dereference(ip6tun_encaps[t->encap.type]); + if (likely(ops && ops->build_header)) + ret = ops->build_header(skb, &t->encap, protocol, fl6); + rcu_read_unlock(); + + return ret; +} + /* Tunnel encapsulation limit destination sub-option */ struct ipv6_tlv_tnl_enc_lim { @@ -80,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev); int ip6_tnl_get_iflink(const struct net_device *dev); int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); -#ifdef CONFIG_INET static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device *dev) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index d916b4315903..dbf444428437 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -171,22 +171,6 @@ struct ip_tunnel_net { struct ip_tunnel __rcu *collect_md_tun; }; -struct ip_tunnel_encap_ops { - size_t (*encap_hlen)(struct ip_tunnel_encap *e); - int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, - u8 *protocol, struct flowi4 *fl4); -}; - -#define MAX_IPTUN_ENCAP_OPS 8 - -extern const struct ip_tunnel_encap_ops __rcu * - iptun_encaps[MAX_IPTUN_ENCAP_OPS]; - -int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op, - unsigned int num); -int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, - unsigned int num); - static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, u8 tos, u8 ttl, __be32 label, @@ -251,8 +235,6 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); -int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, - u8 *protocol, struct flowi4 *fl4); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); @@ -271,9 +253,67 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +}; + +#define MAX_IPTUN_ENCAP_OPS 8 + +extern const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS]; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); + int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); +static inline int ip_encap_hlen(struct 
ip_tunnel_encap *e) +{ + const struct ip_tunnel_encap_ops *ops; + int hlen = -EINVAL; + + if (e->type == TUNNEL_ENCAP_NONE) + return 0; + + if (e->type >= MAX_IPTUN_ENCAP_OPS) + return -EINVAL; + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[e->type]); + if (likely(ops && ops->encap_hlen)) + hlen = ops->encap_hlen(e); + rcu_read_unlock(); + + return hlen; +} + +static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4) +{ + const struct ip_tunnel_encap_ops *ops; + int ret = -EINVAL; + + if (t->encap.type == TUNNEL_ENCAP_NONE) + return 0; + + if (t->encap.type >= MAX_IPTUN_ENCAP_OPS) + return -EINVAL; + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[t->encap.type]); + if (likely(ops && ops->build_header)) + ret = ops->build_header(skb, &t->encap, protocol, fl4); + rcu_read_unlock(); + + return ret; +} + /* Extract dsfield from inner protocol */ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, const struct sk_buff *skb) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index af4c10ebb241..cd6018a9ee24 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp); const char *ip_vs_state_name(__u16 proto, int state); void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); -int ip_vs_check_template(struct ip_vs_conn *ct); +int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); void ip_vs_random_dropentry(struct netns_ipvs *ipvs); int ip_vs_conn_init(void); void ip_vs_conn_cleanup(void); diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 9c5638ad872e..0dbce55437f2 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -28,8 +28,8 @@ struct nf_queue_handler { struct nf_hook_ops *ops); }; -void nf_register_queue_handler(const struct nf_queue_handler *qh); -void nf_unregister_queue_handler(void); +void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); +void nf_unregister_queue_handler(struct net *net); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); void nf_queue_entry_get_refs(struct nf_queue_entry *entry); diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index 38aa4983e2a9..36d723579af2 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -5,11 +5,13 @@ struct proc_dir_entry; struct nf_logger; +struct nf_queue_handler; struct netns_nf { #if defined CONFIG_PROC_FS struct proc_dir_entry *proc_netfilter; #endif + const struct nf_queue_handler __rcu *queue_handler; const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; #ifdef CONFIG_SYSCTL struct ctl_table_header *nf_log_dir_header; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f7efa88f210..3722dda0199d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -392,16 +392,20 @@ struct tc_cls_u32_offload { }; }; -static inline bool tc_should_offload(struct net_device *dev, u32 flags) +static inline bool tc_should_offload(const struct net_device *dev, + const struct tcf_proto *tp, u32 flags) { + const struct Qdisc *sch = tp->q; + const struct Qdisc_class_ops *cops = sch->ops->cl_ops; + if (!(dev->features & NETIF_F_HW_TC)) return false; - if (flags & TCA_CLS_FLAGS_SKIP_HW) return false; - if (!dev->netdev_ops->ndo_setup_tc) return false; + if (cops && cops->tcf_cl_offload) + return cops->tcf_cl_offload(tp->classid); return true; } diff --git a/include/net/pkt_sched.h 
b/include/net/pkt_sched.h index 401038d2f9b8..fea53f4d92ca 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) } struct qdisc_watchdog { + u64 last_expires; struct hrtimer timer; struct Qdisc *qdisc; }; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a1fd76c22a59..62d553184e91 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -168,6 +168,7 @@ struct Qdisc_class_ops { /* Filter manipulation */ struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); + bool (*tcf_cl_offload)(u32 classid); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); @@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ if (!sch->gso_skb) { sch->gso_skb = sch->dequeue(sch); + if (sch->gso_skb) { /* it's still part of the queue */ + qdisc_qstats_backlog_inc(sch, sch->gso_skb); sch->q.qlen++; + } - if (sch->gso_skb) } return sch->gso_skb; @@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) if (skb) { sch->gso_skb = NULL; + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; } else { skb = sch->dequeue(sch); diff --git a/include/net/sock.h b/include/net/sock.h index c9c8b19df27c..649d2a8c17fc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -382,8 +382,13 @@ struct sock { atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; + + /* + * Because of non-atomicity rules, all + * changes are protected by the socket lock. + */ kmemcheck_bitfield_begin(flags); - unsigned int sk_shutdown : 2, + unsigned int sk_padding : 2, sk_no_check_tx : 1, sk_no_check_rx : 1, sk_userlocks : 4, @@ -391,6 +396,7 @@ struct sock { sk_type : 16; #define SK_PROTOCOL_MAX U8_MAX kmemcheck_bitfield_end(flags); + int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; /* bytes per second */ @@ -418,6 +424,7 @@ struct sock { struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; + u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void *sk_user_data; diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 37dd534cbeab..c8a773ffe23b 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -239,12 +239,15 @@ struct ib_vendor_mad { #define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001) +#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F +#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5 + struct ib_class_port_info { u8 base_version; u8 class_version; __be16 capability_mask; - u8 reserved[3]; - u8 resp_time_value; + /* 27 bits for cap_mask2, 5 bits for resp_time */ + __be32 cap_mask2_resp_time; u8 redirect_gid[16]; __be32 redirect_tcslfl; __be16 redirect_lid; @@ -259,6 +262,59 @@ struct ib_class_port_info { __be32 trap_qkey; }; +/** + * ib_get_cpi_resp_time - Returns the resp_time value from + * cap_mask2_resp_time in ib_class_port_info. + * @cpi: A struct ib_class_port_info mad. + */ +static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi) +{ + return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) & + IB_CLASS_PORT_INFO_RESP_TIME_MASK); +} + +/** + * ib_set_cpi_resp_time - Sets the response time in an + * ib_class_port_info mad. + * @cpi: A struct ib_class_port_info. + * @rtime: The response time to set.
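As a usage sketch (not part of the patch itself): once an SA ClassPortInfo query completes, a consumer could read the response-time exponent and test a CapabilityMask2 bit with these accessors. The foo_ helper below is hypothetical; ib_get_cpi_capmask2() and IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT are defined a little further down in this series (ib_mad.h and ib_sa.h respectively).

#include <linux/kernel.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

/* hypothetical helper: does the SM advertise SendOnlyFullMember support? */
static bool foo_sa_has_sendonly_fullmember(struct ib_class_port_info *cpi)
{
	u8 resp_time = ib_get_cpi_resp_time(cpi);	/* low 5 bits */
	u32 capmask2 = ib_get_cpi_capmask2(cpi);	/* remaining 27 bits */

	pr_debug("SA resp_time=%u capmask2=0x%x\n", resp_time, capmask2);
	return capmask2 & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
}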
+ */ +static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi, + u8 rtime) +{ + cpi->cap_mask2_resp_time = + (cpi->cap_mask2_resp_time & + cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) | + cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK); +} + +/** + * ib_get_cpi_capmask2 - Returns the capmask2 value from + * cap_mask2_resp_time in ib_class_port_info. + * @cpi: A struct ib_class_port_info mad. + */ +static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi) +{ + return (be32_to_cpu(cpi->cap_mask2_resp_time) >> + IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE); +} + +/** + * ib_set_cpi_capmask2 - Sets the capmask2 in an + * ib_class_port_info mad. + * @cpi: A struct ib_class_port_info. + * @capmask2: The capmask2 to set. + */ +static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi, + u32 capmask2) +{ + cpi->cap_mask2_resp_time = + (cpi->cap_mask2_resp_time & + cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) | + cpu_to_be32(capmask2 << + IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE); +} + struct ib_mad_notice_attr { u8 generic_type; u8 prod_type_msb; diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index 0f3daae44bf9..b13419ce99ff 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -103,6 +103,9 @@ enum { IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, IB_OPCODE_COMPARE_SWAP = 0x13, IB_OPCODE_FETCH_ADD = 0x14, + /* opcode 0x15 is reserved */ + IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16, + IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17, /* real constants follow -- see comment about above IB_OPCODE() macro for more details */ @@ -129,6 +132,8 @@ enum { IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE), IB_OPCODE(RC, COMPARE_SWAP), IB_OPCODE(RC, FETCH_ADD), + IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE), + IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE), /* UC */ IB_OPCODE(UC, SEND_FIRST), diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index cdc1c81aa275..384041669489 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -94,6 +94,8 @@ enum ib_sa_selector { IB_SA_BEST = 3 }; +#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT BIT(12) + /* * Structures for SA records are named "struct ib_sa_xxx_rec." No * attempt is made to pack structures to match the physical layout of @@ -439,4 +441,14 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client, void *context, struct ib_sa_query **sa_query); +/* Support get SA ClassPortInfo */ +int ib_sa_classport_info_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + int timeout_ms, gfp_t gfp_mask, + void (*callback)(int status, + struct ib_class_port_info *resp, + void *context), + void *context, + struct ib_sa_query **sa_query); + #endif /* IB_SA_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index fb2cef4e9747..7e440d41487a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -217,9 +217,10 @@ enum ib_device_cap_flags { IB_DEVICE_CROSS_CHANNEL = (1 << 27), IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), - IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), + IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), IB_DEVICE_SG_GAPS_REG = (1ULL << 32), - IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33), + IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), + IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), }; enum ib_signature_prot_cap { @@ -402,56 +403,55 @@ enum ib_port_speed { IB_SPEED_EDR = 32 }; -struct ib_protocol_stats { - /* TBD... 
*/ -}; - -struct iw_protocol_stats { - u64 ipInReceives; - u64 ipInHdrErrors; - u64 ipInTooBigErrors; - u64 ipInNoRoutes; - u64 ipInAddrErrors; - u64 ipInUnknownProtos; - u64 ipInTruncatedPkts; - u64 ipInDiscards; - u64 ipInDelivers; - u64 ipOutForwDatagrams; - u64 ipOutRequests; - u64 ipOutDiscards; - u64 ipOutNoRoutes; - u64 ipReasmTimeout; - u64 ipReasmReqds; - u64 ipReasmOKs; - u64 ipReasmFails; - u64 ipFragOKs; - u64 ipFragFails; - u64 ipFragCreates; - u64 ipInMcastPkts; - u64 ipOutMcastPkts; - u64 ipInBcastPkts; - u64 ipOutBcastPkts; - - u64 tcpRtoAlgorithm; - u64 tcpRtoMin; - u64 tcpRtoMax; - u64 tcpMaxConn; - u64 tcpActiveOpens; - u64 tcpPassiveOpens; - u64 tcpAttemptFails; - u64 tcpEstabResets; - u64 tcpCurrEstab; - u64 tcpInSegs; - u64 tcpOutSegs; - u64 tcpRetransSegs; - u64 tcpInErrs; - u64 tcpOutRsts; -}; - -union rdma_protocol_stats { - struct ib_protocol_stats ib; - struct iw_protocol_stats iw; -}; +/** + * struct rdma_hw_stats + * @timestamp - Used by the core code to track when the last update was + * @lifespan - Used by the core code to determine how old the counters + * should be before being updated again. Stored in jiffies, defaults + * to 10 milliseconds, drivers can override the default by specifying + * their own value during their allocation routine. + * @names - Array of pointers to static names used for the counters in + * the sysfs directory. + * @num_counters - How many hardware counters there are. If names is + * shorter than this number, a kernel oops will result. Driver authors + * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters) + * in their code to prevent this. + * @value - Array of u64 counters that are accessed by the sysfs code and + * filled in by the driver's get_hw_stats routine + */ +struct rdma_hw_stats { + unsigned long timestamp; + unsigned long lifespan; + const char * const *names; + int num_counters; + u64 value[]; +}; + +#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 +/** + * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct + * for drivers. + * @names - Array of static const char * + * @num_counters - How many elements in the array + * @lifespan - How many milliseconds between updates + */ +static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( + const char * const *names, int num_counters, + unsigned long lifespan) +{ + struct rdma_hw_stats *stats; + + stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), + GFP_KERNEL); + if (!stats) + return NULL; + stats->names = names; + stats->num_counters = num_counters; + stats->lifespan = msecs_to_jiffies(lifespan); + + return stats; +} + /* Define bits for the various functionality this port needs to be supported by * the core. @@ -931,6 +931,13 @@ struct ib_qp_cap { u32 max_send_sge; u32 max_recv_sge; u32 max_inline_data; + + /* + * Maximum number of rdma_rw_ctx structures in flight at a time. + * ib_create_qp() will calculate the right amount of needed WRs + and MRs based on this.
+ */ + u32 max_rdma_ctxs; }; enum ib_sig_type { @@ -981,6 +988,7 @@ enum ib_qp_create_flags { IB_QP_CREATE_NETIF_QP = 1 << 5, IB_QP_CREATE_SIGNATURE_EN = 1 << 6, IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, + IB_QP_CREATE_SCATTER_FCS = 1 << 8, /* reserve bits 26-31 for low level drivers' internal use */ IB_QP_CREATE_RESERVED_START = 1 << 26, IB_QP_CREATE_RESERVED_END = 1 << 31, @@ -1002,7 +1010,11 @@ struct ib_qp_init_attr { enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; enum ib_qp_create_flags create_flags; - u8 port_num; /* special QP types only */ + + /* + * Only needed for special QP types, or when using the RW API. + */ + u8 port_num; }; struct ib_qp_open_attr { @@ -1421,9 +1433,14 @@ struct ib_qp { struct ib_pd *pd; struct ib_cq *send_cq; struct ib_cq *recv_cq; + spinlock_t mr_lock; + int mrs_used; + struct list_head rdma_mrs; + struct list_head sig_mrs; struct ib_srq *srq; struct ib_xrcd *xrcd; /* XRC TGT QPs only */ struct list_head xrcd_list; + /* count times opened, mcast attaches, flow attaches */ atomic_t usecnt; struct list_head open_list; @@ -1438,12 +1455,16 @@ struct ib_qp { struct ib_mr { struct ib_device *device; struct ib_pd *pd; - struct ib_uobject *uobject; u32 lkey; u32 rkey; u64 iova; u32 length; unsigned int page_size; + bool need_inval; + union { + struct ib_uobject *uobject; /* user */ + struct list_head qp_entry; /* FR */ + }; }; struct ib_mw { @@ -1685,8 +1706,29 @@ struct ib_device { struct iw_cm_verbs *iwcm; - int (*get_protocol_stats)(struct ib_device *device, - union rdma_protocol_stats *stats); + /** + * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the + * driver initialized data. The struct is kfree()'ed by the sysfs + * core when the device is removed. A lifespan of -1 in the return + * struct tells the core to set a default lifespan. + */ + struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, + u8 port_num); + /** + * get_hw_stats - Fill in the counter value(s) in the stats struct. + * @index - The index in the value array we wish to have updated, or + * num_counters if we want all stats updated + * Return codes - + * < 0 - Error, no counters updated + * index - Updated the single counter pointed to by index + * num_counters - Updated all counters (will reset the timestamp + * and prevent further calls for lifespan milliseconds) + * Drivers are allowed to update all counters in lieu of just the + * one given in index, at their option + */ + int (*get_hw_stats)(struct ib_device *device, + struct rdma_hw_stats *stats, + u8 port, int index); int (*query_device)(struct ib_device *device, struct ib_device_attr *device_attr, struct ib_udata *udata); @@ -1827,7 +1869,8 @@ struct ib_device { u32 max_num_sg); int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, - int sg_nents); + int sg_nents, + unsigned int *sg_offset); struct ib_mw * (*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata); @@ -1903,6 +1946,8 @@ struct ib_device { u8 node_type; u8 phys_port_cnt; struct ib_device_attr attrs; + struct attribute_group *hw_stats_ag; + struct rdma_hw_stats *hw_stats; /** * The following mandatory functions are used only at device @@ -2317,6 +2362,18 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, device->add_gid && device->del_gid; } +/* + * Check if the device supports READ W/ INVALIDATE. + */ +static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) +{ + /* + * iWarp drivers must support READ W/ INVALIDATE. No other protocol + has support for it yet.
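As a driver-side sketch (not part of this patch) of the alloc_hw_stats/get_hw_stats hooks documented above, built on rdma_alloc_hw_stats_struct(): the foo_ prefix and the counter names are hypothetical, and reading the hardware is left as a stub.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

#define FOO_NUM_COUNTERS 2

/* hypothetical counter names; one sysfs file is created per entry */
static const char * const foo_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *ibdev,
						u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < FOO_NUM_COUNTERS);

	return rdma_alloc_hw_stats_struct(foo_counter_names, FOO_NUM_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int foo_get_hw_stats(struct ib_device *ibdev,
			    struct rdma_hw_stats *stats,
			    u8 port, int index)
{
	int i;

	/* refresh every counter from the (stubbed-out) hardware */
	for (i = 0; i < stats->num_counters; i++)
		stats->value[i] = 0;	/* e.g. read a device register here */

	return stats->num_counters;	/* "all counters updated" */
}

A driver would point its ib_device .alloc_hw_stats and .get_hw_stats members at these callbacks before registering the device.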
+ */ + return rdma_protocol_iwarp(dev, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid, struct ib_gid_attr *attr); @@ -3111,29 +3168,23 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr); -int ib_map_mr_sg(struct ib_mr *mr, - struct scatterlist *sg, - int sg_nents, - unsigned int page_size); +int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset, unsigned int page_size); static inline int -ib_map_mr_sg_zbva(struct ib_mr *mr, - struct scatterlist *sg, - int sg_nents, - unsigned int page_size) +ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset, unsigned int page_size) { int n; - n = ib_map_mr_sg(mr, sg, sg_nents, page_size); + n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); mr->iova = 0; return n; } -int ib_sg_to_pages(struct ib_mr *mr, - struct scatterlist *sgl, - int sg_nents, - int (*set_page)(struct ib_mr *, u64)); +int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, + unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); diff --git a/include/rdma/mr_pool.h b/include/rdma/mr_pool.h new file mode 100644 index 000000000000..986010b812eb --- /dev/null +++ b/include/rdma/mr_pool.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2016 HGST, a Western Digital Company. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#ifndef _RDMA_MR_POOL_H +#define _RDMA_MR_POOL_H 1 + +#include <rdma/ib_verbs.h> + +struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list); +void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr); + +int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr, + enum ib_mr_type type, u32 max_num_sg); +void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list); + +#endif /* _RDMA_MR_POOL_H */ diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index a8696551abb1..9c9a27d42aaa 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -149,15 +149,15 @@ struct rvt_driver_params { int qpn_res_end; int nports; int npkeys; - u8 qos_shift; char cq_name[RVT_CQN_MAX]; int node; - int max_rdma_atomic; int psn_mask; int psn_shift; int psn_modify_mask; u32 core_cap_flags; u32 max_mad_size; + u8 qos_shift; + u8 max_rdma_atomic; }; /* Protection domain */ @@ -203,7 +203,9 @@ struct rvt_driver_provided { /* * Allocate a private queue pair data structure for driver specific - * information which is opaque to rdmavt. + * information which is opaque to rdmavt. Errors are returned via + * ERR_PTR(err). The driver is free to return NULL or a valid + * pointer. */ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); @@ -426,6 +428,15 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi) } /* + * Return the max atomic suitable for determining + * the size of the ack ring buffer in a QP. 
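A short usage sketch (not part of the patch) for the MR pool helpers declared in the new rdma/mr_pool.h above, keeping the MRs on the qp->rdma_mrs list that this series adds to struct ib_qp. The foo_ helpers, the pool size and the use of PAGE_SIZE are illustrative assumptions only.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

static int foo_init_mr_pool(struct ib_qp *qp, int nr_mrs, u32 max_sge)
{
	/* pre-allocate nr_mrs fast-registration MRs on the QP's rdma_mrs list */
	return ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
			       IB_MR_TYPE_MEM_REG, max_sge);
}

static struct ib_mr *foo_map_one(struct ib_qp *qp, struct scatterlist *sg,
				 int sg_nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!mr)
		return ERR_PTR(-EAGAIN);

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}

	/* caller posts an IB_WR_REG_MR work request and later returns the MR */
	return mr;
}

static void foo_destroy_mr_pool(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}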
+ */ +static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi) +{ + return rdi->dparms.max_rdma_atomic + 1; +} + +/* * Return the indexed PKEY from the port PKEY table. */ static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi, @@ -467,6 +478,7 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi, } struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); +void rvt_dealloc_device(struct rvt_dev_info *rdi); int rvt_register_device(struct rvt_dev_info *rvd); void rvt_unregister_device(struct rvt_dev_info *rvd); int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 497e59065c2c..6d23b879416a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -117,8 +117,9 @@ /* * Wait flags that would prevent any packet type from being sent. */ -#define RVT_S_ANY_WAIT_IO (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \ - RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM) +#define RVT_S_ANY_WAIT_IO \ + (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \ + RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM) /* * Wait flags that would prevent send work requests from making progress. @@ -210,8 +211,6 @@ struct rvt_mmap_info { unsigned size; }; -#define RVT_MAX_RDMA_ATOMIC 16 - /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. @@ -281,8 +280,7 @@ struct rvt_qp { atomic_t refcount ____cacheline_aligned_in_smp; wait_queue_head_t wait; - struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1] - ____cacheline_aligned_in_smp; + struct rvt_ack_entry *s_ack_queue; struct rvt_sge_state s_rdma_read_sge; spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */ diff --git a/include/rdma/rw.h b/include/rdma/rw.h new file mode 100644 index 000000000000..377d865e506d --- /dev/null +++ b/include/rdma/rw.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2016 HGST, a Western Digital Company. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
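As a usage sketch (not part of the patch) for the rdma_rw_ctx API declared in the remainder of this new rdma/rw.h header: a target-side RDMA READ of a scatterlist from a remote buffer. It assumes the QP was created with ib_qp_cap.max_rdma_ctxs sized appropriately, the rdma_rw_ctx is embedded in the caller's per-request structure, and the completion handler referenced by cqe calls rdma_rw_ctx_destroy(); the foo_ name and the abbreviated error handling are illustrative only.

#include <rdma/rw.h>

static int foo_read_from_remote(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
				u8 port_num, struct ib_cqe *cqe,
				struct scatterlist *sg, u32 sg_cnt,
				u64 remote_addr, u32 rkey)
{
	int ret;

	/* map the local SG list and build the RDMA READ work requests */
	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/*
	 * Post the chain; cqe->done fires once the transfer completes, and
	 * the normal teardown happens there via rdma_rw_ctx_destroy().
	 */
	ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}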
+ */ +#ifndef _RDMA_RW_H +#define _RDMA_RW_H + +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#include <rdma/mr_pool.h> + +struct rdma_rw_ctx { + /* number of RDMA READ/WRITE WRs (not counting MR WRs) */ + u32 nr_ops; + + /* tag for the union below: */ + u8 type; + + union { + /* for mapping a single SGE: */ + struct { + struct ib_sge sge; + struct ib_rdma_wr wr; + } single; + + /* for mapping of multiple SGEs: */ + struct { + struct ib_sge *sges; + struct ib_rdma_wr *wrs; + } map; + + /* for registering multiple WRs: */ + struct rdma_rw_reg_ctx { + struct ib_sge sge; + struct ib_rdma_wr wr; + struct ib_reg_wr reg_wr; + struct ib_send_wr inv_wr; + struct ib_mr *mr; + } *reg; + + struct { + struct rdma_rw_reg_ctx data; + struct rdma_rw_reg_ctx prot; + struct ib_send_wr sig_inv_wr; + struct ib_mr *sig_mr; + struct ib_sge sig_sge; + struct ib_sig_handover_wr sig_wr; + } *sig; + }; +}; + +int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, + struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, + u64 remote_addr, u32 rkey, enum dma_data_direction dir); +void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, + struct scatterlist *sg, u32 sg_cnt, + enum dma_data_direction dir); + +int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u8 port_num, struct scatterlist *sg, u32 sg_cnt, + struct scatterlist *prot_sg, u32 prot_sg_cnt, + struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, + enum dma_data_direction dir); +void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u8 port_num, struct scatterlist *sg, u32 sg_cnt, + struct scatterlist *prot_sg, u32 prot_sg_cnt, + enum dma_data_direction dir); + +struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); +int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, + struct ib_cqe *cqe, struct ib_send_wr *chain_wr); + +void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); +int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr); +void rdma_rw_cleanup_mrs(struct ib_qp *qp); + +#endif /* _RDMA_RW_H */ diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h index 11571b2a831e..20bf7eaef05a 100644 --- a/include/scsi/scsi_common.h +++ b/include/scsi/scsi_common.h @@ -63,6 +63,7 @@ extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); int scsi_set_sense_information(u8 *buf, int buf_len, u64 info); +int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd); extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, int desc_type); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index dbb8c640e26f..98d366b55770 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -16,6 +16,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int); extern int scsi_block_when_processing_errors(struct scsi_device *); extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, struct scsi_sense_hdr *sshdr); +extern int scsi_check_sense(struct scsi_cmnd *); static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr) { diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h new file mode 100644 index 000000000000..9b1d43d671a3 --- /dev/null +++ 
b/include/soc/nps/common.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef SOC_NPS_COMMON_H +#define SOC_NPS_COMMON_H + +#ifdef CONFIG_SMP +#define NPS_IPI_IRQ 5 +#endif + +#define NPS_HOST_REG_BASE 0xF6000000 + +#define NPS_MSU_BLKID 0x018 + +#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E +#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 +#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 + +#ifndef __ASSEMBLY__ + +/* Stubs for non-ARC builds, in order to increase compilation test coverage */ +#ifdef CONFIG_ARC +static inline void nps_ack_gic(void) +{ + __asm__ __volatile__ ( + " .word %0\n" + : + : "i"(CTOP_INST_RSPI_GIC_0_R12) + : "memory"); +} +#else +static inline void nps_ack_gic(void) { } +#define write_aux_reg(r, v) +#define read_aux_reg(r) 0 +#endif + +/* CPU global ID */ +struct global_id { + union { + struct { +#ifdef CONFIG_EZNPS_MTM_EXT + u32 __reserved:20, cluster:4, core:4, thread:4; +#else + u32 __reserved:24, cluster:4, core:4; +#endif + }; + u32 value; + }; +}; + +/* + * Convert logical to physical CPU IDs + * + * The conversion swaps bits 1 and 2 of the cluster id (out of 4 bits), + * so that quads of logical cluster ids are physically adjacent, + * rather than following the ids as they arrive with each cluster. + * The table below shows the 4x4 mesh of core clusters as laid out on the chip.
+ * Cluster ids are in format: logical (physical) + * + * ----------------- ------------------ + * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)| + * + * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)| + * ----------------- ------------------ + * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)| + * + * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)| + * ----------------- ------------------ + * 0 1 2 3 + */ +static inline int nps_cluster_logic_to_phys(int cluster) +{ +#ifdef __arc__ + __asm__ __volatile__( + " mov r3,%0\n" + " .short %1\n" + " .word %2\n" + " mov %0,r3\n" + : "+r"(cluster) + : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST), + "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM) + : "r3"); +#endif + + return cluster; +} + +#define NPS_CPU_TO_CLUSTER_NUM(cpu) \ + ({ struct global_id gid; gid.value = cpu; \ + nps_cluster_logic_to_phys(gid.cluster); }) + +struct nps_host_reg_address { + union { + struct { + u32 base:8, cl_x:4, cl_y:4, + blkid:6, reg:8, __reserved:2; + }; + u32 value; + }; +}; + +struct nps_host_reg_address_non_cl { + union { + struct { + u32 base:7, blkid:11, reg:12, __reserved:2; + }; + u32 value; + }; +}; + +static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg) +{ + struct nps_host_reg_address_non_cl reg_address; + + reg_address.value = NPS_HOST_REG_BASE; + reg_address.blkid = blkid; + reg_address.reg = reg; + + return (void *)reg_address.value; +} + +static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg) +{ + struct nps_host_reg_address reg_address; + u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu); + + reg_address.value = NPS_HOST_REG_BASE; + reg_address.cl_x = (cl >> 2) & 0x3; + reg_address.cl_y = cl & 0x3; + reg_address.blkid = blkid; + reg_address.reg = reg; + + return (void *)reg_address.value; +} +#endif /* __ASSEMBLY__ */ + +#endif /* SOC_NPS_COMMON_H */ diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index f86ef5ea9b01..67be2445941a 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -51,6 +51,16 @@ struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, void *filter_data); struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); +/* + * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word. + * If this flag is set, the dmaengine driver puts no restriction on + * the supported sample formats and leaves the DMA transfer size undefined. + * The DAI driver is responsible for disabling any unsupported formats in its + * configuration and for catching corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + /** * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data * @addr: Address of the DAI data source or destination register. @@ -63,6 +73,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) * requesting the DMA channel. * @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now */ struct snd_dmaengine_dai_dma_data { dma_addr_t addr; @@ -72,6 +83,7 @@ struct snd_dmaengine_dai_dma_data { void *filter_data; const char *chan_name; unsigned int fifo_size; + unsigned int flags; }; void snd_dmaengine_pcm_set_config_from_dai_data( diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h index e20d219a0304..babd445c7505 100644 --- a/include/sound/hda_chmap.h +++ b/include/sound/hda_chmap.h @@ -36,6 +36,8 @@ struct hdac_chmap_ops { int (*chmap_validate)(struct hdac_chmap *hchmap, int ca, int channels, unsigned char *chmap); + int (*get_spk_alloc)(struct hdac_device *hdac, int pcm_idx); + void (*get_chmap)(struct hdac_device *hdac, int pcm_idx, unsigned char *chmap); void (*set_chmap)(struct hdac_device *hdac, int pcm_idx, diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index f5842bcd9c94..796cabf6be5e 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h @@ -10,8 +10,8 @@ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); int snd_hdac_display_power(struct hdac_bus *bus, bool enable); void snd_hdac_i915_set_bclk(struct hdac_bus *bus); -int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); -int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, +int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate); +int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes); int snd_hdac_i915_init(struct hdac_bus *bus); int snd_hdac_i915_exit(struct hdac_bus *bus); @@ -28,12 +28,12 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable) static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus) { } -static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, - int rate) +static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec, + hda_nid_t nid, int rate) { return 0; } -static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, +static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes) { diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 07fa59237feb..b9593b201599 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -14,6 +14,8 @@ * @gtscap: gts capabilities pointer * @drsmcap: dma resume capabilities pointer * @hlink_list: link list of HDA links + * @lock: lock for link mgmt + * @cmd_dma_state: state of cmd DMAs: CORB and RIRB */ struct hdac_ext_bus { struct hdac_bus bus; @@ -27,6 +29,9 @@ struct hdac_ext_bus { void __iomem *drsmcap; struct list_head hlink_list; + + struct mutex lock; + bool cmd_dma_state; }; int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev, @@ -142,6 +147,9 @@ struct hdac_ext_link { void __iomem *ml_addr; /* link output stream reg pointer */ u32 lcaps; /* link capablities */ u16 lsdiid; /* link sdi identifier */ + + int ref_count; + struct list_head list; }; @@ -154,6 +162,11 @@ void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link, void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, int stream); +int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus, + struct hdac_ext_link *link); +int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus, + struct hdac_ext_link *link); + /* update register macro */ #define 
snd_hdac_updatel(addr, reg, mask, val) \ writel(((readl(addr + reg) & ~(mask)) | (val)), \ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h new file mode 100644 index 000000000000..fc3a481ad91e --- /dev/null +++ b/include/sound/hdmi-codec.h @@ -0,0 +1,100 @@ +/* + * hdmi-codec.h - HDMI Codec driver API + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Jyri Sarha <jsarha@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __HDMI_CODEC_H__ +#define __HDMI_CODEC_H__ + +#include <linux/hdmi.h> +#include <drm/drm_edid.h> +#include <sound/asoundef.h> +#include <uapi/sound/asound.h> + +/* + * Protocol between ASoC cpu-dai and HDMI-encoder + */ +struct hdmi_codec_daifmt { + enum { + HDMI_I2S, + HDMI_RIGHT_J, + HDMI_LEFT_J, + HDMI_DSP_A, + HDMI_DSP_B, + HDMI_AC97, + HDMI_SPDIF, + } fmt; + int bit_clk_inv:1; + int frame_clk_inv:1; + int bit_clk_master:1; + int frame_clk_master:1; +}; + +/* + * HDMI audio parameters + */ +struct hdmi_codec_params { + struct hdmi_audio_infoframe cea; + struct snd_aes_iec958 iec; + int sample_rate; + int sample_width; + int channels; +}; + +struct hdmi_codec_ops { + /* + * Called when ASoC starts an audio stream setup. + * Optional + */ + int (*audio_startup)(struct device *dev); + + /* + * Configures HDMI-encoder for audio stream. + * Mandatory + */ + int (*hw_params)(struct device *dev, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /* + * Shuts down the audio stream. + * Mandatory + */ + void (*audio_shutdown)(struct device *dev); + + /* + * Mute/unmute HDMI audio stream. + * Optional + */ + int (*digital_mute)(struct device *dev, bool enable); + + /* + * Provides EDID-Like-Data from connected HDMI device. 
+ * Optional + */ + int (*get_eld)(struct device *dev, uint8_t *buf, size_t len); +}; + +/* HDMI codec initalization data */ +struct hdmi_codec_pdata { + const struct hdmi_codec_ops *ops; + uint i2s:1; + uint spdif:1; + int max_i2s_channels; +}; + +#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec" + +#endif /* __HDMI_CODEC_H__ */ diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h index 0eed397aca8e..36f023acb201 100644 --- a/include/sound/pcm_iec958.h +++ b/include/sound/pcm_iec958.h @@ -6,4 +6,6 @@ int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs, size_t len); +int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params, + u8 *cs, size_t len); #endif diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 97069466c38d..3101d53468aa 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -100,6 +100,7 @@ struct device; { .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols} +/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */ #define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \ { .id = snd_soc_dapm_micbias, .name = wname, \ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ @@ -473,7 +474,7 @@ enum snd_soc_dapm_type { snd_soc_dapm_out_drv, /* output driver */ snd_soc_dapm_adc, /* analog to digital converter */ snd_soc_dapm_dac, /* digital to analog converter */ - snd_soc_dapm_micbias, /* microphone bias (power) */ + snd_soc_dapm_micbias, /* microphone bias (power) - DEPRECATED: use snd_soc_dapm_supply */ snd_soc_dapm_mic, /* microphone */ snd_soc_dapm_hp, /* headphones */ snd_soc_dapm_spk, /* speaker */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 02b4a215fd75..fd7b58a58d6f 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1002,7 +1002,7 @@ struct snd_soc_dai_link { */ const char *platform_name; struct device_node *platform_of_node; - int be_id; /* optional ID for machine driver BE identification */ + int id; /* optional ID for machine driver link identification */ const struct snd_soc_pcm_stream *params; unsigned int num_params; @@ -1683,6 +1683,9 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card, int snd_soc_register_dai(struct snd_soc_component *component, struct snd_soc_dai_driver *dai_drv); +struct snd_soc_dai *snd_soc_find_dai( + const struct snd_soc_dai_link_component *dlc); + #include <sound/soc-dai.h> #ifdef CONFIG_DEBUG_FS diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index c3371fa548cb..4ac24f5a3308 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -74,6 +74,7 @@ enum iscsit_transport_type { ISCSI_IWARP_TCP = 3, ISCSI_IWARP_SCTP = 4, ISCSI_INFINIBAND = 5, + ISCSI_CXGBIT = 6, }; /* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */ @@ -890,4 +891,30 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session) } extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); + +static inline void iscsit_thread_check_cpumask( + struct iscsi_conn *conn, + struct task_struct *p, + int mode) +{ + /* + * mode == 1 signals iscsi_target_tx_thread() usage. + * mode == 0 signals iscsi_target_rx_thread() usage. 
+ */ + if (mode == 1) { + if (!conn->conn_tx_reset_cpumask) + return; + conn->conn_tx_reset_cpumask = 0; + } else { + if (!conn->conn_rx_reset_cpumask) + return; + conn->conn_rx_reset_cpumask = 0; + } + /* + * Update the CPU mask for this single kthread so that + * both TX and RX kthreads are scheduled to run on the + * same CPU. + */ + set_cpus_allowed_ptr(p, conn->conn_cpumask); +} #endif /* ISCSI_TARGET_CORE_H */ diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 90e37faa2ede..40ac7cd80150 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -6,6 +6,7 @@ struct iscsit_transport { #define ISCSIT_TRANSPORT_NAME 16 char name[ISCSIT_TRANSPORT_NAME]; int transport_type; + bool rdma_shutdown; int priv_size; struct module *owner; struct list_head t_node; @@ -22,6 +23,13 @@ struct iscsit_transport { int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); + int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *, + struct iscsi_datain_req *, const void *, u32); + void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *); + void (*iscsit_get_rx_pdu)(struct iscsi_conn *); + int (*iscsit_validate_params)(struct iscsi_conn *); + void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *, + struct iscsi_r2t *); enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *); }; @@ -77,6 +85,18 @@ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_logout_rsp *); extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); +extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *); +extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *); +extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *); +extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *); +extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *); +extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *, + struct iscsi_datain *, + struct iscsi_data_rsp *, bool); +extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, + bool); +extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int); +extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int); /* * From iscsi_target_device.c */ @@ -102,3 +122,24 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *, __be32); extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *, bool); +extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, + struct iscsi_conn *, u8); + +/* + * From iscsi_target_nego.c + */ +extern int iscsi_target_check_login_request(struct iscsi_conn *, + struct iscsi_login *); + +/* + * From iscsi_target_login.c + */ +extern __printf(2, 3) int iscsi_change_param_sprintf( + struct iscsi_conn *, const char *, ...); + +/* + * From iscsi_target_parameters.c + */ +extern struct iscsi_param *iscsi_find_param_from_key( + char *, struct iscsi_param_list *); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 28ee5c2e6bcd..d8ab5101fad5 
100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -85,7 +85,6 @@ extern struct configfs_attribute *passthrough_attrib_attrs[]; void *transport_kmap_data_sg(struct se_cmd *); void transport_kunmap_data_sg(struct se_cmd *); /* core helpers also used by xcopy during internal command setup */ -int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool); sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, struct scatterlist *, u32, struct scatterlist *, u32); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 3e0dd86360a2..b316b44d03f3 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -536,7 +536,6 @@ struct se_node_acl { char initiatorname[TRANSPORT_IQN_LEN]; /* Used to signal demo mode created ACL, disabled by default */ bool dynamic_node_acl; - bool acl_stop:1; u32 queue_depth; u32 acl_index; enum target_prot_type saved_prot_type; @@ -603,7 +602,6 @@ struct se_session { struct list_head sess_cmd_list; struct list_head sess_wait_list; spinlock_t sess_cmd_lock; - struct kref sess_kref; void *sess_cmd_map; struct percpu_ida sess_tag_pool; }; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 8ff6d40a294f..de44462a7680 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -50,10 +50,6 @@ struct target_core_fabric_ops { */ int (*check_stop_free)(struct se_cmd *); void (*release_cmd)(struct se_cmd *); - /* - * Called with spin_lock_bh(struct se_portal_group->session_lock held. - */ - int (*shutdown_session)(struct se_session *); void (*close_session)(struct se_session *); u32 (*sess_get_index)(struct se_session *); /* @@ -123,8 +119,6 @@ void __transport_register_session(struct se_portal_group *, struct se_node_acl *, struct se_session *, void *); void transport_register_session(struct se_portal_group *, struct se_node_acl *, struct se_session *, void *); -int target_get_session(struct se_session *); -void target_put_session(struct se_session *); ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *); void transport_free_session(struct se_session *); void target_put_nacl(struct se_node_acl *); @@ -185,6 +179,10 @@ int core_tpg_set_initiator_node_tag(struct se_portal_group *, int core_tpg_register(struct se_wwn *, struct se_portal_group *, int); int core_tpg_deregister(struct se_portal_group *); +int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, + u32 length, bool zero_page, bool chainable); +void target_free_sgl(struct scatterlist *sgl, int nents); + /* * The LIO target core uses DMA_TO_DEVICE to mean that data is going * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index e215bf68f521..36e2d6fb1360 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -10,10 +10,11 @@ #include <trace/events/mmflags.h> #define COMPACTION_STATUS \ - EM( COMPACT_DEFERRED, "deferred") \ EM( COMPACT_SKIPPED, "skipped") \ + EM( COMPACT_DEFERRED, "deferred") \ EM( COMPACT_CONTINUE, "continue") \ EM( COMPACT_PARTIAL, "partial") \ + EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ EM( COMPACT_COMPLETE, "complete") \ EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 0f565845707b..3a09bb4dc3b2 
100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit, __entry->ret) ); -TRACE_EVENT(f2fs_reserve_new_block, +TRACE_EVENT(f2fs_reserve_new_blocks, - TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node), + TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node, + blkcnt_t count), - TP_ARGS(inode, nid, ofs_in_node), + TP_ARGS(inode, nid, ofs_in_node, count), TP_STRUCT__entry( __field(dev_t, dev) __field(nid_t, nid) __field(unsigned int, ofs_in_node) + __field(blkcnt_t, count) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->nid = nid; __entry->ofs_in_node = ofs_in_node; + __entry->count = count; ), - TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u", + TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu", show_dev(__entry), (unsigned int)__entry->nid, - __entry->ofs_in_node) + __entry->ofs_in_node, + (unsigned long long)__entry->count) ); DECLARE_EVENT_CLASS(f2fs__submit_page_bio, @@ -1271,14 +1275,14 @@ TRACE_EVENT(f2fs_destroy_extent_tree, DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, - TP_PROTO(struct super_block *sb, int type, int count), + TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count), TP_STRUCT__entry( __field(dev_t, dev) __field(int, type) - __field(int, count) + __field(s64, count) ), TP_fast_assign( @@ -1287,7 +1291,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, __entry->count = count; ), - TP_printk("dev = (%d,%d), %s, dirty count = %d", + TP_printk("dev = (%d,%d), %s, dirty count = %lld", show_dev(__entry), show_file_type(__entry->type), __entry->count) @@ -1295,14 +1299,14 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter, - TP_PROTO(struct super_block *sb, int type, int count), + TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count) ); DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit, - TP_PROTO(struct super_block *sb, int type, int count), + TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count) ); diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index aa69253ecc7d..f28292d73ddb 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -38,22 +38,25 @@ TRACE_EVENT(kvm_userspace_exit, ); TRACE_EVENT(kvm_vcpu_wakeup, - TP_PROTO(__u64 ns, bool waited), - TP_ARGS(ns, waited), + TP_PROTO(__u64 ns, bool waited, bool valid), + TP_ARGS(ns, waited, valid), TP_STRUCT__entry( __field( __u64, ns ) __field( bool, waited ) + __field( bool, valid ) ), TP_fast_assign( __entry->ns = ns; __entry->waited = waited; + __entry->valid = valid; ), - TP_printk("%s time %lld ns", + TP_printk("%s time %lld ns, polling %s", __entry->waited ? "wait" : "poll", - __entry->ns) + __entry->ns, + __entry->valid ? "valid" : "invalid") ); #if defined(CONFIG_HAVE_KVM_IRQFD) @@ -105,7 +108,7 @@ TRACE_EVENT(kvm_ioapic_set_irq, __entry->coalesced = coalesced; ), - TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s", + TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s", __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e, __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), (__entry->e & (1<<11)) ? 
"logical" : "physical", @@ -126,7 +129,7 @@ TRACE_EVENT(kvm_ioapic_delayed_eoi_inj, __entry->e = e; ), - TP_printk("dst %x vec=%u (%s|%s|%s%s)", + TP_printk("dst %x vec %u (%s|%s|%s%s)", (u8)(__entry->e >> 56), (u8)__entry->e, __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), (__entry->e & (1<<11)) ? "logical" : "physical", @@ -148,7 +151,7 @@ TRACE_EVENT(kvm_msi_set_irq, __entry->data = data; ), - TP_printk("dst %u vec %x (%s|%s|%s%s)", + TP_printk("dst %u vec %u (%s|%s|%s%s)", (u8)(__entry->address >> 12), (u8)__entry->data, __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode), (__entry->address & (1<<2)) ? "logical" : "physical", diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h index 8b0fbd93082c..75fff8696bae 100644 --- a/include/trace/events/libata.h +++ b/include/trace/events/libata.h @@ -39,6 +39,7 @@ ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \ ata_opcode_name(ATA_CMD_FPDMA_READ), \ ata_opcode_name(ATA_CMD_FPDMA_WRITE), \ + ata_opcode_name(ATA_CMD_NCQ_NON_DATA), \ ata_opcode_name(ATA_CMD_FPDMA_SEND), \ ata_opcode_name(ATA_CMD_FPDMA_RECV), \ ata_opcode_name(ATA_CMD_PIO_READ), \ @@ -97,6 +98,8 @@ ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \ ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \ ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \ + ata_opcode_name(ATA_CMD_ZAC_MGMT_IN), \ + ata_opcode_name(ATA_CMD_ZAC_MGMT_OUT), \ ata_opcode_name(ATA_CMD_RESTORE), \ ata_opcode_name(ATA_CMD_READ_LONG), \ ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \ @@ -139,6 +142,10 @@ const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int); const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int); #define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f) +const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char, + unsigned char, unsigned char); +#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h) + TRACE_EVENT(ata_qc_issue, TP_PROTO(struct ata_queued_cmd *qc), @@ -185,11 +192,12 @@ TRACE_EVENT(ata_qc_issue, __entry->hob_nsect = qc->tf.hob_nsect; ), - TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \ + TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s%s " \ " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)", __entry->ata_port, __entry->ata_dev, __entry->tag, show_protocol_name(__entry->proto), show_opcode_name(__entry->cmd), + __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect), __entry->cmd, __entry->feature, __entry->nsect, __entry->lbal, __entry->lbam, __entry->lbah, __entry->hob_feature, __entry->hob_nsect, diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index c51afb71bfab..a26415b5151c 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -127,8 +127,11 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat) __SYSCALL(__NR_symlinkat, sys_symlinkat) #define __NR_linkat 37 __SYSCALL(__NR_linkat, sys_linkat) +#ifdef __ARCH_WANT_RENAMEAT +/* renameat is superseded with flags by renameat2 */ #define __NR_renameat 38 __SYSCALL(__NR_renameat, sys_renameat) +#endif /* __ARCH_WANT_RENAMEAT */ /* fs/namespace.c */ #define __NR_umount2 39 diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 453a76af123c..cdecf87576e8 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -34,6 +34,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_AMDGPU_GEM_CREATE 0x00 #define DRM_AMDGPU_GEM_MMAP 0x01 #define DRM_AMDGPU_CTX 0x02 @@ 
-642,4 +646,8 @@ struct drm_amdgpu_info_hw_ip { #define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ #define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */ +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h index 6de7f0196ca0..72e326f9c7de 100644 --- a/include/uapi/drm/armada_drm.h +++ b/include/uapi/drm/armada_drm.h @@ -11,6 +11,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_ARMADA_GEM_CREATE 0x00 #define DRM_ARMADA_GEM_MMAP 0x02 #define DRM_ARMADA_GEM_PWRITE 0x03 @@ -44,4 +48,8 @@ struct drm_armada_gem_pwrite { #define DRM_IOCTL_ARMADA_GEM_PWRITE \ ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite) +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index a0ebfe7c9a28..452675fb55d9 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -36,7 +36,13 @@ #ifndef _DRM_H_ #define _DRM_H_ -#if defined(__KERNEL__) || defined(__linux__) +#if defined(__KERNEL__) + +#include <linux/types.h> +#include <asm/ioctl.h> +typedef unsigned int drm_handle_t; + +#elif defined(__linux__) #include <linux/types.h> #include <asm/ioctl.h> @@ -59,6 +65,10 @@ typedef unsigned long drm_handle_t; #endif +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ @@ -181,7 +191,7 @@ enum drm_map_type { _DRM_SHM = 2, /**< shared, cached */ _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ - _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */ }; /** @@ -373,7 +383,11 @@ struct drm_buf_pub { */ struct drm_buf_map { int count; /**< Length of the buffer list */ +#ifdef __cplusplus + void __user *virt; +#else void __user *virtual; /**< Mmap'd area in user-virtual */ +#endif struct drm_buf_pub __user *list; /**< Buffer information */ }; @@ -431,7 +445,7 @@ struct drm_draw { * DRM_IOCTL_UPDATE_DRAW ioctl argument type. 
*/ typedef enum { - DRM_DRAWABLE_CLIPRECTS, + DRM_DRAWABLE_CLIPRECTS } drm_drawable_info_type_t; struct drm_update_draw { @@ -681,7 +695,15 @@ struct drm_prime_handle { __s32 fd; }; -#include <drm/drm_mode.h> +#if defined(__cplusplus) +} +#endif + +#include "drm_mode.h" + +#if defined(__cplusplus) +extern "C" { +#endif #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) @@ -876,4 +898,8 @@ typedef struct drm_scatter_gather drm_scatter_gather_t; typedef struct drm_set_version drm_set_version_t; #endif +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 4d8da699a623..a5890bf44c0a 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -26,6 +26,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ ((__u32)(c) << 16) | ((__u32)(d) << 24)) @@ -229,4 +233,8 @@ */ #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) +#if defined(__cplusplus) +} +#endif + #endif /* DRM_FOURCC_H */ diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index c0217434d28d..49a72659b801 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -29,6 +29,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 #define DRM_DISPLAY_MODE_LEN 32 @@ -202,6 +206,7 @@ struct drm_mode_get_plane_res { #define DRM_MODE_ENCODER_VIRTUAL 5 #define DRM_MODE_ENCODER_DSI 6 #define DRM_MODE_ENCODER_DPMST 7 +#define DRM_MODE_ENCODER_DPI 8 struct drm_mode_get_encoder { __u32 encoder_id; @@ -241,6 +246,7 @@ struct drm_mode_get_encoder { #define DRM_MODE_CONNECTOR_eDP 14 #define DRM_MODE_CONNECTOR_VIRTUAL 15 #define DRM_MODE_CONNECTOR_DSI 16 +#define DRM_MODE_CONNECTOR_DPI 17 struct drm_mode_get_connector { @@ -320,6 +326,16 @@ struct drm_mode_connector_set_property { __u32 connector_id; }; +#define DRM_MODE_OBJECT_CRTC 0xcccccccc +#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 +#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 +#define DRM_MODE_OBJECT_MODE 0xdededede +#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 +#define DRM_MODE_OBJECT_FB 0xfbfbfbfb +#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb +#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee +#define DRM_MODE_OBJECT_ANY 0 + struct drm_mode_obj_get_properties { __u64 props_ptr; __u64 prop_values_ptr; @@ -611,4 +627,8 @@ struct drm_mode_destroy_blob { __u32 blob_id; }; +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h index 1d1a858a203d..a951ced60ebe 100644 --- a/include/uapi/drm/drm_sarea.h +++ b/include/uapi/drm/drm_sarea.h @@ -34,6 +34,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* SAREA area needs to be at least a page */ #if defined(__alpha__) #define SAREA_MAX 0x2000U @@ -83,4 +87,8 @@ typedef struct drm_sarea_frame drm_sarea_frame_t; typedef struct drm_sarea drm_sarea_t; #endif +#if defined(__cplusplus) +} +#endif + #endif /* _DRM_SAREA_H_ */ diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index f95e1c43c3fb..2584c1cca42f 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -19,6 +19,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints: * 1) Do not use pointers, use __u64 instead 
for 32 bit / 64 bit @@ -222,4 +226,8 @@ struct drm_etnaviv_gem_wait { #define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) #define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) +#if defined(__cplusplus) +} +#endif + #endif /* __ETNAVIV_DRM_H__ */ diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index 3947c2eb8d69..cb3e9f9d029f 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -17,6 +17,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /** * User-desired buffer creation information structure. * @@ -362,4 +366,8 @@ struct drm_exynos_ipp_event { __u32 buf_id[EXYNOS_DRM_OPS_MAX]; }; +#if defined(__cplusplus) +} +#endif + #endif /* _UAPI_EXYNOS_DRM_H_ */ diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h index bdb028723ded..6e6cf86b75b0 100644 --- a/include/uapi/drm/i810_drm.h +++ b/include/uapi/drm/i810_drm.h @@ -3,6 +3,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. */ @@ -280,4 +284,8 @@ typedef struct _drm_i810_mc { unsigned int last_render; /* Last Render Request */ } drm_i810_mc_t; +#if defined(__cplusplus) +} +#endif + #endif /* _I810_DRM_H_ */ diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index a5524cc95ff8..c17d63d8b543 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -29,6 +29,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. 
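The DRM_MODE_OBJECT_* type codes that the drm_mode.h hunk above moves into the uapi header are what userspace hands to the object-property ioctl together with struct drm_mode_obj_get_properties. Below is a minimal two-pass query sketch; it assumes the DRM_IOCTL_MODE_OBJ_GETPROPERTIES ioctl and the remaining drm_mode_obj_get_properties members (count_props, obj_id, obj_type) from the same headers, plus a made-up device path and connector id.

```c
/* Hypothetical sketch: list the property ids/values of one connector. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int dump_object_props(int fd, uint32_t connector_id)
{
	struct drm_mode_obj_get_properties arg = {
		.obj_id   = connector_id,
		.obj_type = DRM_MODE_OBJECT_CONNECTOR,
	};

	/* First pass: ask the kernel how many properties the object has. */
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg) < 0)
		return -1;

	uint32_t *ids  = calloc(arg.count_props, sizeof(*ids));
	uint64_t *vals = calloc(arg.count_props, sizeof(*vals));
	if (!ids || !vals)
		return -1;

	/* Second pass: hand the kernel the buffers through __u64 "pointers". */
	arg.props_ptr       = (uintptr_t)ids;
	arg.prop_values_ptr = (uintptr_t)vals;
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg) < 0)
		return -1;

	for (uint32_t i = 0; i < arg.count_props; i++)
		printf("prop %u = %llu\n", ids[i], (unsigned long long)vals[i]);

	free(ids);
	free(vals);
	return 0;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* path is an assumption */
	if (fd < 0)
		return 1;
	dump_object_props(fd, 42 /* hypothetical connector id */);
	close(fd);
	return 0;
}
```

Passing userspace pointers as __u64 fields keeps the layout identical for 32-bit and 64-bit callers, which is the backwards-compatibility rule the comments in these uapi headers keep repeating.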
*/ @@ -1170,4 +1174,8 @@ struct drm_i915_gem_context_param { __u64 value; }; +#if defined(__cplusplus) +} +#endif + #endif /* _UAPI_I915_DRM_H_ */ diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h index fca817009e13..8c4337548ab5 100644 --- a/include/uapi/drm/mga_drm.h +++ b/include/uapi/drm/mga_drm.h @@ -37,6 +37,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (mga_sarea.h) */ @@ -416,4 +420,8 @@ typedef struct drm_mga_getparam { void __user *value; } drm_mga_getparam_t; +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 254d3e92d18e..bf19d2cd9078 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -20,6 +20,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints: * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit @@ -217,4 +221,8 @@ struct drm_msm_wait_fence { #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) +#if defined(__cplusplus) +} +#endif + #endif /* __MSM_DRM_H__ */ diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index 500d82aecbe4..259588a4b61b 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -27,7 +27,11 @@ #define DRM_NOUVEAU_EVENT_NVIF 0x80000000 -#include <drm/drm.h> +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) @@ -141,4 +145,8 @@ struct drm_nouveau_gem_cpu_fini { #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini) #define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info) +#if defined(__cplusplus) +} +#endif + #endif /* __NOUVEAU_DRM_H__ */ diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 38a3bd847e15..407cb55df6ac 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -22,6 +22,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. */ @@ -114,4 +118,8 @@ struct drm_omap_gem_info { #define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) #define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info) +#if defined(__cplusplus) +} +#endif + #endif /* __OMAP_DRM_H__ */ diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index 4d1e32640463..7eef42213051 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -26,6 +26,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. * @@ -84,7 +88,6 @@ struct drm_qxl_command { __u32 pad; }; -/* XXX: call it drm_qxl_commands? 
*/ struct drm_qxl_execbuffer { __u32 flags; /* for future use */ __u32 commands_num; @@ -148,4 +151,8 @@ struct drm_qxl_alloc_surf { DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\ struct drm_qxl_alloc_surf) +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h index 7a44c6500a7e..690e9c62f510 100644 --- a/include/uapi/drm/r128_drm.h +++ b/include/uapi/drm/r128_drm.h @@ -35,6 +35,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (r128_sarea.h) */ @@ -325,4 +329,8 @@ typedef struct drm_r128_getparam { void __user *value; } drm_r128_getparam_t; +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index ccb9bcd82685..490a59cc4532 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -35,6 +35,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (radeon_sarea.h) */ @@ -1067,4 +1071,8 @@ struct drm_radeon_info { #define CIK_TILE_MODE_DEPTH_STENCIL_1D 5 +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h index 574147489c60..0f6eddef74aa 100644 --- a/include/uapi/drm/savage_drm.h +++ b/include/uapi/drm/savage_drm.h @@ -28,6 +28,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #ifndef __SAVAGE_SAREA_DEFINES__ #define __SAVAGE_SAREA_DEFINES__ @@ -209,4 +213,8 @@ union drm_savage_cmd_header { } clear1; /* SAVAGE_CMD_CLEAR data */ }; +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h index 374858cdcdaa..3e3f7e989e0b 100644 --- a/include/uapi/drm/sis_drm.h +++ b/include/uapi/drm/sis_drm.h @@ -27,6 +27,12 @@ #ifndef __SIS_DRM_H__ #define __SIS_DRM_H__ +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + /* SiS specific ioctls */ #define NOT_USED_0_3 #define DRM_SIS_FB_ALLOC 0x04 @@ -64,4 +70,8 @@ typedef struct { unsigned long offset, size; } drm_sis_fb_t; +#if defined(__cplusplus) +} +#endif + #endif /* __SIS_DRM_H__ */ diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h index 27d0b054aed0..d954f8c33321 100644 --- a/include/uapi/drm/tegra_drm.h +++ b/include/uapi/drm/tegra_drm.h @@ -25,6 +25,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) @@ -198,4 +202,8 @@ struct drm_tegra_gem_get_flags { #define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags) #define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags) +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index eeb37e394f13..af12e8a184c8 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -26,6 +26,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_VC4_SUBMIT_CL 0x00 #define DRM_VC4_WAIT_SEQNO 0x01 #define DRM_VC4_WAIT_BO 0x02 @@ -276,4 +280,8 @@ struct drm_vc4_get_hang_state { __u32 pad[16]; }; +#if defined(__cplusplus) +} +#endif + #endif /* _UAPI_VC4_DRM_H_ */ diff --git 
a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h index fa21ed185520..a1e125d42208 100644 --- a/include/uapi/drm/via_drm.h +++ b/include/uapi/drm/via_drm.h @@ -26,6 +26,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. */ @@ -271,4 +275,8 @@ typedef struct drm_via_dmablit { drm_via_blitsync_t sync; } drm_via_dmablit_t; +#if defined(__cplusplus) +} +#endif + #endif /* _VIA_DRM_H_ */ diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index c74f1f90cb37..91a31ffed828 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -26,6 +26,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. * @@ -163,4 +167,8 @@ struct drm_virtgpu_get_caps { DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \ struct drm_virtgpu_get_caps) +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 5b68b4d10884..d325a4107916 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -30,6 +30,10 @@ #include "drm.h" +#if defined(__cplusplus) +extern "C" { +#endif + #define DRM_VMW_MAX_SURFACE_FACES 6 #define DRM_VMW_MAX_MIP_LEVELS 24 @@ -1087,4 +1091,9 @@ union drm_vmw_extended_context_arg { enum drm_vmw_extended_context req; struct drm_vmw_context_arg rep; }; + +#if defined(__cplusplus) +} +#endif + #endif diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index dea893199257..2bdd1e3e7007 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -23,6 +23,7 @@ #define BTRFS_IOCTL_MAGIC 0x94 #define BTRFS_VOL_NAME_MAX 255 +#define BTRFS_LABEL_SIZE 256 /* this should be 4k */ #define BTRFS_PATH_NAME_MAX 4087 @@ -33,14 +34,31 @@ struct btrfs_ioctl_vol_args { #define BTRFS_DEVICE_PATH_NAME_MAX 1024 -#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) -#define BTRFS_SUBVOL_RDONLY (1ULL << 1) -#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2) +#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3) + +#define BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED \ + (BTRFS_SUBVOL_CREATE_ASYNC | \ + BTRFS_SUBVOL_RDONLY | \ + BTRFS_SUBVOL_QGROUP_INHERIT | \ + BTRFS_DEVICE_SPEC_BY_ID) + #define BTRFS_FSID_SIZE 16 #define BTRFS_UUID_SIZE 16 #define BTRFS_UUID_UNPARSED_SIZE 37 -#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0) +/* + * flags definition for qgroup limits + * + * Used by: + * struct btrfs_qgroup_limit.flags + * struct btrfs_qgroup_limit_item.flags + */ +#define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0) +#define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1) +#define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2) +#define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3) +#define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4) +#define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5) struct btrfs_qgroup_limit { __u64 flags; @@ -50,6 +68,14 @@ struct btrfs_qgroup_limit { __u64 rsv_excl; }; +/* + * flags definition for qgroup inheritance + * + * Used by: + * struct btrfs_qgroup_inherit.flags + */ +#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0) + struct btrfs_qgroup_inherit { __u64 flags; __u64 num_qgroups; @@ -64,6 +90,20 @@ struct btrfs_ioctl_qgroup_limit_args { struct btrfs_qgroup_limit lim; }; +/* + * flags for subvolumes + * + * Used by: + * struct btrfs_ioctl_vol_args_v2.flags + * + * BTRFS_SUBVOL_RDONLY is also 
provided/consumed by the following ioctls: + * - BTRFS_IOC_SUBVOL_GETFLAGS + * - BTRFS_IOC_SUBVOL_SETFLAGS + */ +#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) +#define BTRFS_SUBVOL_RDONLY (1ULL << 1) +#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2) + #define BTRFS_SUBVOL_NAME_MAX 4039 struct btrfs_ioctl_vol_args_v2 { __s64 fd; @@ -76,7 +116,10 @@ struct btrfs_ioctl_vol_args_v2 { }; __u64 unused[4]; }; - char name[BTRFS_SUBVOL_NAME_MAX + 1]; + union { + char name[BTRFS_SUBVOL_NAME_MAX + 1]; + __u64 devid; + }; }; /* @@ -190,6 +233,37 @@ struct btrfs_ioctl_fs_info_args { __u64 reserved[122]; /* pad to 1k */ }; +/* + * feature flags + * + * Used by: + * struct btrfs_ioctl_feature_flags + */ +#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0) + +#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) +#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) +#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) +#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) +/* + * some patches floated around with a second compression method + * lets save that incompat here for when they do get in + * Note we don't actually support it, we're just reserving the + * number + */ +#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) + +/* + * older kernels tried to do bigger metadata blocks, but the + * code was pretty buggy. Lets not let them try anymore. + */ +#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) + +#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) +#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) +#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) +#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) + struct btrfs_ioctl_feature_flags { __u64 compat_flags; __u64 compat_ro_flags; @@ -254,6 +328,70 @@ struct btrfs_balance_progress { __u64 completed; /* # of chunks relocated so far */ }; +/* + * flags definition for balance + * + * Restriper's general type filter + * + * Used by: + * btrfs_ioctl_balance_args.flags + * btrfs_balance_control.flags (internal) + */ +#define BTRFS_BALANCE_DATA (1ULL << 0) +#define BTRFS_BALANCE_SYSTEM (1ULL << 1) +#define BTRFS_BALANCE_METADATA (1ULL << 2) + +#define BTRFS_BALANCE_TYPE_MASK (BTRFS_BALANCE_DATA | \ + BTRFS_BALANCE_SYSTEM | \ + BTRFS_BALANCE_METADATA) + +#define BTRFS_BALANCE_FORCE (1ULL << 3) +#define BTRFS_BALANCE_RESUME (1ULL << 4) + +/* + * flags definitions for per-type balance args + * + * Balance filters + * + * Used by: + * struct btrfs_balance_args + */ +#define BTRFS_BALANCE_ARGS_PROFILES (1ULL << 0) +#define BTRFS_BALANCE_ARGS_USAGE (1ULL << 1) +#define BTRFS_BALANCE_ARGS_DEVID (1ULL << 2) +#define BTRFS_BALANCE_ARGS_DRANGE (1ULL << 3) +#define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) +#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) +#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) +#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) +#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10) + +#define BTRFS_BALANCE_ARGS_MASK \ + (BTRFS_BALANCE_ARGS_PROFILES | \ + BTRFS_BALANCE_ARGS_USAGE | \ + BTRFS_BALANCE_ARGS_DEVID | \ + BTRFS_BALANCE_ARGS_DRANGE | \ + BTRFS_BALANCE_ARGS_VRANGE | \ + BTRFS_BALANCE_ARGS_LIMIT | \ + BTRFS_BALANCE_ARGS_LIMIT_RANGE | \ + BTRFS_BALANCE_ARGS_STRIPES_RANGE | \ + BTRFS_BALANCE_ARGS_USAGE_RANGE) + +/* + * Profile changing flags. When SOFT is set we won't relocate chunk if + * it already has the target profile (even though it may be + * half-filled). 
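The new devid member of the btrfs_ioctl_vol_args_v2 union, together with BTRFS_DEVICE_SPEC_BY_ID, lets userspace name the device to delete by its numeric id instead of by path. A rough sketch of driving the BTRFS_IOC_RM_DEV_V2 ioctl that is added at the end of this header; it needs headers and a kernel carrying this patch plus CAP_SYS_ADMIN, and error handling is minimal.

```c
/* Sketch: "remove device <devid> from <btrfs mount>" by device id. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <devid> <btrfs-mountpoint>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[2], O_RDONLY);
	if (fd < 0) {
		perror("open mountpoint");
		return 1;
	}

	struct btrfs_ioctl_vol_args_v2 args;
	memset(&args, 0, sizeof(args));
	args.flags = BTRFS_DEVICE_SPEC_BY_ID;	/* select the devid union member */
	args.devid = strtoull(argv[1], NULL, 0);

	if (ioctl(fd, BTRFS_IOC_RM_DEV_V2, &args) < 0)
		perror("BTRFS_IOC_RM_DEV_V2");

	close(fd);
	return 0;
}
```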
+ */ +#define BTRFS_BALANCE_ARGS_CONVERT (1ULL << 8) +#define BTRFS_BALANCE_ARGS_SOFT (1ULL << 9) + + +/* + * flags definition for balance state + * + * Used by: + * struct btrfs_ioctl_balance_args.state + */ #define BTRFS_BALANCE_STATE_RUNNING (1ULL << 0) #define BTRFS_BALANCE_STATE_PAUSE_REQ (1ULL << 1) #define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2) @@ -347,9 +485,45 @@ struct btrfs_ioctl_clone_range_args { __u64 dest_offset; }; -/* flags for the defrag range ioctl */ +/* + * flags definition for the defrag range ioctl + * + * Used by: + * struct btrfs_ioctl_defrag_range_args.flags + */ #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 +struct btrfs_ioctl_defrag_range_args { + /* start of the defrag operation */ + __u64 start; + + /* number of bytes to defrag, use (u64)-1 to say all */ + __u64 len; + + /* + * flags for the operation, which can include turning + * on compression for this one defrag + */ + __u64 flags; + + /* + * any extent bigger than this will be considered + * already defragged. Use 0 to take the kernel default + * Use 1 to say every single extent must be rewritten + */ + __u32 extent_thresh; + + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + + /* spare for later */ + __u32 unused[4]; +}; + #define BTRFS_SAME_DATA_DIFFERS 1 /* For extent-same ioctl */ @@ -659,5 +833,7 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code) struct btrfs_ioctl_feature_flags[2]) #define BTRFS_IOC_GET_SUPPORTED_FEATURES _IOR(BTRFS_IOCTL_MAGIC, 57, \ struct btrfs_ioctl_feature_flags[3]) +#define BTRFS_IOC_RM_DEV_V2 _IOW(BTRFS_IOCTL_MAGIC, 58, \ + struct btrfs_ioctl_vol_args_v2) #endif /* _UAPI_LINUX_BTRFS_H */ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h new file mode 100644 index 000000000000..d5ad15a106a7 --- /dev/null +++ b/include/uapi/linux/btrfs_tree.h @@ -0,0 +1,966 @@ +#ifndef _BTRFS_CTREE_H_ +#define _BTRFS_CTREE_H_ + +/* + * This header contains the structure definitions and constants used + * by file system objects that can be retrieved using + * the BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that + * is needed to describe a leaf node's key or item contents. + */ + +/* holds pointers to all of the tree roots */ +#define BTRFS_ROOT_TREE_OBJECTID 1ULL + +/* stores information about which extents are in use, and reference counts */ +#define BTRFS_EXTENT_TREE_OBJECTID 2ULL + +/* + * chunk tree stores translations from logical -> physical block numbering + * the super block points to the chunk tree + */ +#define BTRFS_CHUNK_TREE_OBJECTID 3ULL + +/* + * stores information about which areas of a given device are in use. + * one per device. The tree of tree roots points to the device tree + */ +#define BTRFS_DEV_TREE_OBJECTID 4ULL + +/* one per subvolume, storing files and directories */ +#define BTRFS_FS_TREE_OBJECTID 5ULL + +/* directory objectid inside the root tree */ +#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL + +/* holds checksums of all the data extents */ +#define BTRFS_CSUM_TREE_OBJECTID 7ULL + +/* holds quota configuration and tracking */ +#define BTRFS_QUOTA_TREE_OBJECTID 8ULL + +/* for storing items that use the BTRFS_UUID_KEY* types */ +#define BTRFS_UUID_TREE_OBJECTID 9ULL + +/* tracks free space in block groups. 
*/ +#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL + +/* device stats in the device tree */ +#define BTRFS_DEV_STATS_OBJECTID 0ULL + +/* for storing balance parameters in the root tree */ +#define BTRFS_BALANCE_OBJECTID -4ULL + +/* orhpan objectid for tracking unlinked/truncated files */ +#define BTRFS_ORPHAN_OBJECTID -5ULL + +/* does write ahead logging to speed up fsyncs */ +#define BTRFS_TREE_LOG_OBJECTID -6ULL +#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL + +/* for space balancing */ +#define BTRFS_TREE_RELOC_OBJECTID -8ULL +#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL + +/* + * extent checksums all have this objectid + * this allows them to share the logging tree + * for fsyncs + */ +#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL + +/* For storing free space cache */ +#define BTRFS_FREE_SPACE_OBJECTID -11ULL + +/* + * The inode number assigned to the special inode for storing + * free ino cache + */ +#define BTRFS_FREE_INO_OBJECTID -12ULL + +/* dummy objectid represents multiple objectids */ +#define BTRFS_MULTIPLE_OBJECTIDS -255ULL + +/* + * All files have objectids in this range. + */ +#define BTRFS_FIRST_FREE_OBJECTID 256ULL +#define BTRFS_LAST_FREE_OBJECTID -256ULL +#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL + + +/* + * the device items go into the chunk tree. The key is in the form + * [ 1 BTRFS_DEV_ITEM_KEY device_id ] + */ +#define BTRFS_DEV_ITEMS_OBJECTID 1ULL + +#define BTRFS_BTREE_INODE_OBJECTID 1 + +#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2 + +#define BTRFS_DEV_REPLACE_DEVID 0ULL + +/* + * inode items have the data typically returned from stat and store other + * info about object characteristics. There is one for every file and dir in + * the FS + */ +#define BTRFS_INODE_ITEM_KEY 1 +#define BTRFS_INODE_REF_KEY 12 +#define BTRFS_INODE_EXTREF_KEY 13 +#define BTRFS_XATTR_ITEM_KEY 24 +#define BTRFS_ORPHAN_ITEM_KEY 48 +/* reserve 2-15 close to the inode for later flexibility */ + +/* + * dir items are the name -> inode pointers in a directory. There is one + * for every name in a directory. + */ +#define BTRFS_DIR_LOG_ITEM_KEY 60 +#define BTRFS_DIR_LOG_INDEX_KEY 72 +#define BTRFS_DIR_ITEM_KEY 84 +#define BTRFS_DIR_INDEX_KEY 96 +/* + * extent data is for file data + */ +#define BTRFS_EXTENT_DATA_KEY 108 + +/* + * extent csums are stored in a separate tree and hold csums for + * an entire extent on disk. + */ +#define BTRFS_EXTENT_CSUM_KEY 128 + +/* + * root items point to tree roots. They are typically in the root + * tree used by the super block to find all the other trees + */ +#define BTRFS_ROOT_ITEM_KEY 132 + +/* + * root backrefs tie subvols and snapshots to the directory entries that + * reference them + */ +#define BTRFS_ROOT_BACKREF_KEY 144 + +/* + * root refs make a fast index for listing all of the snapshots and + * subvolumes referenced by a given root. They point directly to the + * directory item in the root that references the subvol + */ +#define BTRFS_ROOT_REF_KEY 156 + +/* + * extent items are in the extent map tree. These record which blocks + * are used, and how many references there are to each block + */ +#define BTRFS_EXTENT_ITEM_KEY 168 + +/* + * The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know + * the length, so we save the level in key->offset instead of the length. 
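The key comment here is why userspace has to special-case skinny metadata keys: offset is a byte length for EXTENT_ITEM keys but a tree level for METADATA_ITEM keys. A small illustrative decoder using struct btrfs_key and the key-type constants from this new header; linux/btrfs.h is included first so that BTRFS_UUID_SIZE is already defined for the sanity check further down in btrfs_tree.h.

```c
#include <stdio.h>
#include <linux/btrfs.h>	/* defines BTRFS_UUID_SIZE, needed by btrfs_tree.h */
#include <linux/btrfs_tree.h>	/* the header added by this patch */

/* Print a search key, decoding the overloaded offset field. */
static void print_extent_key(const struct btrfs_key *key)
{
	switch (key->type) {
	case BTRFS_EXTENT_ITEM_KEY:
		printf("extent at %llu, length %llu\n",
		       (unsigned long long)key->objectid,
		       (unsigned long long)key->offset);
		break;
	case BTRFS_METADATA_ITEM_KEY:
		printf("tree block at %llu, level %llu\n",
		       (unsigned long long)key->objectid,
		       (unsigned long long)key->offset);
		break;
	default:
		printf("key (%llu %u %llu)\n",
		       (unsigned long long)key->objectid,
		       key->type, (unsigned long long)key->offset);
	}
}

int main(void)
{
	struct btrfs_key k = {
		.objectid = 30408704,		/* made-up tree block bytenr */
		.type	  = BTRFS_METADATA_ITEM_KEY,
		.offset	  = 0,			/* level 0, i.e. a leaf */
	};
	print_extent_key(&k);
	return 0;
}
```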
+ */ +#define BTRFS_METADATA_ITEM_KEY 169 + +#define BTRFS_TREE_BLOCK_REF_KEY 176 + +#define BTRFS_EXTENT_DATA_REF_KEY 178 + +#define BTRFS_EXTENT_REF_V0_KEY 180 + +#define BTRFS_SHARED_BLOCK_REF_KEY 182 + +#define BTRFS_SHARED_DATA_REF_KEY 184 + +/* + * block groups give us hints into the extent allocation trees. Which + * blocks are free etc etc + */ +#define BTRFS_BLOCK_GROUP_ITEM_KEY 192 + +/* + * Every block group is represented in the free space tree by a free space info + * item, which stores some accounting information. It is keyed on + * (block_group_start, FREE_SPACE_INFO, block_group_length). + */ +#define BTRFS_FREE_SPACE_INFO_KEY 198 + +/* + * A free space extent tracks an extent of space that is free in a block group. + * It is keyed on (start, FREE_SPACE_EXTENT, length). + */ +#define BTRFS_FREE_SPACE_EXTENT_KEY 199 + +/* + * When a block group becomes very fragmented, we convert it to use bitmaps + * instead of extents. A free space bitmap is keyed on + * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with + * (length / sectorsize) bits. + */ +#define BTRFS_FREE_SPACE_BITMAP_KEY 200 + +#define BTRFS_DEV_EXTENT_KEY 204 +#define BTRFS_DEV_ITEM_KEY 216 +#define BTRFS_CHUNK_ITEM_KEY 228 + +/* + * Records the overall state of the qgroups. + * There's only one instance of this key present, + * (0, BTRFS_QGROUP_STATUS_KEY, 0) + */ +#define BTRFS_QGROUP_STATUS_KEY 240 +/* + * Records the currently used space of the qgroup. + * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). + */ +#define BTRFS_QGROUP_INFO_KEY 242 +/* + * Contains the user configured limits for the qgroup. + * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). + */ +#define BTRFS_QGROUP_LIMIT_KEY 244 +/* + * Records the child-parent relationship of qgroups. For + * each relation, 2 keys are present: + * (childid, BTRFS_QGROUP_RELATION_KEY, parentid) + * (parentid, BTRFS_QGROUP_RELATION_KEY, childid) + */ +#define BTRFS_QGROUP_RELATION_KEY 246 + +/* + * Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY. + */ +#define BTRFS_BALANCE_ITEM_KEY 248 + +/* + * The key type for tree items that are stored persistently, but do not need to + * exist for extended period of time. The items can exist in any tree. + * + * [subtype, BTRFS_TEMPORARY_ITEM_KEY, data] + * + * Existing items: + * + * - balance status item + * (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) + */ +#define BTRFS_TEMPORARY_ITEM_KEY 248 + +/* + * Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY + */ +#define BTRFS_DEV_STATS_KEY 249 + +/* + * The key type for tree items that are stored persistently and usually exist + * for a long period, eg. filesystem lifetime. The item kinds can be status + * information, stats or preference values. The item can exist in any tree. + * + * [subtype, BTRFS_PERSISTENT_ITEM_KEY, data] + * + * Existing items: + * + * - device statistics, store IO stats in the device tree, one key for all + * stats + * (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0) + */ +#define BTRFS_PERSISTENT_ITEM_KEY 249 + +/* + * Persistantly stores the device replace state in the device tree. + * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). + */ +#define BTRFS_DEV_REPLACE_KEY 250 + +/* + * Stores items that allow to quickly map UUIDs to something else. + * These items are part of the filesystem UUID tree. + * The key is built like this: + * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). + */ +#if BTRFS_UUID_SIZE != 16 +#error "UUID items require BTRFS_UUID_SIZE == 16!" 
+#endif +#define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */ +#define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to + * received subvols */ + +/* + * string items are for debugging. They just store a short string of + * data in the FS + */ +#define BTRFS_STRING_ITEM_KEY 253 + + + +/* 32 bytes in various csum fields */ +#define BTRFS_CSUM_SIZE 32 + +/* csum types */ +#define BTRFS_CSUM_TYPE_CRC32 0 + +/* + * flags definitions for directory entry item type + * + * Used by: + * struct btrfs_dir_item.type + */ +#define BTRFS_FT_UNKNOWN 0 +#define BTRFS_FT_REG_FILE 1 +#define BTRFS_FT_DIR 2 +#define BTRFS_FT_CHRDEV 3 +#define BTRFS_FT_BLKDEV 4 +#define BTRFS_FT_FIFO 5 +#define BTRFS_FT_SOCK 6 +#define BTRFS_FT_SYMLINK 7 +#define BTRFS_FT_XATTR 8 +#define BTRFS_FT_MAX 9 + +/* + * The key defines the order in the tree, and so it also defines (optimal) + * block layout. + * + * objectid corresponds to the inode number. + * + * type tells us things about the object, and is a kind of stream selector. + * so for a given inode, keys with type of 1 might refer to the inode data, + * type of 2 may point to file data in the btree and type == 3 may point to + * extents. + * + * offset is the starting byte offset for this key in the stream. + * + * btrfs_disk_key is in disk byte order. struct btrfs_key is always + * in cpu native order. Otherwise they are identical and their sizes + * should be the same (ie both packed) + */ +struct btrfs_disk_key { + __le64 objectid; + __u8 type; + __le64 offset; +} __attribute__ ((__packed__)); + +struct btrfs_key { + __u64 objectid; + __u8 type; + __u64 offset; +} __attribute__ ((__packed__)); + +struct btrfs_dev_item { + /* the internal btrfs device id */ + __le64 devid; + + /* size of the device */ + __le64 total_bytes; + + /* bytes used */ + __le64 bytes_used; + + /* optimal io alignment for this device */ + __le32 io_align; + + /* optimal io width for this device */ + __le32 io_width; + + /* minimal io size for this device */ + __le32 sector_size; + + /* type and info about this device */ + __le64 type; + + /* expected generation for this device */ + __le64 generation; + + /* + * starting byte of this partition on the device, + * to allow for stripe alignment in the future + */ + __le64 start_offset; + + /* grouping information for allocation decisions */ + __le32 dev_group; + + /* seek speed 0-100 where 100 is fastest */ + __u8 seek_speed; + + /* bandwidth 0-100 where 100 is fastest */ + __u8 bandwidth; + + /* btrfs generated uuid for this device */ + __u8 uuid[BTRFS_UUID_SIZE]; + + /* uuid of FS who owns this device */ + __u8 fsid[BTRFS_UUID_SIZE]; +} __attribute__ ((__packed__)); + +struct btrfs_stripe { + __le64 devid; + __le64 offset; + __u8 dev_uuid[BTRFS_UUID_SIZE]; +} __attribute__ ((__packed__)); + +struct btrfs_chunk { + /* size of this chunk in bytes */ + __le64 length; + + /* objectid of the root referencing this chunk */ + __le64 owner; + + __le64 stripe_len; + __le64 type; + + /* optimal io alignment for this chunk */ + __le32 io_align; + + /* optimal io width for this chunk */ + __le32 io_width; + + /* minimal io size for this chunk */ + __le32 sector_size; + + /* 2^16 stripes is quite a lot, a second limit is the size of a single + * item in the btree + */ + __le16 num_stripes; + + /* sub stripes only matter for raid10 */ + __le16 sub_stripes; + struct btrfs_stripe stripe; + /* additional stripes go here */ +} __attribute__ ((__packed__)); + +#define BTRFS_FREE_SPACE_EXTENT 1 +#define BTRFS_FREE_SPACE_BITMAP 2 + 
+struct btrfs_free_space_entry { + __le64 offset; + __le64 bytes; + __u8 type; +} __attribute__ ((__packed__)); + +struct btrfs_free_space_header { + struct btrfs_disk_key location; + __le64 generation; + __le64 num_entries; + __le64 num_bitmaps; +} __attribute__ ((__packed__)); + +#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) +#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) + +/* Super block flags */ +/* Errors detected */ +#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) + +#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) +#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) + + +/* + * items in the extent btree are used to record the objectid of the + * owner of the block and the number of references + */ + +struct btrfs_extent_item { + __le64 refs; + __le64 generation; + __le64 flags; +} __attribute__ ((__packed__)); + +struct btrfs_extent_item_v0 { + __le32 refs; +} __attribute__ ((__packed__)); + + +#define BTRFS_EXTENT_FLAG_DATA (1ULL << 0) +#define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1) + +/* following flags only apply to tree blocks */ + +/* use full backrefs for extent pointers in the block */ +#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) + +/* + * this flag is only used internally by scrub and may be changed at any time + * it is only declared here to avoid collisions + */ +#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) + +struct btrfs_tree_block_info { + struct btrfs_disk_key key; + __u8 level; +} __attribute__ ((__packed__)); + +struct btrfs_extent_data_ref { + __le64 root; + __le64 objectid; + __le64 offset; + __le32 count; +} __attribute__ ((__packed__)); + +struct btrfs_shared_data_ref { + __le32 count; +} __attribute__ ((__packed__)); + +struct btrfs_extent_inline_ref { + __u8 type; + __le64 offset; +} __attribute__ ((__packed__)); + +/* old style backrefs item */ +struct btrfs_extent_ref_v0 { + __le64 root; + __le64 generation; + __le64 objectid; + __le32 count; +} __attribute__ ((__packed__)); + + +/* dev extents record free space on individual devices. The owner + * field points back to the chunk allocation mapping tree that allocated + * the extent. 
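A quick illustration of how the extent flags above combine: BTRFS_EXTENT_FLAG_DATA and BTRFS_EXTENT_FLAG_TREE_BLOCK select the kind of extent, and BTRFS_BLOCK_FLAG_FULL_BACKREF only means something for tree blocks. A hedged sketch (again including linux/btrfs.h first so the new header's BTRFS_UUID_SIZE check passes); the sample values in main() are made up.

```c
#include <stdio.h>
#include <endian.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>

/* Describe a btrfs_extent_item as it would appear in an EXTENT_ITEM. */
static void describe_extent_item(const struct btrfs_extent_item *ei)
{
	__u64 flags = le64toh(ei->flags);

	if (flags & BTRFS_EXTENT_FLAG_DATA)
		printf("data extent, %llu refs\n",
		       (unsigned long long)le64toh(ei->refs));
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		printf("tree block, full backref: %s\n",
		       (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) ? "yes" : "no");
	else
		printf("unexpected extent flags 0x%llx\n",
		       (unsigned long long)flags);
}

int main(void)
{
	struct btrfs_extent_item ei = {
		.refs  = htole64(1),
		.flags = htole64(BTRFS_EXTENT_FLAG_TREE_BLOCK |
				 BTRFS_BLOCK_FLAG_FULL_BACKREF),
	};
	describe_extent_item(&ei);
	return 0;
}
```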
The chunk tree uuid field is a way to double check the owner + */ +struct btrfs_dev_extent { + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 chunk_offset; + __le64 length; + __u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; +} __attribute__ ((__packed__)); + +struct btrfs_inode_ref { + __le64 index; + __le16 name_len; + /* name goes here */ +} __attribute__ ((__packed__)); + +struct btrfs_inode_extref { + __le64 parent_objectid; + __le64 index; + __le16 name_len; + __u8 name[0]; + /* name goes here */ +} __attribute__ ((__packed__)); + +struct btrfs_timespec { + __le64 sec; + __le32 nsec; +} __attribute__ ((__packed__)); + +struct btrfs_inode_item { + /* nfs style generation number */ + __le64 generation; + /* transid that last touched this inode */ + __le64 transid; + __le64 size; + __le64 nbytes; + __le64 block_group; + __le32 nlink; + __le32 uid; + __le32 gid; + __le32 mode; + __le64 rdev; + __le64 flags; + + /* modification sequence number for NFS */ + __le64 sequence; + + /* + * a little future expansion, for more than this we can + * just grow the inode item and version it + */ + __le64 reserved[4]; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; +} __attribute__ ((__packed__)); + +struct btrfs_dir_log_item { + __le64 end; +} __attribute__ ((__packed__)); + +struct btrfs_dir_item { + struct btrfs_disk_key location; + __le64 transid; + __le16 data_len; + __le16 name_len; + __u8 type; +} __attribute__ ((__packed__)); + +#define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) + +/* + * Internal in-memory flag that a subvolume has been marked for deletion but + * still visible as a directory + */ +#define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48) + +struct btrfs_root_item { + struct btrfs_inode_item inode; + __le64 generation; + __le64 root_dirid; + __le64 bytenr; + __le64 byte_limit; + __le64 bytes_used; + __le64 last_snapshot; + __le64 flags; + __le32 refs; + struct btrfs_disk_key drop_progress; + __u8 drop_level; + __u8 level; + + /* + * The following fields appear after subvol_uuids+subvol_times + * were introduced. + */ + + /* + * This generation number is used to test if the new fields are valid + * and up to date while reading the root item. Every time the root item + * is written out, the "generation" field is copied into this field. If + * anyone ever mounted the fs with an older kernel, we will have + * mismatching generation values here and thus must invalidate the + * new fields. See btrfs_update_root and btrfs_find_last_root for + * details. + * the offset of generation_v2 is also used as the start for the memset + * when invalidating the fields. + */ + __le64 generation_v2; + __u8 uuid[BTRFS_UUID_SIZE]; + __u8 parent_uuid[BTRFS_UUID_SIZE]; + __u8 received_uuid[BTRFS_UUID_SIZE]; + __le64 ctransid; /* updated when an inode changes */ + __le64 otransid; /* trans when created */ + __le64 stransid; /* trans when sent. non-zero for received subvol */ + __le64 rtransid; /* trans when received. 
non-zero for received subvol */ + struct btrfs_timespec ctime; + struct btrfs_timespec otime; + struct btrfs_timespec stime; + struct btrfs_timespec rtime; + __le64 reserved[8]; /* for future */ +} __attribute__ ((__packed__)); + +/* + * this is used for both forward and backward root refs + */ +struct btrfs_root_ref { + __le64 dirid; + __le64 sequence; + __le16 name_len; +} __attribute__ ((__packed__)); + +struct btrfs_disk_balance_args { + /* + * profiles to operate on, single is denoted by + * BTRFS_AVAIL_ALLOC_BIT_SINGLE + */ + __le64 profiles; + + /* + * usage filter + * BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N' + * BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max + */ + union { + __le64 usage; + struct { + __le32 usage_min; + __le32 usage_max; + }; + }; + + /* devid filter */ + __le64 devid; + + /* devid subset filter [pstart..pend) */ + __le64 pstart; + __le64 pend; + + /* btrfs virtual address space subset filter [vstart..vend) */ + __le64 vstart; + __le64 vend; + + /* + * profile to convert to, single is denoted by + * BTRFS_AVAIL_ALLOC_BIT_SINGLE + */ + __le64 target; + + /* BTRFS_BALANCE_ARGS_* */ + __le64 flags; + + /* + * BTRFS_BALANCE_ARGS_LIMIT with value 'limit' + * BTRFS_BALANCE_ARGS_LIMIT_RANGE - the extend version can use minimum + * and maximum + */ + union { + __le64 limit; + struct { + __le32 limit_min; + __le32 limit_max; + }; + }; + + /* + * Process chunks that cross stripes_min..stripes_max devices, + * BTRFS_BALANCE_ARGS_STRIPES_RANGE + */ + __le32 stripes_min; + __le32 stripes_max; + + __le64 unused[6]; +} __attribute__ ((__packed__)); + +/* + * store balance parameters to disk so that balance can be properly + * resumed after crash or unmount + */ +struct btrfs_balance_item { + /* BTRFS_BALANCE_* */ + __le64 flags; + + struct btrfs_disk_balance_args data; + struct btrfs_disk_balance_args meta; + struct btrfs_disk_balance_args sys; + + __le64 unused[4]; +} __attribute__ ((__packed__)); + +#define BTRFS_FILE_EXTENT_INLINE 0 +#define BTRFS_FILE_EXTENT_REG 1 +#define BTRFS_FILE_EXTENT_PREALLOC 2 + +struct btrfs_file_extent_item { + /* + * transaction id that created this extent + */ + __le64 generation; + /* + * max number of bytes to hold this extent in ram + * when we split a compressed extent we can't know how big + * each of the resulting pieces will be. So, this is + * an upper limit on the size of the extent in ram instead of + * an exact limit. + */ + __le64 ram_bytes; + + /* + * 32 bits for the various ways we might encode the data, + * including compression and encryption. If any of these + * are set to something a given disk format doesn't understand + * it is treated like an incompat flag for reading and writing, + * but not for stat. + */ + __u8 compression; + __u8 encryption; + __le16 other_encoding; /* spare for later use */ + + /* are we inline data or a real extent? */ + __u8 type; + + /* + * disk space consumed by the extent, checksum blocks are included + * in these numbers + * + * At this offset in the structure, the inline extent data start. + */ + __le64 disk_bytenr; + __le64 disk_num_bytes; + /* + * the logical offset in file blocks (no csums) + * this extent record is for. This allows a file extent to point + * into the middle of an existing extent on disk, sharing it + * between two snapshots (useful if some bytes in the middle of the + * extent have changed + */ + __le64 offset; + /* + * the logical number of file blocks (no csums included). This + * always reflects the size uncompressed and without encoding. 
+ */ + __le64 num_bytes; + +} __attribute__ ((__packed__)); + +struct btrfs_csum_item { + __u8 csum; +} __attribute__ ((__packed__)); + +struct btrfs_dev_stats_item { + /* + * grow this item struct at the end for future enhancements and keep + * the existing values unchanged + */ + __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; +} __attribute__ ((__packed__)); + +#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 +#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 +#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 +#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 +#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 +#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 +#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 + +struct btrfs_dev_replace_item { + /* + * grow this item struct at the end for future enhancements and keep + * the existing values unchanged + */ + __le64 src_devid; + __le64 cursor_left; + __le64 cursor_right; + __le64 cont_reading_from_srcdev_mode; + + __le64 replace_state; + __le64 time_started; + __le64 time_stopped; + __le64 num_write_errors; + __le64 num_uncorrectable_read_errors; +} __attribute__ ((__packed__)); + +/* different types of block groups (and chunks) */ +#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) +#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) +#define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2) +#define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3) +#define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4) +#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5) +#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) +#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) +#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) +#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ + BTRFS_SPACE_INFO_GLOBAL_RSV) + +enum btrfs_raid_types { + BTRFS_RAID_RAID10, + BTRFS_RAID_RAID1, + BTRFS_RAID_DUP, + BTRFS_RAID_RAID0, + BTRFS_RAID_SINGLE, + BTRFS_RAID_RAID5, + BTRFS_RAID_RAID6, + BTRFS_NR_RAID_TYPES +}; + +#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ + BTRFS_BLOCK_GROUP_SYSTEM | \ + BTRFS_BLOCK_GROUP_METADATA) + +#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ + BTRFS_BLOCK_GROUP_RAID1 | \ + BTRFS_BLOCK_GROUP_RAID5 | \ + BTRFS_BLOCK_GROUP_RAID6 | \ + BTRFS_BLOCK_GROUP_DUP | \ + BTRFS_BLOCK_GROUP_RAID10) +#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ + BTRFS_BLOCK_GROUP_RAID6) + +/* + * We need a bit for restriper to be able to tell when chunks of type + * SINGLE are available. This "extended" profile format is used in + * fs_info->avail_*_alloc_bits (in-memory) and balance item fields + * (on-disk). The corresponding on-disk bit in chunk.type is reserved + * to avoid remappings between two formats in future. + */ +#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) + +/* + * A fake block group type that is used to communicate global block reserve + * size to userspace via the SPACE_INFO ioctl. 
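The global reserve described just above reaches userspace as one extra, fake entry in the BTRFS_IOC_SPACE_INFO results. A sketch of the usual two-call pattern; it assumes the btrfs_ioctl_space_args / btrfs_ioctl_space_info definitions and the BTRFS_IOC_SPACE_INFO ioctl that live elsewhere in linux/btrfs.h (not shown in these hunks), plus a made-up mount point.

```c
/* Sketch: find the global-reserve pseudo block group via SPACE_INFO. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>	/* for BTRFS_SPACE_INFO_GLOBAL_RSV */

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "/mnt", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* First call with zero slots just reports how many entries exist. */
	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe) < 0) {
		perror("BTRFS_IOC_SPACE_INFO");
		return 1;
	}

	size_t sz = sizeof(struct btrfs_ioctl_space_args) +
		    probe.total_spaces * sizeof(struct btrfs_ioctl_space_info);
	struct btrfs_ioctl_space_args *args = calloc(1, sz);
	if (!args)
		return 1;
	args->space_slots = probe.total_spaces;

	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, args) == 0) {
		for (__u64 i = 0; i < args->total_spaces; i++) {
			if (args->spaces[i].flags & BTRFS_SPACE_INFO_GLOBAL_RSV)
				printf("global reserve: %llu bytes, %llu used\n",
				       (unsigned long long)args->spaces[i].total_bytes,
				       (unsigned long long)args->spaces[i].used_bytes);
		}
	}

	free(args);
	close(fd);
	return 0;
}
```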
+ */ +#define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49) + +#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \ + BTRFS_AVAIL_ALLOC_BIT_SINGLE) + +static inline __u64 chunk_to_extended(__u64 flags) +{ + if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0) + flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE; + + return flags; +} +static inline __u64 extended_to_chunk(__u64 flags) +{ + return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE; +} + +struct btrfs_block_group_item { + __le64 used; + __le64 chunk_objectid; + __le64 flags; +} __attribute__ ((__packed__)); + +struct btrfs_free_space_info { + __le32 extent_count; + __le32 flags; +} __attribute__ ((__packed__)); + +#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0) + +#define BTRFS_QGROUP_LEVEL_SHIFT 48 +static inline __u64 btrfs_qgroup_level(__u64 qgroupid) +{ + return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; +} + +/* + * is subvolume quota turned on? + */ +#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0) +/* + * RESCAN is set during the initialization phase + */ +#define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1) +/* + * Some qgroup entries are known to be out of date, + * either because the configuration has changed in a way that + * makes a rescan necessary, or because the fs has been mounted + * with a non-qgroup-aware version. + * Turning qouta off and on again makes it inconsistent, too. + */ +#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) + +#define BTRFS_QGROUP_STATUS_VERSION 1 + +struct btrfs_qgroup_status_item { + __le64 version; + /* + * the generation is updated during every commit. As older + * versions of btrfs are not aware of qgroups, it will be + * possible to detect inconsistencies by checking the + * generation on mount time + */ + __le64 generation; + + /* flag definitions see above */ + __le64 flags; + + /* + * only used during scanning to record the progress + * of the scan. It contains a logical address + */ + __le64 rescan; +} __attribute__ ((__packed__)); + +struct btrfs_qgroup_info_item { + __le64 generation; + __le64 rfer; + __le64 rfer_cmpr; + __le64 excl; + __le64 excl_cmpr; +} __attribute__ ((__packed__)); + +struct btrfs_qgroup_limit_item { + /* + * only updated when any of the other values change + */ + __le64 flags; + __le64 max_rfer; + __le64 max_excl; + __le64 rsv_rfer; + __le64 rsv_excl; +} __attribute__ ((__packed__)); + +#endif /* _BTRFS_CTREE_H_ */ diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h new file mode 100644 index 000000000000..7e4272cf1fb2 --- /dev/null +++ b/include/uapi/linux/coresight-stm.h @@ -0,0 +1,21 @@ +#ifndef __UAPI_CORESIGHT_STM_H_ +#define __UAPI_CORESIGHT_STM_H_ + +#define STM_FLAG_TIMESTAMPED BIT(3) +#define STM_FLAG_GUARANTEED BIT(7) + +/* + * The CoreSight STM supports guaranteed and invariant timing + * transactions. Guaranteed transactions are guaranteed to be + * traced, this might involve stalling the bus or system to + * ensure the transaction is accepted by the STM. While invariant + * timing transactions are not guaranteed to be traced, they + * will take an invariant amount of time regardless of the + * state of the STM. 
+ */ +enum { + STM_OPTION_GUARANTEED = 0, + STM_OPTION_INVARIANT, +}; + +#endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9222db8ccccc..5f030b46cff4 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* @@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, + = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index e21fe04acc12..3b00f7c8943f 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -222,7 +222,6 @@ struct fsxattr { #define BLKSECDISCARD _IO(0x12,125) #define BLKROTATIONAL _IO(0x12,126) #define BLKZEROOUT _IO(0x12,127) -#define BLKDAXGET _IO(0x12,129) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index ca1054dd8249..72a04a0e8cce 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -1,5 +1,5 @@ #ifndef _UAPI_LINUX_GTP_H_ -#define _UAPI_LINUX_GTP_H__ +#define _UAPI_LINUX_GTP_H_ enum gtp_genl_cmds { GTP_CMD_NEWPDP, diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h index b0a7dd61eb35..adcbef4bff61 100644 --- a/include/uapi/linux/i2c.h +++ b/include/uapi/linux/i2c.h @@ -68,14 +68,15 @@ struct i2c_msg { __u16 addr; /* slave address */ __u16 flags; -#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ #define I2C_M_RD 0x0001 /* read data, from slave to master */ -#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */ -#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ -#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ + /* I2C_M_RD is guaranteed to be 0x0001! 
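The reordered I2C_M_* table (and the note that I2C_M_RD is fixed at 0x0001) is the message-flag vocabulary used with the I2C_RDWR ioctl. A minimal write-then-read transfer sketch using struct i2c_msg together with struct i2c_rdwr_ioctl_data from linux/i2c-dev.h; the bus path, slave address, and register number are made up.

```c
/* Sketch: read one register from an I2C slave with a repeated start. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-1", O_RDWR);	/* bus number is an assumption */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	__u8 reg = 0x10;	/* hypothetical register address */
	__u8 val = 0;

	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	if (ioctl(fd, I2C_RDWR, &xfer) < 0)
		perror("I2C_RDWR");
	else
		printf("reg 0x%02x = 0x%02x\n", reg, val);

	close(fd);
	return 0;
}
```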
*/ +#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ #define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ +#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */ +#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */ __u16 len; /* msg length */ __u8 *buf; /* pointer to msg data */ }; diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index c077617f3304..b0916fc72cce 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -38,6 +38,7 @@ enum iio_chan_type { IIO_CONCENTRATION, IIO_RESISTANCE, IIO_PH, + IIO_UVINDEX, }; enum iio_modifier { @@ -77,6 +78,7 @@ enum iio_modifier { IIO_MOD_Q, IIO_MOD_CO2, IIO_MOD_VOC, + IIO_MOD_LIGHT_UV, }; enum iio_event_type { diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 840cb990abe2..86eddd6241f3 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -12,6 +12,8 @@ #ifndef _LINUX_KEYCTL_H #define _LINUX_KEYCTL_H +#include <linux/types.h> + /* special process keyring shortcut IDs */ #define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */ #define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */ @@ -57,5 +59,13 @@ #define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */ #define KEYCTL_INVALIDATE 21 /* invalidate a key */ #define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */ +#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */ + +/* keyctl structures */ +struct keyctl_dh_params { + __s32 private; + __s32 prime; + __s32 base; +}; #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a7f1f8032ec1..05ebf475104c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -865,6 +865,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_SPAPR_TCE_64 125 #define KVM_CAP_ARM_PMU_V3 126 #define KVM_CAP_VCPU_ATTRIBUTES 127 +#define KVM_CAP_MAX_VCPU_ID 128 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index d5e38c73377c..e4f048ee7043 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h @@ -52,7 +52,7 @@ #if defined(__GLIBC__) /* Coordinate with glibc net/if.h header. */ -#if defined(_NET_IF_H) +#if defined(_NET_IF_H) && defined(__USE_MISC) /* GLIBC headers included first so don't define anything * that would already be defined. */ diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 7cc28ab05b87..309915f74492 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015, Intel Corporation. + * Copyright (c) 2014-2016, Intel Corporation. 
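Backing up to the keyctl.h hunk above: struct keyctl_dh_params packages three key IDs — the private exponent, the prime p, and the generator g — for the new KEYCTL_DH_COMPUTE operation. A hedged sketch of the raw syscall; the key IDs are placeholders for user-type keys loaded beforehand with add_key(2), and the trailing reserved argument is expected to be NULL here.

```c
/* Sketch: compute g^private mod p from three previously loaded keys. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

static long dh_compute(__s32 private_key, __s32 prime, __s32 base,
		       char *out, size_t outlen)
{
	struct keyctl_dh_params params = {
		.private = private_key,
		.prime   = prime,
		.base    = base,
	};

	/* keyctl(KEYCTL_DH_COMPUTE, &params, buffer, buflen, reserved) */
	return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params, out, outlen, NULL);
}

int main(void)
{
	char result[256];
	/* Key IDs below are placeholders for keys added with add_key(2). */
	long n = dh_compute(0x11111111, 0x22222222, 0x33333333,
			    result, sizeof(result));
	if (n < 0)
		perror("KEYCTL_DH_COMPUTE");
	else
		printf("derived %ld bytes of shared secret\n", n);
	return 0;
}
```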
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU Lesser General Public License, @@ -20,11 +20,45 @@ struct nd_cmd_smart { __u8 data[128]; } __packed; +#define ND_SMART_HEALTH_VALID (1 << 0) +#define ND_SMART_TEMP_VALID (1 << 1) +#define ND_SMART_SPARES_VALID (1 << 2) +#define ND_SMART_ALARM_VALID (1 << 3) +#define ND_SMART_USED_VALID (1 << 4) +#define ND_SMART_SHUTDOWN_VALID (1 << 5) +#define ND_SMART_VENDOR_VALID (1 << 6) +#define ND_SMART_TEMP_TRIP (1 << 0) +#define ND_SMART_SPARE_TRIP (1 << 1) +#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0) +#define ND_SMART_CRITICAL_HEALTH (1 << 1) +#define ND_SMART_FATAL_HEALTH (1 << 2) + +struct nd_smart_payload { + __u32 flags; + __u8 reserved0[4]; + __u8 health; + __u16 temperature; + __u8 spares; + __u8 alarm_flags; + __u8 life_used; + __u8 shutdown_state; + __u8 reserved1; + __u32 vendor_size; + __u8 vendor_data[108]; +} __packed; + struct nd_cmd_smart_threshold { __u32 status; __u8 data[8]; } __packed; +struct nd_smart_threshold_payload { + __u16 alarm_control; + __u16 temperature; + __u8 spares; + __u8 reserved[3]; +} __packed; + struct nd_cmd_dimm_flags { __u32 status; __u32 flags; @@ -125,6 +159,7 @@ enum { ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7, ND_CMD_VENDOR_EFFECT_LOG = 8, ND_CMD_VENDOR = 9, + ND_CMD_CALL = 10, }; enum { @@ -158,6 +193,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size", [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log", [ND_CMD_VENDOR] = "vendor", + [ND_CMD_CALL] = "cmd_call", }; if (cmd < ARRAY_SIZE(names) && names[cmd]) @@ -206,6 +242,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) #define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */ #define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */ #define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */ +#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */ enum nd_driver_flags { ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM, @@ -214,6 +251,7 @@ enum nd_driver_flags { ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO, ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM, ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK, + ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, }; enum { @@ -224,4 +262,44 @@ enum ars_masks { ARS_STATUS_MASK = 0x0000FFFF, ARS_EXT_STATUS_SHIFT = 16, }; + +/* + * struct nd_cmd_pkg + * + * is a wrapper to a quasi pass thru interface for invoking firmware + * associated with nvdimms. + * + * INPUT PARAMETERS + * + * nd_family corresponds to the firmware (e.g. DSM) interface. + * + * nd_command are the function index advertised by the firmware. + * + * nd_size_in is the size of the input parameters being passed to firmware + * + * OUTPUT PARAMETERS + * + * nd_fw_size is the size of the data firmware wants to return for + * the call. If nd_fw_size is greater than size of nd_size_out, only + * the first nd_size_out bytes are returned. 
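The nd_cmd_pkg envelope this comment describes (the struct itself follows right below) wraps vendor-specific DSM payloads for the new ND_CMD_CALL. A rough sketch of issuing such a passthrough call against a DIMM device node; the /dev/nmem0 path, the function index, and the buffer sizes are illustrative assumptions.

```c
/* Sketch: send a raw firmware call through ND_IOCTL_CALL. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>

int main(void)
{
	int fd = open("/dev/nmem0", O_RDWR);	/* device name is an assumption */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	__u32 in_size = 0, out_size = 128;	/* illustrative sizes */
	struct nd_cmd_pkg *pkg = calloc(1, sizeof(*pkg) + in_size + out_size);
	if (!pkg)
		return 1;

	pkg->nd_family   = NVDIMM_FAMILY_INTEL;
	pkg->nd_command  = 1;			/* hypothetical DSM function index */
	pkg->nd_size_in  = in_size;
	pkg->nd_size_out = out_size;

	if (ioctl(fd, ND_IOCTL_CALL, pkg) < 0)
		perror("ND_IOCTL_CALL");
	else
		printf("firmware returned %u bytes\n", pkg->nd_fw_size);

	free(pkg);
	close(fd);
	return 0;
}
```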
+ */ + +struct nd_cmd_pkg { + __u64 nd_family; /* family of commands */ + __u64 nd_command; + __u32 nd_size_in; /* INPUT: size of input args */ + __u32 nd_size_out; /* INPUT: size of payload */ + __u32 nd_reserved2[9]; /* reserved must be zero */ + __u32 nd_fw_size; /* OUTPUT: size fw wants to return */ + unsigned char nd_payload[]; /* Contents of call */ +}; + +/* These NVDIMM families represent pre-standardization command sets */ +#define NVDIMM_FAMILY_INTEL 0 +#define NVDIMM_FAMILY_HPE1 1 +#define NVDIMM_FAMILY_HPE2 2 + +#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ + struct nd_cmd_pkg) + #endif /* __NDCTL_H__ */ diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index c4b2a3f90829..50ff21f748b6 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -61,5 +61,6 @@ struct nvme_passthru_cmd { #define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) #define NVME_IOCTL_RESET _IO('N', 0x44) #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) +#define NVME_IOCTL_RESCAN _IO('N', 0x46) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 1becea86c73c..404095124ae2 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -670,7 +670,8 @@ #define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */ #define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ #define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID +#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ +#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DPC #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 @@ -946,4 +947,21 @@ #define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */ #define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */ +/* Downstream Port Containment */ +#define PCI_EXP_DPC_CAP 4 /* DPC Capability */ +#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */ +#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */ +#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */ +#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ + +#define PCI_EXP_DPC_CTL 6 /* DPC control */ +#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */ +#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */ + +#define PCI_EXP_DPC_STATUS 8 /* DPC Status */ +#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ +#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ + +#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 43fc8d213472..36ce552cf6a9 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -862,6 +862,7 @@ enum perf_event_type { }; #define PERF_MAX_STACK_DEPTH 127 +#define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index eba5914ba5d1..f4297c8a42fe 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -145,6 +145,8 @@ enum { TCA_POLICE_PEAKRATE, TCA_POLICE_AVRATE, TCA_POLICE_RESULT, + TCA_POLICE_TM, + TCA_POLICE_PAD, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; @@ -173,7 +175,7 @@ enum { TCA_U32_DIVISOR, 
TCA_U32_SEL, TCA_U32_POLICE, - TCA_U32_ACT, + TCA_U32_ACT, TCA_U32_INDEV, TCA_U32_PCNT, TCA_U32_MARK, diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index e513a4ee369b..99dbed8a8874 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -264,4 +264,10 @@ /* MVEBU UART */ #define PORT_MVEBU 114 +/* Microchip PIC32 UART */ +#define PORT_PIC32 115 + +/* MPS2 UART */ +#define PORT_MPS2UART 116 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h new file mode 100644 index 000000000000..413303d37b56 --- /dev/null +++ b/include/uapi/linux/sync_file.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_SYNC_H +#define _UAPI_LINUX_SYNC_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct sync_merge_data - data passed to merge ioctl + * @name: name of new fence + * @fd2: file descriptor of second fence + * @fence: returns the fd of the new fence to userspace + * @flags: merge_data flags + * @pad: padding for 64-bit alignment, should always be zero + */ +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +/** + * struct sync_fence_info - detailed fence information + * @obj_name: name of parent sync_timeline +* @driver_name: name of driver implementing the parent +* @status: status of the fence 0:active 1:signaled <0:error + * @flags: fence_info flags + * @timestamp_ns: timestamp of status change in nanoseconds + */ +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +/** + * struct sync_file_info - data returned from fence info ioctl + * @name: name of fence + * @status: status of fence. 1: signaled 0:active <0:error + * @flags: sync_file_info flags + * @num_fences number of fences in the sync_file + * @pad: padding for 64-bit alignment, should always be zero + * @sync_fence_info: pointer to array of structs sync_fence_info with all + * fences in the sync_file + */ +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + + __u64 sync_fence_info; +}; + +#define SYNC_IOC_MAGIC '>' + +/** + * Opcodes 0, 1 and 2 were burned during a API change to avoid users of the + * old API to get weird errors when trying to handling sync_files. The API + * change happened during the de-stage of the Sync Framework when there was + * no upstream users available. + */ + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data. Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data) + +/** + * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence + * + * Takes a struct sync_file_info_data with extra space allocated for pt_info. + * Caller should write the size of the buffer into len. On return, len is + * updated to reflect the total size of the sync_file_info_data including + * pt_info. + * + * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. 
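Both sync_file ioctls take a fence file descriptor as the ioctl fd. A short sketch of SYNC_IOC_MERGE (declared above): the second fence goes in fd2 and the kernel returns the merged fence's fd in the fence member. A complete program needs a driver that actually exports fence fds, so only the helper is shown here.

```c
/* Sketch: merge two fence fds into one that signals when both have. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Returns the new fence fd, or -1 on error. */
int merge_fences(int fd1, int fd2)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));
	strncpy(data.name, "merged-fence", sizeof(data.name) - 1);
	data.fd2 = fd2;

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;

	return data.fence;	/* kernel writes the new fd here */
}
```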
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field. + */ +#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info) + +#endif /* _UAPI_LINUX_SYNC_H */ diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h index 072e41e45ee2..66e4d8bcb16f 100644 --- a/include/uapi/linux/tty_flags.h +++ b/include/uapi/linux/tty_flags.h @@ -32,7 +32,13 @@ #define ASYNCB_MAGIC_MULTIPLIER 16 /* Use special CLK or divisor */ #define ASYNCB_LAST_USER 16 -/* Internal flags used only by kernel */ +/* + * Internal flags used only by kernel (read-only) + * + * WARNING: These flags are no longer used and have been superceded by the + * TTY_PORT_ flags in the iflags field (and not userspace-visible) + */ +#ifndef _KERNEL_ #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ #define ASYNCB_SUSPENDED 30 /* Serial port is suspended */ #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ @@ -43,7 +49,9 @@ #define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ #define ASYNCB_CONS_FLOW 23 /* flow control for console */ #define ASYNCB_FIRST_KERNEL 22 +#endif +/* Masks */ #define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) #define ASYNC_SUSPENDED (1U << ASYNCB_SUSPENDED) #define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) @@ -72,6 +80,8 @@ #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) +#ifndef _KERNEL_ +/* These flags are no longer used (and were always masked from userspace) */ #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) #define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) @@ -81,5 +91,6 @@ #define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) #define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) #define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) +#endif #endif diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index d5ce71607972..a8acc24765fe 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -105,6 +105,13 @@ #define USB_REQ_LOOPBACK_DATA_READ 0x16 #define USB_REQ_SET_INTERFACE_DS 0x17 +/* specific requests for USB Power Delivery */ +#define USB_REQ_GET_PARTNER_PDO 20 +#define USB_REQ_GET_BATTERY_STATUS 21 +#define USB_REQ_SET_PDO 22 +#define USB_REQ_GET_VDM 23 +#define USB_REQ_SEND_VDM 24 + /* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command, * used by hubs to put ports into a new L1 suspend state, except that it * forgot to define its number ... 
@@ -165,6 +172,22 @@ #define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ #define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ +/* + * Feature selectors from Table 9-8 USB Power Delivery spec + */ +#define USB_DEVICE_BATTERY_WAKE_MASK 40 +#define USB_DEVICE_OS_IS_PD_AWARE 41 +#define USB_DEVICE_POLICY_MODE 42 +#define USB_PORT_PR_SWAP 43 +#define USB_PORT_GOTO_MIN 44 +#define USB_PORT_RETURN_POWER 45 +#define USB_PORT_ACCEPT_PD_REQUEST 46 +#define USB_PORT_REJECT_PD_REQUEST 47 +#define USB_PORT_PORT_PD_RESET 48 +#define USB_PORT_C_PORT_PD_CHANGE 49 +#define USB_PORT_CABLE_PD_RESET 50 +#define USB_DEVICE_CHARGING_POLICY 54 + /** * struct usb_ctrlrequest - SETUP data for a USB device control request * @bRequestType: matches the USB bmRequestType field @@ -914,6 +937,104 @@ struct usb_ssp_cap_descriptor { } __attribute__((packed)); /* + * USB Power Delivery Capability Descriptor: + * Defines capabilities for PD + */ +/* Defines the various PD Capabilities of this device */ +#define USB_PD_POWER_DELIVERY_CAPABILITY 0x06 +/* Provides information on each battery supported by the device */ +#define USB_PD_BATTERY_INFO_CAPABILITY 0x07 +/* The Consumer characteristics of a Port on the device */ +#define USB_PD_PD_CONSUMER_PORT_CAPABILITY 0x08 +/* The provider characteristics of a Port on the device */ +#define USB_PD_PD_PROVIDER_PORT_CAPABILITY 0x09 + +struct usb_pd_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; /* set to USB_PD_POWER_DELIVERY_CAPABILITY */ + __u8 bReserved; + __le32 bmAttributes; +#define USB_PD_CAP_BATTERY_CHARGING (1 << 1) /* supports Battery Charging specification */ +#define USB_PD_CAP_USB_PD (1 << 2) /* supports USB Power Delivery specification */ +#define USB_PD_CAP_PROVIDER (1 << 3) /* can provide power */ +#define USB_PD_CAP_CONSUMER (1 << 4) /* can consume power */ +#define USB_PD_CAP_CHARGING_POLICY (1 << 5) /* supports CHARGING_POLICY feature */ +#define USB_PD_CAP_TYPE_C_CURRENT (1 << 6) /* supports power capabilities defined in the USB Type-C Specification */ + +#define USB_PD_CAP_PWR_AC (1 << 8) +#define USB_PD_CAP_PWR_BAT (1 << 9) +#define USB_PD_CAP_PWR_USE_V_BUS (1 << 14) + + __le16 bmProviderPorts; /* Bit zero refers to the UFP of the device */ + __le16 bmConsumerPorts; + __le16 bcdBCVersion; + __le16 bcdPDVersion; + __le16 bcdUSBTypeCVersion; +} __attribute__((packed)); + +struct usb_pd_cap_battery_info_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + /* Index of string descriptor shall contain the user friendly name for this battery */ + __u8 iBattery; + /* Index of string descriptor shall contain the Serial Number String for this battery */ + __u8 iSerial; + __u8 iManufacturer; + __u8 bBatteryId; /* uniquely identifies this battery in status Messages */ + __u8 bReserved; + /* + * Shall contain the Battery Charge value above which this + * battery is considered to be fully charged but not necessarily + * “topped off.” + */ + __le32 dwChargedThreshold; /* in mWh */ + /* + * Shall contain the minimum charge level of this battery such + * that above this threshold, a device can be assured of being + * able to power up successfully (see Battery Charging 1.2). 
+ */
+ __le32 dwWeakThreshold; /* in mWh */
+ __le32 dwBatteryDesignCapacity; /* in mWh */
+ __le32 dwBatteryLastFullchargeCapacity; /* in mWh */
+} __attribute__((packed));
+
+struct usb_pd_cap_consumer_port_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved;
+ __u8 bmCapabilities;
+/* port will operate under: */
+#define USB_PD_CAP_CONSUMER_BC (1 << 0) /* BC */
+#define USB_PD_CAP_CONSUMER_PD (1 << 1) /* PD */
+#define USB_PD_CAP_CONSUMER_TYPE_C (1 << 2) /* USB Type-C Current */
+ __le16 wMinVoltage; /* in 50mV units */
+ __le16 wMaxVoltage; /* in 50mV units */
+ __u16 wReserved;
+ __le32 dwMaxOperatingPower; /* in 10 mW - operating at steady state */
+ __le32 dwMaxPeakPower; /* in 10mW units - operating at peak power */
+ __le32 dwMaxPeakPowerTime; /* in 100ms units - duration of peak */
+#define USB_PD_CAP_CONSUMER_UNKNOWN_PEAK_POWER_TIME 0xffff
+} __attribute__((packed));
+
+struct usb_pd_cap_provider_port_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved1;
+ __u8 bmCapabilities;
+/* port will operate under: */
+#define USB_PD_CAP_PROVIDER_BC (1 << 0) /* BC */
+#define USB_PD_CAP_PROVIDER_PD (1 << 1) /* PD */
+#define USB_PD_CAP_PROVIDER_TYPE_C (1 << 2) /* USB Type-C Current */
+ __u8 bNumOfPDObjects;
+ __u8 bReserved2;
+ __le32 wPowerDataObject[];
+} __attribute__((packed));
+
+/*
 * Precision time measurement capability descriptor: advertised by devices and
 * hubs that support PTM
 */
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index 786f0773cc33..3738e5fb6a4d 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -12,10 +12,6 @@
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 #ifndef _UAPI_LINUX_UUID_H_
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index 978578bd1895..f69034887e68 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -8,7 +8,6 @@
 */
 #define MIN_NR_CONSOLES 1 /* must be at least 1 */
 #define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
-#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
 /* Note: the ioctl VT_GETSTATE does not work for consoles 16 and higher (since it returns a short) */
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 763bb6950402..0ec1da2ef652 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -228,7 +228,7 @@ struct nand_oobfree {
 * complete set of ECC information. The ioctl truncates the larger internal
 * structure to retain binary compatibility with the static declaration of the
 * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
- * the user struct, not the MAX size of the internal struct nand_ecclayout.
+ * the user struct, not the MAX size of the internal OOB layout representation.
 */
 struct nand_ecclayout_user {
 __u32 eccbytes;
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index a533cecab14f..98bebf8bef55 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
 * The major version changes when data structures change in an incompatible
 * way.
The driver must be the same for initialization to succeed. */ -#define HFI1_USER_SWMAJOR 5 +#define HFI1_USER_SWMAJOR 6 /* * Minor version differences are always compatible @@ -75,7 +75,12 @@ * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ -#define HFI1_USER_SWMINOR 0 +#define HFI1_USER_SWMINOR 1 + +/* + * We will encode the major/minor inside a single 32bit version number. + */ +#define HFI1_SWMAJOR_SHIFT 16 /* * Set of HW and driver capability/feature bits. @@ -107,19 +112,6 @@ #define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) #define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2) -/* - * If the unit is specified via open, HFI choice is fixed. If port is - * specified, it's also fixed. Otherwise we try to spread contexts - * across ports and HFIs, using different algorithms. WITHIN is - * the old default, prior to this mechanism. - */ -#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then - * ports; this is the default */ -#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin - * active ports within), then next HFI */ -#define HFI1_ALG_COUNT 2 /* number of algorithm choices */ - - /* User commands. */ #define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */ #define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */ @@ -127,7 +119,6 @@ #define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ #define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ #define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ -#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */ #define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ #define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ @@ -135,13 +126,46 @@ #define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ #define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ #define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */ -/* separate EPROM commands from normal PSM commands */ -#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */ -#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */ -/* range 66-74 no longer used */ -#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */ -#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */ -#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */ +#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */ + +/* + * User IOCTLs can not go above 128 if they do then see common.h and change the + * base for the snoop ioctl + */ +#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */ + +/* + * Make the ioctls occupy the last 0xf0-0xff portion of the IB range + */ +#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0) + +struct hfi1_cmd; +#define HFI1_IOCTL_ASSIGN_CTXT \ + _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info) +#define HFI1_IOCTL_CTXT_INFO \ + _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info) +#define HFI1_IOCTL_USER_INFO \ + _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info) +#define HFI1_IOCTL_TID_UPDATE \ + _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info) +#define HFI1_IOCTL_TID_FREE \ + _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info) +#define HFI1_IOCTL_CREDIT_UPD \ + _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD)) +#define HFI1_IOCTL_RECV_CTRL \ + _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int) +#define HFI1_IOCTL_POLL_TYPE \ + _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int) +#define HFI1_IOCTL_ACK_EVENT \ + _IOW(IB_IOCTL_MAGIC, 
__NUM(ACK_EVENT), unsigned long) +#define HFI1_IOCTL_SET_PKEY \ + _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16) +#define HFI1_IOCTL_CTXT_RESET \ + _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET)) +#define HFI1_IOCTL_TID_INVAL_READ \ + _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info) +#define HFI1_IOCTL_GET_VERS \ + _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int) #define _HFI1_EVENT_FROZEN_BIT 0 #define _HFI1_EVENT_LINKDOWN_BIT 1 @@ -199,9 +223,7 @@ struct hfi1_user_info { * Should be set to HFI1_USER_SWVERSION. */ __u32 userversion; - __u16 pad; - /* HFI selection algorithm, if unit has not selected */ - __u16 hfi1_alg; + __u32 pad; /* * If two or more processes wish to share a context, each process * must set the subcontext_cnt and subcontext_id to the same @@ -243,12 +265,6 @@ struct hfi1_tid_info { __u32 length; }; -struct hfi1_cmd { - __u32 type; /* command type */ - __u32 len; /* length of struct pointed to by add */ - __u64 addr; /* pointer to user structure */ -}; - enum hfi1_sdma_comp_state { FREE = 0, QUEUED, diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 8126c143a519..b6543d73d20a 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -226,6 +226,7 @@ struct ib_uverbs_ex_query_device_resp { struct ib_uverbs_odp_caps odp_caps; __u64 timestamp_mask; __u64 hca_core_clock; /* in KHZ */ + __u64 device_cap_flags_ex; }; struct ib_uverbs_query_port { diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 6e373d151cad..02fe8390c18f 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -135,10 +135,12 @@ enum { * Local service operations: * RESOLVE - The client requests the local service to resolve a path. * SET_TIMEOUT - The local service requests the client to set the timeout. + * IP_RESOLVE - The client requests the local service to resolve an IP to GID. 
*/ enum { RDMA_NL_LS_OP_RESOLVE = 0, RDMA_NL_LS_OP_SET_TIMEOUT, + RDMA_NL_LS_OP_IP_RESOLVE, RDMA_NL_LS_NUM_OPS }; @@ -176,6 +178,10 @@ struct rdma_ls_resolve_header { __u8 path_use; }; +struct rdma_ls_ip_resolve_header { + __u32 ifindex; +}; + /* Local service attribute type */ #define RDMA_NLA_F_MANDATORY (1 << 13) #define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \ @@ -193,6 +199,8 @@ struct rdma_ls_resolve_header { * TCLASS u8 * PKEY u16 cpu * QOS_CLASS u16 cpu + * IPV4 u32 BE + * IPV6 u8[16] BE */ enum { LS_NLA_TYPE_UNSPEC = 0, @@ -204,6 +212,8 @@ enum { LS_NLA_TYPE_TCLASS, LS_NLA_TYPE_PKEY, LS_NLA_TYPE_QOS_CLASS, + LS_NLA_TYPE_IPV4, + LS_NLA_TYPE_IPV6, LS_NLA_TYPE_MAX }; diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild index a7f27704f980..691984cb0b91 100644 --- a/include/uapi/sound/Kbuild +++ b/include/uapi/sound/Kbuild @@ -1,5 +1,6 @@ # UAPI Header export list header-y += asequencer.h +header-y += asoc.h header-y += asound.h header-y += asound_fm.h header-y += compress_offload.h @@ -10,3 +11,5 @@ header-y += hdsp.h header-y += hdspm.h header-y += sb16_csp.h header-y += sfnt_info.h +header-y += tlv.h +header-y += usb_stream.h diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index c4cc1e40b35c..e4701a3c6331 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -116,6 +116,14 @@ #define SND_SOC_TPLG_STREAM_PLAYBACK 0 #define SND_SOC_TPLG_STREAM_CAPTURE 1 +/* vendor tuple types */ +#define SND_SOC_TPLG_TUPLE_TYPE_UUID 0 +#define SND_SOC_TPLG_TUPLE_TYPE_STRING 1 +#define SND_SOC_TPLG_TUPLE_TYPE_BOOL 2 +#define SND_SOC_TPLG_TUPLE_TYPE_BYTE 3 +#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4 +#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5 + /* * Block Header. * This header precedes all object and object arrays below. @@ -132,6 +140,35 @@ struct snd_soc_tplg_hdr { __le32 count; /* number of elements in block */ } __attribute__((packed)); +/* vendor tuple for uuid */ +struct snd_soc_tplg_vendor_uuid_elem { + __le32 token; + char uuid[16]; +} __attribute__((packed)); + +/* vendor tuple for a bool/byte/short/word value */ +struct snd_soc_tplg_vendor_value_elem { + __le32 token; + __le32 value; +} __attribute__((packed)); + +/* vendor tuple for string */ +struct snd_soc_tplg_vendor_string_elem { + __le32 token; + char string[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; +} __attribute__((packed)); + +struct snd_soc_tplg_vendor_array { + __le32 size; /* size in bytes of the array, including all elements */ + __le32 type; /* SND_SOC_TPLG_TUPLE_TYPE_ */ + __le32 num_elems; /* number of elements in array */ + union { + struct snd_soc_tplg_vendor_uuid_elem uuid[0]; + struct snd_soc_tplg_vendor_value_elem value[0]; + struct snd_soc_tplg_vendor_string_elem string[0]; + }; +} __attribute__((packed)); + /* * Private data. 
* All topology objects may have private data that can be used by the driver or @@ -139,7 +176,10 @@ struct snd_soc_tplg_hdr { */ struct snd_soc_tplg_private { __le32 size; /* in bytes of private data */ - char data[0]; + union { + char data[0]; + struct snd_soc_tplg_vendor_array array[0]; + }; } __attribute__((packed)); /* @@ -383,7 +423,7 @@ struct snd_soc_tplg_pcm { __le32 size; /* in bytes of this structure */ char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - __le32 pcm_id; /* unique ID - used to match */ + __le32 pcm_id; /* unique ID - used to match with DAI link */ __le32 dai_id; /* unique ID - used to match */ __le32 playback; /* supports playback mode */ __le32 capture; /* supports capture mode */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 67bf49d8c944..609cadb8739d 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -672,7 +672,7 @@ enum { /* global timers (device member) */ #define SNDRV_TIMER_GLOBAL_SYSTEM 0 -#define SNDRV_TIMER_GLOBAL_RTC 1 +#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */ #define SNDRV_TIMER_GLOBAL_HPET 2 #define SNDRV_TIMER_GLOBAL_HRTIMER 3 diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index c1c1ca18abc0..0098a522d9f4 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -179,9 +179,9 @@ #define TRIGCON_TRIGMODE_W1BUF (1 << 10) #define TRIGCON_SWTRIGCMD_W0BUF (1 << 6) #define TRIGCON_TRIGMODE_W0BUF (1 << 5) -#define TRIGCON_HWTRIGMASK_I80_RGB (1 << 4) -#define TRIGCON_HWTRIGEN_I80_RGB (1 << 3) -#define TRIGCON_HWTRIG_INV_I80_RGB (1 << 2) +#define TRIGCON_HWTRIGMASK (1 << 4) +#define TRIGCON_HWTRIGEN (1 << 3) +#define TRIGCON_HWTRIG_INV (1 << 2) #define TRIGCON_SWTRIGCMD (1 << 1) #define TRIGCON_SWTRIGEN (1 << 0) diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index ad66589f2ae6..3a2a79401789 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -16,6 +16,7 @@ #include <linux/videodev2.h> #include <linux/bitmap.h> #include <linux/fb.h> +#include <linux/of.h> #include <media/v4l2-mediabus.h> #include <video/videomode.h> @@ -345,6 +346,7 @@ struct ipu_client_platformdata { int dc; int dp; int dma[2]; + struct device_node *of_node; }; #endif /* __DRM_IPU_H__ */ diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h index ddcc8ca7316b..19aa65a35546 100644 --- a/include/video/mipi_display.h +++ b/include/video/mipi_display.h @@ -115,6 +115,14 @@ enum { MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E, MIPI_DCS_SET_TEAR_SCANLINE = 0x44, MIPI_DCS_GET_SCANLINE = 0x45, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 0x52, /* MIPI DCS 1.3 */ + MIPI_DCS_WRITE_CONTROL_DISPLAY = 0x53, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_CONTROL_DISPLAY = 0x54, /* MIPI DCS 1.3 */ + MIPI_DCS_WRITE_POWER_SAVE = 0x55, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_POWER_SAVE = 0x56, /* MIPI DCS 1.3 */ + MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F, /* MIPI DCS 1.3 */ MIPI_DCS_READ_DDB_START = 0xA1, MIPI_DCS_READ_DDB_CONTINUE = 0xA8, }; diff --git a/include/video/sh_mipi_dsi.h b/include/video/sh_mipi_dsi.h deleted file mode 100644 index a01f197e6ac1..000000000000 --- a/include/video/sh_mipi_dsi.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Public SH-mobile MIPI DSI header - * - * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can 
redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef VIDEO_SH_MIPI_DSI_H -#define VIDEO_SH_MIPI_DSI_H - -enum sh_mipi_dsi_data_fmt { - MIPI_RGB888, - MIPI_RGB565, - MIPI_RGB666_LP, - MIPI_RGB666, - MIPI_BGR888, - MIPI_BGR565, - MIPI_BGR666_LP, - MIPI_BGR666, - MIPI_YUYV, - MIPI_UYVY, - MIPI_YUV420_L, - MIPI_YUV420, -}; - -#define SH_MIPI_DSI_HSABM (1 << 0) -#define SH_MIPI_DSI_HBPBM (1 << 1) -#define SH_MIPI_DSI_HFPBM (1 << 2) -#define SH_MIPI_DSI_BL2E (1 << 3) -#define SH_MIPI_DSI_VSEE (1 << 4) -#define SH_MIPI_DSI_HSEE (1 << 5) -#define SH_MIPI_DSI_HSAE (1 << 6) - -#define SH_MIPI_DSI_HSbyteCLK (1 << 24) -#define SH_MIPI_DSI_HS6divCLK (1 << 25) -#define SH_MIPI_DSI_HS4divCLK (1 << 26) - -#define SH_MIPI_DSI_SYNC_PULSES_MODE (SH_MIPI_DSI_VSEE | \ - SH_MIPI_DSI_HSEE | \ - SH_MIPI_DSI_HSAE) -#define SH_MIPI_DSI_SYNC_EVENTS_MODE (0) -#define SH_MIPI_DSI_SYNC_BURST_MODE (SH_MIPI_DSI_BL2E) - -struct sh_mipi_dsi_info { - enum sh_mipi_dsi_data_fmt data_format; - int channel; - int lane; - unsigned long flags; - u32 clksrc; - u32 phyctrl; /* for extra setting */ - unsigned int vsynw_offset; - int (*set_dot_clock)(struct platform_device *pdev, - void __iomem *base, - int enable); -}; - -#endif |
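Editor's note (not part of the commit): of the UAPI additions above, the new sync_file ioctls are the ones most likely to be driven directly from userspace, so a short usage sketch follows. It is only an illustration, assuming the query-then-fill convention of the mainline sync_file driver (SYNC_IOC_FILE_INFO with num_fences == 0 reports the fence count; a second call with a buffer fills in per-fence data). The fence file descriptors are hypothetical and would come from whatever driver exports fences; error handling is abbreviated.

/* Merge two fence fds and print per-fence details of the result. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

static int merge_and_describe(int fence_a, int fence_b)
{
	struct sync_merge_data merge;
	struct sync_file_info info;
	struct sync_fence_info *fences;
	uint32_t i;

	memset(&merge, 0, sizeof(merge));	/* flags and pad must be zero */
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	merge.fd2 = fence_b;
	if (ioctl(fence_a, SYNC_IOC_MERGE, &merge) < 0)
		return -1;			/* merge.fence now holds the new fd */

	memset(&info, 0, sizeof(info));		/* num_fences == 0: just ask for the count */
	if (ioctl(merge.fence, SYNC_IOC_FILE_INFO, &info) < 0)
		return -1;

	fences = calloc(info.num_fences, sizeof(*fences));
	if (!fences)
		return -1;
	info.sync_fence_info = (uint64_t)(uintptr_t)fences;
	if (ioctl(merge.fence, SYNC_IOC_FILE_INFO, &info) < 0) {
		free(fences);
		return -1;
	}

	printf("%s: status %d, %u fences\n", info.name, info.status, info.num_fences);
	for (i = 0; i < info.num_fences; i++)
		printf("  %s/%s: status %d\n", fences[i].obj_name,
		       fences[i].driver_name, fences[i].status);

	free(fences);
	return merge.fence;			/* caller owns the merged fence fd */
}

The same open-coded pattern applies to the other ioctls introduced in this diff (ND_IOCTL_CALL, NVME_IOCTL_RESCAN, the HFI1_IOCTL_* commands): zero the reserved fields of the UAPI struct, fill in the inputs, and pass it through ioctl() on the appropriate device node.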