author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-02 02:48:47 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-02 02:48:47 +0100 |
commit | 4bf772b14675411a69b3c807f73006de0fe4b649 (patch) | |
tree | b841e3ba0e3429695589cb0ab73871fa12f42c38 /include/drm | |
parent | Merge tag 'clk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cl... (diff) | |
parent | drm/ast: Load lut in crtc_commit (diff) | |
download | linux-4bf772b14675411a69b3c807f73006de0fe4b649.tar.xz linux-4bf772b14675411a69b3c807f73006de0fe4b649.zip |
Merge tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This seems to have been a comparatively quieter merge window, I assume
due to holidays etc. The "biggest" change is AMD header cleanups, which
merge/remove a bunch of them. The AMD gpu scheduler is now being made generic,
with the etnaviv driver wanting to reuse the code; hopefully other drivers
can go in the same direction.
Otherwise it's the usual lots of stuff in i915/amdgpu, not so much stuff
elsewhere.
Core:
- Add .last_close and .output_poll_changed helpers to reduce driver footprints
- Fix plane clipping
- Improved debug printing support
- Add panel orientation property
- Update edid derived properties at edid setting
- Reduction in fbdev driver footprint
- Move amdgpu scheduler into core for other drivers to use.
i915:
- Selftest and IGT improvements
- Fast boot prep work on IPS, pipe config
- HW workarounds for Cannonlake, Geminilake
- Cannonlake clock and HDMI2.0 fixes
- GPU cache invalidation and context switch improvements
- Display planes cleanup
- New PMU interface for perf queries
- New firmware support for KBL/SKL
- Geminilake HW workaround for performance
- Coffeelake stolen memory improvements
- GPU reset robustness work
- Cannonlake horizontal plane flipping
- GVT work
amdgpu/radeon:
- RV and Vega header file cleanups (lots of lines gone!)
- TTM operation context support
- 48-bit GPUVM support for Vega/RV
- ECC support for Vega
- Resizeable BAR support
- Multi-display sync support
- Enable swapout for reserved BOs during allocation
- S3 fixes on Raven
- GPU reset cleanup and fixes
- 2+1 level GPU page table
amdkfd:
- GFX7/8 SDMA user queues support
- Hardware scheduling for multiple processes
- dGPU prep work
rcar:
- Added R8A7743/5 support
- System suspend/resume support
sun4i:
- Multi-plane support for YUV formats
- A83T and LVDS support
msm:
- Devfreq support for GPU
tegra:
- Prep work for adding Tegra186 support
- Tegra186 HDMI support
- HDMI2.0 and zpos support by using generic helpers
tilcdc:
- Misc fixes
omapdrm:
- Support memory bandwidth limits
- DSI command mode panel cleanups
- DMM error handling
exynos:
- drop the old IPP subdriver.
etnaviv:
- Occlusion query fixes
- Job handling fixes
- Prep work for hooking in gpu scheduler
armada:
- Move closer to atomic modesetting
- Allow disabling primary plane if overlay is full screen
imx:
- Format modifier support
- Add tile prefetch to PRE
- Runtime PM support for PRG
ast:
- fix LUT loading"
* tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux: (1471 commits)
drm/ast: Load lut in crtc_commit
drm: Check for lessee in DROP_MASTER ioctl
drm: fix gpu scheduler link order
drm/amd/display: Demote error print to debug print when ATOM impl missing
dma-buf: fix reservation_object_wait_timeout_rcu once more v2
drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
drm/amd/amdgpu: Add Polaris version check
drm/amdgpu: Reenable manual GPU reset from sysfs
drm/amdgpu: disable MMHUB power gating on raven
drm/ttm: Don't unreserve swapped BOs that were previously reserved
drm/ttm: Don't add swapped BOs to swap-LRU list
drm/amdgpu: only check for ECC on Vega10
drm/amd/powerplay: Fix smu_table_entry.handle type
drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
drm: Fix PANEL_ORIENTATION_QUIRKS breaking the Kconfig DRM menuconfig
drm/radeon: fill in rb backend map on evergreen/ni.
drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
drm/ttm: only free pages rather than update global memory count together
drm/amdgpu: fix CPU based VM updates
drm/amdgpu: fix typo in amdgpu_vce_validate_bo
...
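The headline core change in this pull is the amdgpu scheduler becoming a generic DRM GPU scheduler, exposed through the new include/drm/gpu_scheduler.h shown in the diff below. The following is a minimal sketch of how a driver might wire it up, using only the entry points declared in that header; the foo_* ops, the foo_ring structure, the queue depth and the timeout are hypothetical placeholders, not code from this merge:

```c
#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical per-ring driver state wrapping the generic scheduler. */
struct foo_ring {
	struct drm_gpu_scheduler sched;
};

/* No extra dependencies beyond what the scheduler already tracks. */
static struct dma_fence *foo_sched_dependency(struct drm_sched_job *sched_job,
					      struct drm_sched_entity *s_entity)
{
	return NULL;
}

static struct dma_fence *foo_sched_run_job(struct drm_sched_job *sched_job)
{
	/* Submit the job to the hardware ring and return its hardware fence. */
	return NULL; /* placeholder */
}

static void foo_sched_timedout_job(struct drm_sched_job *sched_job)
{
	/* Kick off GPU reset / recovery for a hung job. */
}

static void foo_sched_free_job(struct drm_sched_job *sched_job)
{
	/* Release driver-side job resources once the scheduler is done with it. */
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.dependency	= foo_sched_dependency,
	.run_job	= foo_sched_run_job,
	.timedout_job	= foo_sched_timedout_job,
	.free_job	= foo_sched_free_job,
};

static int foo_ring_init(struct foo_ring *ring)
{
	/* One scheduler instance per hardware ring, up to 16 jobs in flight. */
	return drm_sched_init(&ring->sched, &foo_sched_ops,
			      16, 0, msecs_to_jiffies(500), "foo-gfx");
}
```

Jobs would then be created against a drm_sched_entity via drm_sched_entity_init() and drm_sched_job_init(), and queued with drm_sched_entity_push_job(), all declared in the same header.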
Diffstat (limited to 'include/drm')
35 files changed, 1061 insertions, 528 deletions
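Among the header changes below, drm_connector.h and drm_mode_config.h add the new panel-orientation infrastructure. Here is a minimal sketch of how a panel driver might use it; foo_panel_setup_orientation is a hypothetical helper, and the assumption is that the driver already knows (e.g. from firmware or a quirk table) that the panel is mounted upside-down:

```c
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

/*
 * Hypothetical helper: record that the panel is mounted upside-down so the
 * fbdev helpers can rotate in software and userspace sees the new
 * "panel orientation" connector property.
 */
static int foo_panel_setup_orientation(struct drm_connector *connector,
					const struct drm_display_mode *mode)
{
	/* Defaults to DRM_MODE_PANEL_ORIENTATION_UNKNOWN after connector init. */
	connector->display_info.panel_orientation =
		DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;

	/* Attaches the optional connector property if orientation is known. */
	return drm_connector_init_panel_orientation_property(connector,
							     mode->hdisplay,
							     mode->vdisplay);
}
```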
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 59be1232d005..c6666cd09347 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -75,6 +75,7 @@ #include <drm/drm_sarea.h> #include <drm/drm_drv.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_pci.h> #include <drm/drm_file.h> #include <drm/drm_debugfs.h> @@ -94,212 +95,16 @@ struct dma_buf_attachment; struct pci_dev; struct pci_controller; -/* - * The following categories are defined: - * - * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... - * This is the category used by the DRM_DEBUG() macro. - * - * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... - * This is the category used by the DRM_DEBUG_DRIVER() macro. - * - * KMS: used in the modesetting code. - * This is the category used by the DRM_DEBUG_KMS() macro. - * - * PRIME: used in the prime code. - * This is the category used by the DRM_DEBUG_PRIME() macro. - * - * ATOMIC: used in the atomic code. - * This is the category used by the DRM_DEBUG_ATOMIC() macro. - * - * VBL: used for verbose debug message in the vblank code - * This is the category used by the DRM_DEBUG_VBL() macro. - * - * Enabling verbose debug messages is done through the drm.debug parameter, - * each category being enabled by a bit. - * - * drm.debug=0x1 will enable CORE messages - * drm.debug=0x2 will enable DRIVER messages - * drm.debug=0x3 will enable CORE and DRIVER messages - * ... - * drm.debug=0x3f will enable all messages - * - * An interesting feature is that it's possible to enable verbose logging at - * run-time by echoing the debug value in its sysfs node: - * # echo 0xf > /sys/module/drm/parameters/debug - */ -#define DRM_UT_NONE 0x00 -#define DRM_UT_CORE 0x01 -#define DRM_UT_DRIVER 0x02 -#define DRM_UT_KMS 0x04 -#define DRM_UT_PRIME 0x08 -#define DRM_UT_ATOMIC 0x10 -#define DRM_UT_VBL 0x20 -#define DRM_UT_STATE 0x40 -#define DRM_UT_LEASE 0x80 - /***********************************************************************/ /** \name DRM template customization defaults */ /*@{*/ /***********************************************************************/ -/** \name Macros to make printk easier */ -/*@{*/ - -#define _DRM_PRINTK(once, level, fmt, ...) \ - do { \ - printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ - ##__VA_ARGS__); \ - } while (0) - -#define DRM_INFO(fmt, ...) \ - _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE(fmt, ...) \ - _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN(fmt, ...) \ - _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) - -#define DRM_INFO_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) - -/** - * Error output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ - fmt, ##__VA_ARGS__) -#define DRM_ERROR(fmt, ...) \ - drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) - -/** - * Rate limited error output. Like DRM_ERROR() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) 
\ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - \ - if (__ratelimit(&_rs)) \ - DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ -}) -#define DRM_ERROR_RATELIMITED(fmt, ...) \ - DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) - -#define DRM_DEV_INFO(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ - ##__VA_ARGS__) - -#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ -({ \ - static bool __print_once __read_mostly; \ - if (!__print_once) { \ - __print_once = true; \ - DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ - } \ -}) - -/** - * Debug output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_DRIVER(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_KMS(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_PRIME(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_ATOMIC(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_VBL(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) - -#define DRM_DEBUG_LEASE(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) - -#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - if (__ratelimit(&_rs)) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ - __func__, "", fmt, ##args); \ -}) - -/** - * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ - DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) -#define DRM_DEBUG_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) -#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) -#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) -#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) 
\ - DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) - -/* Format strings and argument splitters to simplify printing - * various "complex" objects - */ - -/*@}*/ - -/***********************************************************************/ /** \name Internal types and structures */ /*@{*/ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) - /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 5afd6e364fb6..1c27526c499e 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -189,12 +189,40 @@ struct drm_private_state_funcs { struct drm_private_state *state); }; +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + */ struct drm_private_obj { + /** + * @state: Current atomic state for this driver private object. + */ struct drm_private_state *state; + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. + */ const struct drm_private_state_funcs *funcs; }; +/** + * struct drm_private_state - base struct for driver private object state + * @state: backpointer to global drm_atomic_state + * + * Currently only contains a backpointer to the overall atomic update, but in + * the future also might hold synchronization information similar to e.g. + * &drm_crtc.commit. + */ struct drm_private_state { struct drm_atomic_state *state; }; @@ -218,6 +246,10 @@ struct __drm_private_objs_state { * @num_private_objs: size of the @private_objs array * @private_objs: pointer to array of private object pointers * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). */ struct drm_atomic_state { struct kref ref; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index d2b56cc657e9..4842ee9485ce 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -38,6 +38,13 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + const struct drm_rect *clip, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 5971577016a2..ed38df4ac204 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -177,6 +177,35 @@ enum drm_link_status { }; /** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! 
+ * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. + * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** * struct drm_display_info - runtime data about the connected sink * * Describes a given display (e.g. CRT or flat panel) and its limitations. For @@ -224,6 +253,15 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_YCRCB420 (1<<3) /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. + */ + int panel_orientation; + + /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones * as used to describe the pixel format in framebuffers, and also don't @@ -271,6 +309,11 @@ struct drm_display_info { bool dvi_dual; /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even * more stuff redundant with @bus_formats. */ @@ -705,7 +748,6 @@ struct drm_cmdline_mode { * @force: a DRM_FORCE_<foo> state for forced mode sets * @override_edid: has the EDID been overwritten through debugfs for testing? * @encoder_ids: valid encoders for this connector - * @encoder: encoder driving this connector, if any * @eld: EDID-like data, if present * @latency_present: AV delay info from ELD, if found * @video_latency: video latency info from ELD, if found @@ -875,7 +917,13 @@ struct drm_connector { #define DRM_CONNECTOR_MAX_ENCODER 3 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; - struct drm_encoder *encoder; /* currently active encoder */ + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. 
+ */ + struct drm_encoder *encoder; #define MAX_ELD_BYTES 128 /* EDID bits */ @@ -1035,6 +1083,8 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, const struct edid *edid); void drm_mode_connector_set_link_status_property(struct drm_connector *connector, uint64_t link_status); +int drm_connector_init_panel_orientation_property( + struct drm_connector *connector, int width, int height); /** * struct drm_tile_group - Tile group metadata diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index e21af87a2f3c..7c4fa32f3fc6 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -17,6 +17,7 @@ struct drm_vblank_crtc; struct drm_sg_mem; struct drm_local_map; struct drm_vma_offset_manager; +struct drm_fb_helper; struct inode; @@ -185,6 +186,14 @@ struct drm_device { struct drm_vma_offset_manager *vma_offset_manager; /*@} */ int switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; }; #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2623a1255481..da58a428c8d7 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -635,6 +635,7 @@ # define DP_SET_POWER_D0 0x1 # define DP_SET_POWER_D3 0x2 # define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 #define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ # define DP_EDP_11 0x00 diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 412e83a4d3db..d32b688eb346 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -39,6 +39,7 @@ struct drm_minor; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; +struct drm_printer; /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 @@ -429,6 +430,20 @@ struct drm_driver { void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); /** + * @gem_print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). 
+ */ + void (*gem_print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by core @@ -592,13 +607,6 @@ struct drm_driver { int dev_priv_size; }; -__printf(6, 7) -void drm_dev_printk(const struct device *dev, const char *level, - unsigned int category, const char *function_name, - const char *prefix, const char *format, ...); -__printf(3, 4) -void drm_printk(const char *level, unsigned int category, - const char *format, ...); extern unsigned int drm_debug; int drm_dev_init(struct drm_device *dev, diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index efe6d5a8e834..8d89a9c3748d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -333,7 +333,6 @@ struct drm_encoder; struct drm_connector; struct drm_display_mode; -void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, @@ -357,6 +356,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, bool is_hdmi2_sink); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + struct drm_connector *connector, const struct drm_display_mode *mode); void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index ee4cfbe63c52..fb299696c7c4 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -88,7 +88,6 @@ struct drm_encoder_funcs { * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver - * @crtc: currently bound CRTC * @bridge: bridge associated to the encoder * @funcs: control functions * @helper_private: mid-layer private data @@ -166,6 +165,11 @@ struct drm_encoder { */ uint32_t possible_clones; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check + * &drm_connector_state.crtc. 
+ */ struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index faf56c53df28..d532f88a8d55 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,6 +16,13 @@ struct drm_mode_fb_cmd2; struct drm_plane; struct drm_plane_state; +int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int max_conn_count, + const struct drm_framebuffer_funcs *funcs); +int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_cma_fbdev_fini(struct drm_device *dev); + struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, unsigned int preferred_bpp, unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs); @@ -36,11 +43,5 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); -#ifdef CONFIG_DEBUG_FS -struct seq_file; - -int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); -#endif - #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 33fe95927742..b069433e7fc1 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -33,6 +33,7 @@ struct drm_fb_helper; #include <drm/drm_crtc.h> +#include <drm/drm_device.h> #include <linux/kgdb.h> enum mode_set_atomic { @@ -48,6 +49,7 @@ struct drm_fb_helper_crtc { struct drm_mode_set mode_set; struct drm_display_mode *desired_mode; int x, y; + int rotation; }; /** @@ -159,6 +161,13 @@ struct drm_fb_helper { int connector_count; int connector_info_alloc_count; /** + * @sw_rotations: + * Bitmask of all rotations requested for panel-orientation which + * could not be handled in hardware. If only one bit is set + * fbdev->fbcon_rotate_hint gets set to the requested rotation. + */ + int sw_rotations; + /** * @connector_info: * * Array of per-connector information. 
Do not iterate directly, but use @@ -267,6 +276,7 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist); +int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper); ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); @@ -310,6 +320,16 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); + +int drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_helper_fbdev_teardown(struct drm_device *dev); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -321,11 +341,17 @@ static inline int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper, int max_conn) { + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + return 0; } static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) { + if (helper && helper->dev) + helper->dev->fb_helper = NULL; } static inline int drm_fb_helper_blank(int blank, struct fb_info *info) @@ -398,6 +424,11 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info, { } +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) @@ -507,6 +538,32 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, return 0; } +static inline int +drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count) +{ + /* So drivers can use it to free the struct */ + dev->fb_helper = fb_helper; + + return 0; +} + +static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev) +{ + dev->fb_helper = NULL; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + #endif static inline int diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 4c5ee4ae54df..c50502c656e5 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -121,6 +121,12 @@ struct drm_framebuffer { * @base: base modeset object structure, contains the reference count. */ struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + /** * @format: framebuffer format information */ @@ -264,7 +270,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) * * This functions returns the framebuffer's reference count. 
*/ -static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) { return kref_read(&fb->base.refcount); } diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 520e3feb502c..19777145cf8e 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -9,7 +9,9 @@ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations * @base: base GEM object * @paddr: physical address of the backing memory - * @sgt: scatter/gather table for imported PRIME buffers + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. * @vaddr: kernel virtual address of the backing memory */ struct drm_gem_cma_object { @@ -21,11 +23,8 @@ struct drm_gem_cma_object { void *vaddr; }; -static inline struct drm_gem_cma_object * -to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) -{ - return container_of(gem_obj, struct drm_gem_cma_object, base); -} +#define to_drm_gem_cma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_cma_object, base) #ifndef CONFIG_MMU #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ @@ -91,9 +90,8 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, unsigned long flags); #endif -#ifdef CONFIG_DEBUG_FS -void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); -#endif +void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 8d10fc97801c..101f566ae43d 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -386,7 +386,7 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, * @color: opaque tag value to use for this node * @mode: fine-tune the allocation search and placement * - * This is a simplified version of drm_mm_insert_node_in_range_generic() with no + * This is a simplified version of drm_mm_insert_node_in_range() with no * range restrictions applied. * * The preallocated node must be cleared to 0. diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b0ce26d71296..2cb6f02df64a 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -269,6 +269,9 @@ struct drm_mode_config_funcs { * state easily. If this hook is implemented, drivers must also * implement @atomic_state_clear and @atomic_state_free. * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * * RETURNS: * * A new &drm_atomic_state on success or NULL on failure. @@ -290,6 +293,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call drm_atomic_state_default_clear() * to clear common state. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. */ void (*atomic_state_clear)(struct drm_atomic_state *state); @@ -302,6 +308,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call * drm_atomic_state_default_release() to release common resources. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. 
*/ void (*atomic_state_free)(struct drm_atomic_state *state); }; @@ -751,6 +760,13 @@ struct drm_mode_config { */ struct drm_property *non_desktop_property; + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; @@ -768,7 +784,7 @@ struct drm_mode_config { bool allow_fb_modifiers; /** - * @modifiers: Plane property to list support modifier/format + * @modifiers_property: Plane property to list support modifier/format * combination. */ struct drm_property *modifiers_property; @@ -776,6 +792,15 @@ struct drm_mode_config { /* cursor size */ uint32_t cursor_width, cursor_height; + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). + */ + struct drm_atomic_state *suspend_state; + const struct drm_mode_config_helper_funcs *helper_private; }; diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index cb0ec92e11e6..efa337f03129 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -34,4 +34,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_device *dev, int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + #endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 16646c44b7df..3e76ca805b0f 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -801,9 +801,6 @@ struct drm_connector_helper_funcs { * resolution can call drm_add_modes_noedid(), and mark the preferred * one using drm_set_preferred_mode(). * - * Finally drivers that support audio probably want to update the ELD - * data, too, using drm_edid_to_eld(). - * * This function is only called after the @detect hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 571615079230..8185e3468a23 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -474,8 +474,8 @@ enum drm_plane_type { * @format_types: array of formats supported by this plane * @format_count: number of formats supported * @format_default: driver hasn't supplied supported formats for the plane - * @crtc: currently bound CRTC - * @fb: currently bound fb + * @modifiers: array of modifiers supported by this plane + * @modifier_count: number of modifiers supported * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by * drm_mode_set_config_internal() to implement correct refcounting. * @funcs: helper functions @@ -512,7 +512,17 @@ struct drm_plane { uint64_t *modifiers; unsigned int modifier_count; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check &drm_plane_state.crtc. + */ struct drm_crtc *crtc; + + /** + * @fb: Currently bound framebuffer, only really meaningful for + * non-atomic drivers. Atomic drivers should instead check + * &drm_plane_state.fb. 
+ */ struct drm_framebuffer *fb; struct drm_framebuffer *old_fb; diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 7c8a00ceadb7..8aa49c0ecd4d 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -38,11 +38,6 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -int drm_plane_helper_check_state(struct drm_plane_state *state, - const struct drm_rect *clip, - int min_scale, int max_scale, - bool can_position, - bool can_update_disabled); int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index ca4d7c6321f2..2a4a42e59a47 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -80,6 +80,29 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) \ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file @@ -128,4 +151,200 @@ static inline struct drm_printer drm_debug_printer(const char *prefix) }; return p; } + +/* + * The following categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... 
+ * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_NONE 0x00 +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 +#define DRM_UT_STATE 0x40 +#define DRM_UT_LEASE 0x80 + +__printf(6, 7) +void drm_dev_printk(const struct device *dev, const char *level, + unsigned int category, const char *function_name, + const char *prefix, const char *format, ...); +__printf(3, 4) +void drm_printk(const char *level, unsigned int category, + const char *format, ...); + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + do { \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ + fmt, ##__VA_ARGS__) +#define DRM_ERROR(fmt, ...) \ + drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ + ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * Debug output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_DRIVER(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_KMS(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_PRIME(fmt, ...) 
\ + drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_VBL(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_LEASE(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ + __func__, "", fmt, ##args); \ +}) + +/** + * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ + DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) +#define DRM_DEBUG_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) +#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) +#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) + #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 43e2f382d2f0..3980602472c0 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -33,36 +33,31 @@ struct drm_syncobj_cb; /** * struct drm_syncobj - sync object. * - * This structure defines a generic sync object which wraps a dma fence. + * This structure defines a generic sync object which wraps a &dma_fence. */ struct drm_syncobj { /** - * @refcount: - * - * Reference count of this object. + * @refcount: Reference count of this object. */ struct kref refcount; /** * @fence: * NULL or a pointer to the fence bound to this object. * - * This field should not be used directly. Use drm_syncobj_fence_get - * and drm_syncobj_replace_fence instead. + * This field should not be used directly. Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. */ - struct dma_fence *fence; + struct dma_fence __rcu *fence; /** - * @cb_list: - * List of callbacks to call when the fence gets replaced + * @cb_list: List of callbacks to call when the &fence gets replaced. */ struct list_head cb_list; /** - * @lock: - * locks cb_list and write-locks fence. + * @lock: Protects &cb_list and write-locks &fence. */ spinlock_t lock; /** - * @file: - * a file backing for this syncobj. + * @file: A file backing for this syncobj. 
*/ struct file *file; }; @@ -73,7 +68,7 @@ typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, /** * struct drm_syncobj_cb - callback for drm_syncobj_add_callback * @node: used by drm_syncob_add_callback to append this struct to - * syncobj::cb_list + * &drm_syncobj.cb_list * @func: drm_syncobj_func_t to call * * This struct will be initialized by drm_syncobj_add_callback, additional @@ -92,7 +87,7 @@ void drm_syncobj_free(struct kref *kref); * drm_syncobj_get - acquire a syncobj reference * @obj: sync object * - * This acquires additional reference to @obj. It is illegal to call this + * This acquires an additional reference to @obj. It is illegal to call this * without already holding a reference. No locks required. */ static inline void @@ -111,6 +106,17 @@ drm_syncobj_put(struct drm_syncobj *obj) kref_put(&obj->refcount, drm_syncobj_free); } +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ static inline struct dma_fence * drm_syncobj_fence_get(struct drm_syncobj *syncobj) { diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000000..a803988d8579 --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +int drm_get_panel_orientation_quirk(int width, int height); + +#endif diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index d84d52f6d2b1..8758df94e9a0 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -152,7 +152,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) * Start address of @node for page-based addressing. 0 if the node does not * have an offset allocated. */ -static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) { return node->vm_node.start; } diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000000..dfd54fb94e10 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,173 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include <drm/spsc_queue.h> +#include <linux/dma-fence.h> + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to corresponding hardware ring based on scheduling + * policy. +*/ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + struct drm_gpu_scheduler *sched; + + spinlock_t queue_lock; + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; /* points to ctx's guilty */ +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct drm_sched_rq { + spinlock_t lock; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +struct drm_sched_fence { + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; + struct drm_gpu_scheduler *sched; + spinlock_t lock; + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side +*/ +struct drm_sched_backend_ops { + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + void (*timedout_job)(struct drm_sched_job *sched_job); + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head 
ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty); +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h new file mode 100644 index 000000000000..0789e8d0a0e1 --- /dev/null +++ b/include/drm/gpu_scheduler_trace.h @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_scheduler +#define TRACE_INCLUDE_FILE gpu_scheduler_trace + +TRACE_EVENT(drm_sched_job, + TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), + TP_ARGS(sched_job, entity), + TP_STRUCT__entry( + __field(struct drm_sched_entity *, entity) + __field(struct dma_fence *, fence) + __field(const char *, name) + __field(uint64_t, id) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = entity; + __entry->id = sched_job->id; + __entry->fence = &sched_job->s_fence->finished; + __entry->name = sched_job->sched->name; + __entry->job_count = spsc_queue_count(&entity->job_queue); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) +); + +TRACE_EVENT(drm_sched_process_job, + TP_PROTO(struct drm_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct dma_fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->finished; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 4e1b274e1164..c9e5a6621b95 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -36,6 +36,9 @@ extern bool i915_gpu_lower(void); extern bool i915_gpu_busy(void); extern bool i915_gpu_turbo_disable(void); +/* Exported from arch/x86/kernel/early-quirks.c */ +extern struct resource intel_graphics_stolen_res; + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 972a25633525..5db0458dd832 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -373,24 +373,46 @@ /* CFL S */ #define INTEL_CFL_S_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ #define INTEL_CFL_S_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ #define INTEL_CFL_H_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ -/* CFL U */ +/* CFL U GT1 */ +#define INTEL_CFL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info), \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ #define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT1_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info) /* CNL U 2+2 */ #define INTEL_CNL_U_GT2_IDS(info) \ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index c5db7975c640..2324c84a25c0 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -5,9 +5,8 @@ #define _DRM_INTEL_GTT_H void intel_gtt_get(u64 *gtt_total, - u32 *stolen_size, phys_addr_t *mappable_base, - u64 *mappable_end); + resource_size_t *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000000..125f096c88cb --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h index 83346ddb9dba..5d0e82b36eaf 100644 --- a/include/drm/tinydrm/mipi-dbi.h +++ b/include/drm/tinydrm/mipi-dbi.h @@ -72,10 +72,12 @@ void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); void mipi_dbi_hw_reset(struct mipi_dbi *mipi); bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); - +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) * @mipi: MIPI structure diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index 4774fe3d4273..07a9a11fe19d 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -19,16 +19,12 @@ * @drm: DRM device * @pipe: Display pipe structure * @dirty_lock: Serializes framebuffer flushing - * @fbdev_cma: CMA fbdev 
structure - * @suspend_state: Atomic state when suspended * @fb_funcs: Framebuffer functions used when creating framebuffers */ struct tinydrm_device { struct drm_device *drm; struct drm_simple_display_pipe pipe; struct mutex dirty_lock; - struct drm_fbdev_cma *fbdev_cma; - struct drm_atomic_state *suspend_state; const struct drm_framebuffer_funcs *fb_funcs; }; @@ -46,6 +42,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) */ #define TINYDRM_GEM_DRIVER_OPS \ .gem_free_object = tinydrm_gem_cma_free_object, \ + .gem_print_info = drm_gem_cma_print_info, \ .gem_vm_ops = &drm_gem_cma_vm_ops, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ @@ -81,7 +78,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) .type = DRM_MODE_TYPE_DRIVER, \ .clock = 1 /* pass validation */ -void tinydrm_lastclose(struct drm_device *drm); void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); struct drm_gem_object * tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, @@ -92,8 +88,6 @@ int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, struct drm_driver *driver); int devm_tinydrm_register(struct tinydrm_device *tdev); void tinydrm_shutdown(struct tinydrm_device *tdev); -int tinydrm_suspend(struct tinydrm_device *tdev); -int tinydrm_resume(struct tinydrm_device *tdev); void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index fa07be197945..2cd025c2abe7 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -224,7 +224,6 @@ struct ttm_buffer_object { */ uint64_t offset; /* GPU address space is independent of CPU word size */ - uint32_t cur_placement; struct sg_table *sg; @@ -260,6 +259,25 @@ struct ttm_bo_kmap_obj { }; /** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @allow_reserved_eviction: Allow eviction of reserved BOs. + * @resv: Reservation object to allow reserved evictions with. + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + bool allow_reserved_eviction; + struct reservation_object *resv; + uint64_t bytes_moved; +}; + +/** * ttm_bo_reference - reference a struct ttm_buffer_object * * @bo: The buffer object. @@ -288,8 +306,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) * Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_wait(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait); +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); /** * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo @@ -300,17 +317,15 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, * * Returns true if the placement is compatible */ -extern bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags); +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, + uint32_t *new_flags); /** * ttm_bo_validate * * @bo: The buffer object. * @placement: Proposed placement for the buffer object. - * @interruptible: Sleep interruptible if sleeping. - * @no_wait_gpu: Return immediately if the GPU is busy. + * @ctx: validation parameters. 
* * Changes placement and caching policy of the buffer object * according proposed placement. @@ -320,10 +335,9 @@ extern bool ttm_bo_mem_compat(struct ttm_placement *placement, * -EBUSY if no_wait is true and buffer busy. * -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); /** * ttm_bo_unref @@ -332,7 +346,7 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, * * Unreference and clear a pointer to a buffer object. */ -extern void ttm_bo_unref(struct ttm_buffer_object **bo); +void ttm_bo_unref(struct ttm_buffer_object **bo); /** * ttm_bo_add_to_lru @@ -344,7 +358,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); * This function must be called with struct ttm_bo_global::lru_lock held, and * is typically called immediately prior to unreserving a bo. */ -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * ttm_bo_del_from_lru @@ -356,7 +370,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); * and is usually called just immediately after the bo has been reserved to * avoid recursive reservation from lru lists. */ -extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); /** * ttm_bo_move_to_lru_tail @@ -367,7 +381,7 @@ extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ -extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue @@ -376,15 +390,14 @@ extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); * Returns * True if the workqueue was queued at the time */ -extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, - int resched); +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); /** * ttm_bo_eviction_valuable @@ -411,8 +424,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * -EBUSY if the buffer is busy and no_wait is true. * -ERESTARTSYS if interrupted by a signal. */ -extern int -ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); /** * ttm_bo_synccpu_write_release: @@ -421,7 +433,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); * * Releases a synccpu lock. */ -extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); /** * ttm_bo_acc_size @@ -448,8 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. - * @interruptible: If needing to sleep to wait for GPU resources, - * sleep interruptible. + * @ctx: TTM operation context for memory allocation. 
* @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would @@ -480,18 +491,18 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init_reserved(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + struct ttm_operation_ctx *ctx, + struct file *persistent_swap_storage, + size_t acc_size, + struct sg_table *sg, + struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_init @@ -531,19 +542,13 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ - -extern int ttm_bo_init(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, + unsigned long size, enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, bool interrubtible, + struct file *persistent_swap_storage, size_t acc_size, + struct sg_table *sg, struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_create @@ -569,15 +574,11 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while waiting for resources. */ - -extern int ttm_bo_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interruptible, - struct file *persistent_swap_storage, - struct ttm_buffer_object **p_bo); +int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t page_alignment, bool interruptible, + struct file *persistent_swap_storage, + struct ttm_buffer_object **p_bo); /** * ttm_bo_init_mm @@ -594,9 +595,9 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, * -ENOMEM: Not enough memory. * May also return driver-specified errors. */ +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_size); -extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size); /** * ttm_bo_clean_mm * @@ -623,8 +624,7 @@ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, * -EINVAL: invalid or uninitialized memory type. * -EBUSY: There are still buffers left in this memory type. 
*/ - -extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_bo_evict_mm @@ -644,8 +644,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); * -ERESTARTSYS: The call was interrupted by a signal while waiting to * evict a buffer. */ - -extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_kmap_obj_virtual @@ -658,7 +657,6 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); * If *is_iomem is 1 on return, the virtual address points to an io memory area, * that should strictly be accessed by the iowriteXX() and similar functions. */ - static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, bool *is_iomem) { @@ -682,9 +680,8 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ - -extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, - unsigned long num_pages, struct ttm_bo_kmap_obj *map); +int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); /** * ttm_bo_kunmap @@ -693,8 +690,7 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, * * Unmaps a kernel map set up by ttm_bo_kmap. */ - -extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); +void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); /** * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. @@ -706,20 +702,7 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * This function is intended to be called by the fbdev mmap method * if the fbdev address space is to be backed by a bo. */ - -extern int ttm_fbdev_mmap(struct vm_area_struct *vma, - struct ttm_buffer_object *bo); - -/** - * ttm_bo_default_iomem_pfn - get a pfn for a page offset - * - * @bo: the BO we need to look up the pfn for - * @page_offset: offset inside the BO to look up. - * - * Calculate the PFN for iomem based mappings during page fault - */ -unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, - unsigned long page_offset); +int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); /** * ttm_bo_mmap - mmap out of the ttm device address space. @@ -731,9 +714,8 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, * This function is intended to be called by the device mmap method. * if the device address space is to be backed by the bo manager. */ - -extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev); +int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); /** * ttm_bo_io @@ -755,11 +737,12 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * the function may return -ERESTARTSYS if * interrupted by a signal. 
*/ +ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); -extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, - const char __user *wbuf, char __user *rbuf, - size_t count, loff_t *f_pos, bool write); - -extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); +int ttm_bo_swapout(struct ttm_bo_global *glob, + struct ttm_operation_ctx *ctx); +void ttm_bo_swapout_all(struct ttm_bo_device *bdev); +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 5f821a9b3a1f..94064b126e8e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -352,7 +352,8 @@ struct ttm_bo_driver { * Returns: * -ENOMEM: Out of memory. */ - int (*ttm_tt_populate)(struct ttm_tt *ttm); + int (*ttm_tt_populate)(struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate @@ -409,15 +410,13 @@ struct ttm_bo_driver { * @bo: the buffer to move * @evict: whether this motion is evicting the buffer from * the graphics address space - * @interruptible: Use interruptible sleeps if possible when sleeping. - * @no_wait: whether this should give up and return -EBUSY - * if this move would require sleeping + * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer * * Move a buffer between two memory regions. */ int (*move)(struct ttm_buffer_object *bo, bool evict, - bool interruptible, bool no_wait_gpu, + struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem); /** @@ -524,7 +523,6 @@ struct ttm_bo_global { struct kobject kobj; struct ttm_mem_global *mem_glob; struct page *dummy_read_page; - struct ttm_mem_shrink shrink; struct mutex device_list_mutex; spinlock_t lru_lock; @@ -627,12 +625,12 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) * Returns: * NULL: Out of memory. */ -extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); /** * ttm_tt_fini @@ -641,8 +639,8 @@ extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bde * * Free memory of ttm_tt structure */ -extern void ttm_tt_fini(struct ttm_tt *ttm); -extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); +void ttm_tt_fini(struct ttm_tt *ttm); +void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); /** * ttm_ttm_bind: @@ -652,7 +650,8 @@ extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); * * Bind the pages of @ttm to an aperture location identified by @bo_mem */ -extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, + struct ttm_operation_ctx *ctx); /** * ttm_ttm_destroy: @@ -661,7 +660,7 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); * * Unbind, unpopulate and destroy common struct ttm_tt. 
*/ -extern void ttm_tt_destroy(struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_tt *ttm); /** * ttm_ttm_unbind: @@ -670,7 +669,7 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm); * * Unbind a struct ttm_tt. */ -extern void ttm_tt_unbind(struct ttm_tt *ttm); +void ttm_tt_unbind(struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -679,7 +678,7 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm); * * Swap in a previously swap out ttm_tt. */ -extern int ttm_tt_swapin(struct ttm_tt *ttm); +int ttm_tt_swapin(struct ttm_tt *ttm); /** * ttm_tt_set_placement_caching: @@ -694,9 +693,8 @@ extern int ttm_tt_swapin(struct ttm_tt *ttm); * hit RAM. This function may be very costly as it involves global TLB * and cache flushes and potential page splitting / combining. */ -extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); -extern int ttm_tt_swapout(struct ttm_tt *ttm, - struct file *persistent_swap_storage); +int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); +int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); /** * ttm_tt_unpopulate - free pages from a ttm @@ -705,7 +703,7 @@ extern int ttm_tt_swapout(struct ttm_tt *ttm, * * Calls the driver method to free all pages from a ttm */ -extern void ttm_tt_unpopulate(struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_tt *ttm); /* * ttm_bo.c @@ -720,8 +718,7 @@ extern void ttm_tt_unpopulate(struct ttm_tt *ttm); * Returns true if the memory described by @mem is PCI memory, * false otherwise. */ -extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); +bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); /** * ttm_bo_mem_space @@ -742,21 +739,19 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, * fragmentation or concurrent allocators. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. */ -extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + struct ttm_operation_ctx *ctx); -extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -extern void ttm_bo_global_release(struct drm_global_reference *ref); -extern int ttm_bo_global_init(struct drm_global_reference *ref); +void ttm_bo_global_release(struct drm_global_reference *ref); +int ttm_bo_global_init(struct drm_global_reference *ref); -extern int ttm_bo_device_release(struct ttm_bo_device *bdev); +int ttm_bo_device_release(struct ttm_bo_device *bdev); /** * ttm_bo_device_init @@ -773,18 +768,17 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); * Returns: * !0: Failure. 
*/ -extern int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_global *glob, - struct ttm_bo_driver *driver, - struct address_space *mapping, - uint64_t file_page_offset, bool need_dma32); +int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, + struct ttm_bo_driver *driver, + struct address_space *mapping, + uint64_t file_page_offset, bool need_dma32); /** * ttm_bo_unmap_virtual * * @bo: tear down the virtual mappings for this BO */ -extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); /** * ttm_bo_unmap_virtual @@ -793,16 +787,15 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * * The caller must take ttm_mem_io_lock before calling this function. */ -extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); -extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); -extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, - bool interruptible); -extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); -extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * __ttm_bo_reserve: @@ -836,14 +829,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, if (WARN_ON(ticket)) return -EBUSY; - success = ww_mutex_trylock(&bo->resv->lock); + success = reservation_object_trylock(bo->resv); return success ? 0 : -EBUSY; } if (interruptible) - ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); + ret = reservation_object_lock_interruptible(bo->resv, ticket); else - ret = ww_mutex_lock(&bo->resv->lock, ticket); + ret = reservation_object_lock(bo->resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -941,18 +934,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, } /** - * __ttm_bo_unreserve - * @bo: A pointer to a struct ttm_buffer_object. - * - * Unreserve a previous reservation of @bo where the buffer object is - * already on lru lists. - */ -static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) -{ - ww_mutex_unlock(&bo->resv->lock); -} - -/** * ttm_bo_unreserve * * @bo: A pointer to a struct ttm_buffer_object. @@ -966,20 +947,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); spin_unlock(&bo->glob->lru_lock); } - __ttm_bo_unreserve(bo); -} - -/** - * ttm_bo_unreserve_ticket - * @bo: A pointer to a struct ttm_buffer_object. - * @ticket: ww_acquire_ctx used for reserving - * - * Unreserve a previous reservation of @bo made with @ticket. - */ -static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, - struct ww_acquire_ctx *t) -{ - ttm_bo_unreserve(bo); + reservation_object_unlock(bo->resv); } /* @@ -1008,9 +976,9 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * !0: Failure. 
*/ -extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_move_memcpy @@ -1030,9 +998,9 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * !0: Failure. */ -extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_free_old_node @@ -1041,7 +1009,7 @@ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * * Utility function to free an old placement after a successful move. */ -extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); +void ttm_bo_free_old_node(struct ttm_buffer_object *bo); /** * ttm_bo_move_accel_cleanup. @@ -1058,10 +1026,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * destroyed when the move is complete. This will help pipeline * buffer moves. */ - -extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + struct ttm_mem_reg *new_mem); /** * ttm_bo_pipeline_move. @@ -1087,7 +1054,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * Utility function that returns the pgprot_t that should be used for * setting up a PTE with the caching model indicated by @c_state. */ -extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); +pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; @@ -1108,11 +1075,11 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; * for TT memory. This function uses the linux agpgart interface to * bind and unbind memory backing a ttm_tt. */ -extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, - struct agp_bridge_data *bridge, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -int ttm_agp_tt_populate(struct ttm_tt *ttm); +struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); #endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 2c1e3598effe..8936285b6543 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -35,20 +35,7 @@ #include <linux/errno.h> #include <linux/kobject.h> #include <linux/mm.h> - -/** - * struct ttm_mem_shrink - callback to shrink TTM memory usage. - * - * @do_shrink: The callback function. - * - * Arguments to the do_shrink functions are intended to be passed using - * inheritance. That is, the argument class derives from struct ttm_mem_shrink, - * and can be accessed using container_of(). - */ - -struct ttm_mem_shrink { - int (*do_shrink) (struct ttm_mem_shrink *); -}; +#include "ttm_bo_api.h" /** * struct ttm_mem_global - Global memory accounting structure. 
@@ -76,7 +63,7 @@ struct ttm_mem_shrink { struct ttm_mem_zone; struct ttm_mem_global { struct kobject kobj; - struct ttm_mem_shrink *shrink; + struct ttm_bo_global *bo_glob; struct workqueue_struct *swap_queue; struct work_struct work; spinlock_t lock; @@ -90,67 +77,15 @@ struct ttm_mem_global { #endif }; -/** - * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object - * - * @shrink: The object to initialize. - * @func: The callback function. - */ - -static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, - int (*func) (struct ttm_mem_shrink *)) -{ - shrink->do_shrink = func; -} - -/** - * ttm_mem_register_shrink - register a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to register with. - * @shrink: An initialized struct ttm_mem_shrink object to register. - * - * Returns: - * -EBUSY: There's already a callback registered. (May change). - */ - -static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - if (glob->shrink != NULL) { - spin_unlock(&glob->lock); - return -EBUSY; - } - glob->shrink = shrink; - spin_unlock(&glob->lock); - return 0; -} - -/** - * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to unregister from. - * @shrink: A previously registert struct ttm_mem_shrink object. - * - */ - -static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - BUG_ON(glob->shrink != shrink); - glob->shrink = NULL; - spin_unlock(&glob->lock); -} - extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob); extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - bool no_wait, bool interruptible); + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, uint64_t size); extern size_t ttm_round_pot(size_t size); diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 593811362a91..4d9b019d253c 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void); * * Add backing pages to all of @ttm */ -int ttm_pool_populate(struct ttm_tt *ttm); +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_pool_unpopulate: @@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Populates and DMA maps pages to fullfil a ttm_dma_populate() request */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, + struct ttm_operation_ctx *ctx); /** * Unpopulates and DMA unmaps pages as part of a @@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void); */ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, + struct ttm_operation_ctx *ctx); void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); #else @@ -106,7 +108,8 @@ static inline int 
ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, - struct device *dev) + struct device *dev, + struct ttm_operation_ctx *ctx) { return -ENOMEM; }
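The common thread through the TTM hunks above is the new struct ttm_operation_ctx: ttm_bo_validate(), ttm_bo_init_reserved(), ttm_bo_mem_space(), the populate callbacks and the memory accounting helpers now take a context pointer instead of separate interruptible/no_wait_gpu booleans. As a rough guide to what this means for callers, the sketch below converts a hypothetical driver call site; the helper function and its arguments are placeholders, only ttm_bo_validate() and the context fields come from this diff:

#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical driver helper showing the old booleans folded into a context. */
static int example_pin_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* sleep interruptibly if we must wait */
		.no_wait_gpu = false,	/* OK to wait for the GPU */
	};

	/* Before this series: ttm_bo_validate(bo, placement, true, false); */
	return ttm_bo_validate(bo, placement, &ctx);
}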