Diffstat (limited to 'drivers/gpu')
138 files changed, 4791 insertions(+), 3567 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 785127cb281b..1368826ef284 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,7 +9,6 @@ menuconfig DRM depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU select I2C select I2C_ALGOBIT - select SLOW_WORK help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select @@ -96,6 +95,7 @@ config DRM_I915 select FB_CFB_IMAGEBLIT # i915 depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select BACKLIGHT_LCD_SUPPORT if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index 1c3649242208..9afe495c12c7 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c @@ -31,6 +31,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/export.h> #include "drmP.h" # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 529a0dbe9fc6..08ccefedb327 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c @@ -32,6 +32,7 @@ * Pauli Nieminen <suokkos-at-gmail-dot-com> */ +#include <linux/export.h> #include "drm_buffer.h" /** diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 61e1ef90d4e5..30372f7b2d45 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -36,6 +36,7 @@ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/log2.h> +#include <linux/export.h> #include <asm/shmparam.h> #include "drmP.h" diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 0e3bd5b54b78..592865381c6e 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -28,6 +28,7 @@ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ +#include <linux/export.h> #include "drmP.h" #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fe738f05309b..8323fc389840 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -31,6 +31,7 @@ */ #include <linux/list.h> #include <linux/slab.h> +#include <linux/export.h> #include "drm.h" #include "drmP.h" #include "drm_crtc.h" @@ -162,6 +163,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, + { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = @@ -170,6 +172,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] = { DRM_MODE_ENCODER_TMDS, "TMDS" }, { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, + { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, }; char *drm_get_encoder_name(struct drm_encoder *encoder) @@ -463,8 +466,10 @@ void drm_connector_init(struct drm_device *dev, list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; - drm_connector_attach_property(connector, - dev->mode_config.edid_property, 0); + if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) + drm_connector_attach_property(connector, + dev->mode_config.edid_property, + 0); drm_connector_attach_property(connector, dev->mode_config.dpms_property, 0); @@ -1868,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, } if (num_clips && 
clips_ptr) { + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { + ret = -EINVAL; + goto out_err1; + } clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); if (!clips) { ret = -ENOMEM; @@ -2113,8 +2122,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, property->num_values = num_values; INIT_LIST_HEAD(&property->enum_blob_list); - if (name) + if (name) { strncpy(property->name, name, DRM_PROP_NAME_LEN); + property->name[DRM_PROP_NAME_LEN-1] = '\0'; + } list_add_tail(&property->head, &dev->mode_config.property_list); return property; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f2366440b738..d2619d72cece 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -29,6 +29,9 @@ * Jesse Barnes <jesse.barnes@intel.com> */ +#include <linux/export.h> +#include <linux/moduleparam.h> + #include "drmP.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" @@ -453,6 +456,30 @@ done: EXPORT_SYMBOL(drm_crtc_helper_set_mode); +static int +drm_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + + /* Decouple all encoders and their attached connectors from this crtc */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc != crtc) + continue; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder != encoder) + continue; + + connector->encoder = NULL; + } + } + + drm_helper_disable_unused_functions(dev); + return 0; +} + /** * drm_crtc_helper_set_config - set a new config from userspace * @crtc: CRTC to setup @@ -481,6 +508,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) struct drm_connector *save_connectors, *connector; int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_mode_set save_set; int ret = 0; int i; @@ -506,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) (int)set->num_connectors, set->x, set->y); } else { DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); - set->mode = NULL; - set->num_connectors = 0; + return drm_crtc_helper_disable(set->crtc); } dev = set->crtc->dev; @@ -553,6 +580,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) save_connectors[count++] = *connector; } + save_set.crtc = set->crtc; + save_set.mode = &set->crtc->mode; + save_set.x = set->crtc->x; + save_set.y = set->crtc->y; + save_set.fb = set->crtc->fb; + /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ if (set->crtc->fb != set->fb) { @@ -718,6 +751,12 @@ fail: *connector = save_connectors[count++]; } + /* Try to restore the config */ + if (mode_changed && + !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, + save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + kfree(save_connectors); kfree(save_encoders); kfree(save_crtcs); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b9dc2629ea9a..1c7a1c0d3edd 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -33,6 +33,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/export.h> #include "drmP.h" #if defined(CONFIG_DEBUG_FS) @@ -117,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, tmp->minor = minor; tmp->dent = ent; tmp->info_ent = &files[i]; 
- list_add(&(tmp->list), &(minor->debugfs_nodes.list)); + + mutex_lock(&minor->debugfs_lock); + list_add(&tmp->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); } return 0; @@ -145,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, char name[64]; int ret; - INIT_LIST_HEAD(&minor->debugfs_nodes.list); + INIT_LIST_HEAD(&minor->debugfs_list); + mutex_init(&minor->debugfs_lock); sprintf(name, "%d", minor_id); minor->debugfs_root = debugfs_create_dir(name, root); if (!minor->debugfs_root) { @@ -191,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, struct drm_info_node *tmp; int i; + mutex_lock(&minor->debugfs_lock); for (i = 0; i < count; i++) { - list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { + list_for_each_safe(pos, q, &minor->debugfs_list) { tmp = list_entry(pos, struct drm_info_node, list); if (tmp->info_ent == &files[i]) { debugfs_remove(tmp->dent); @@ -201,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, } } } + mutex_unlock(&minor->debugfs_lock); return 0; } EXPORT_SYMBOL(drm_debugfs_remove_files); diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index 252cbd74df0e..cfb4e333ec0f 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -33,6 +33,7 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/export.h> #include "drmP.h" /** diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7a87e0878f30..40c187c60f44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -48,6 +48,7 @@ #include <linux/debugfs.h> #include <linux/slab.h> +#include <linux/export.h> #include "drmP.h" #include "drm_core.h" @@ -124,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fe39c3570538..3e927ce7557d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -30,6 +30,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/export.h> #include "drmP.h" #include "drm_edid.h" #include "drm_edid_modes.h" diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index d62c064fbaa0..fb943551060e 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -24,6 +24,8 @@ * */ +#include <linux/module.h> + #include "drm_encoder_slave.h" /** diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f7c6854eb4dd..80fe39d98b0c 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -31,6 +31,7 @@ #include <linux/sysrq.h> #include <linux/slab.h> #include <linux/fb.h> +#include <linux/module.h> #include "drmP.h" #include "drm_crtc.h" #include "drm_fb_helper.h" diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 2ec7d48fc4a8..4911e1d1dcf2 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -37,6 +37,7 @@ #include "drmP.h" #include <linux/poll.h> #include <linux/slab.h> +#include <linux/module.h> /* from BKL pushdown: note that nothing else serializes idr_find() */ 
DEFINE_MUTEX(drm_global_mutex); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index e3a75688f3cd..68dc8744b630 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -36,6 +36,7 @@ #include "drm_hashtab.h" #include <linux/hash.h> #include <linux/slab.h> +#include <linux/export.h> int drm_ht_create(struct drm_open_hash *ht, unsigned int order) { diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 4a058c7af6c0..ddd70db45f76 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -29,6 +29,7 @@ */ #include <linux/compat.h> #include <linux/ratelimit.h> +#include <linux/export.h> #include "drmP.h" #include "drm_core.h" diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3830e9e478c0..44a5d0ad8b7c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -40,6 +40,7 @@ #include <linux/slab.h> #include <linux/vgaarb.h> +#include <linux/export.h> /* Access macro for slots in vblank timestamp ringbuffer. */ #define vblanktimestamp(dev, crtc, count) ( \ @@ -109,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've * disabled. Needed to prevent races in case of delayed irq's. - * Disable preemption, so vblank_time_lock is held as short as - * possible, even under a kernel with PREEMPT_RT patches. */ - preempt_disable(); spin_lock_irqsave(&dev->vblank_time_lock, irqflags); dev->driver->disable_vblank(dev, crtc); @@ -163,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) clear_vblank_timestamps(dev, crtc); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); - preempt_enable(); } static void vblank_disable_fn(unsigned long arg) @@ -406,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev) /* * Wake up any waiters so they don't hang. */ - spin_lock_irqsave(&dev->vbl_lock, irqflags); - for (i = 0; i < dev->num_crtcs; i++) { - DRM_WAKEUP(&dev->vbl_queue[i]); - dev->vblank_enabled[i] = 0; - dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); + if (dev->num_crtcs) { + spin_lock_irqsave(&dev->vbl_lock, irqflags); + for (i = 0; i < dev->num_crtcs; i++) { + DRM_WAKEUP(&dev->vbl_queue[i]); + dev->vblank_enabled[i] = 0; + dev->last_vblank[i] = + dev->driver->get_vblank_counter(dev, i); + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); if (!irq_enabled) return -EINVAL; @@ -885,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { - /* Disable preemption while holding vblank_time_lock. Do - * it explicitely to guard against PREEMPT_RT kernel. - */ - preempt_disable(); spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); if (!dev->vblank_enabled[crtc]) { /* Enable vblank irqs under vblank_time_lock protection. 
@@ -908,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) } } spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); - preempt_enable(); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); @@ -1124,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, trace_drm_vblank_event_delivered(current->pid, pipe, vblwait->request.sequence); } else { + /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); vblwait->reply.sequence = vblwait->request.sequence; } @@ -1204,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } - if (flags & _DRM_VBLANK_EVENT) + if (flags & _DRM_VBLANK_EVENT) { + /* must hold on to the vblank ref until the event fires + * drm_vblank_put will be called asynchronously + */ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index c9b805000a11..c8b6b66d428d 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -34,6 +34,7 @@ */ #include <linux/highmem.h> +#include <linux/export.h> #include "drmP.h" /** diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 959186cbf328..961fb54f4266 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -45,6 +45,7 @@ #include "drm_mm.h" #include <linux/slab.h> #include <linux/seq_file.h> +#include <linux/export.h> #define MM_UNUSED_TARGET 4 diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ad74fb4dc542..fb8e46b4e8bc 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/list_sort.h> +#include <linux/export.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index b6a19cb07caf..d4d10b7880cf 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -39,6 +39,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/dma-mapping.h> +#include <linux/export.h> #include "drmP.h" /**********************************************************************/ diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 2a8b6265ad3d..ae9db5e2b27c 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -25,6 +25,7 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/export.h> #include "drmP.h" /** diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 0f3c4e3cafc3..fff87221f9e9 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -39,6 +39,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/export.h> #include "drmP.h" /*************************************************** diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c index 34664587a74e..cebce45f4429 100644 --- a/drivers/gpu/drm/drm_sman.c +++ b/drivers/gpu/drm/drm_sman.c @@ -36,6 +36,7 @@ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ +#include <linux/export.h> #include "drm_sman.h" struct drm_owner_item { diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 2eee8e016b38..0f9ef9bf6730 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -16,6 +16,7 @@ #include <linux/kdev_t.h> #include <linux/gfp.h> #include <linux/err.h> +#include <linux/export.h> #include "drm_sysfs.h" #include "drm_core.h" diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 206d2300d873..445003f4dc93 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -1,5 +1,6 @@ #include "drmP.h" #include <linux/usb.h> +#include <linux/export.h> #ifdef CONFIG_USB int drm_get_usb_dev(struct usb_interface *interface, diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 5db96d45fc71..8c03eaf41448 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -34,6 +34,7 @@ */ #include "drmP.h" +#include <linux/export.h> #if defined(__ia64__) #include <linux/efi.h> #include <linux/slab.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 6f8afea94fc9..2bb07bca511a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -27,82 +27,84 @@ #include "drm.h" #include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" -static DEFINE_MUTEX(exynos_drm_buf_lock); - static int lowlevel_buffer_allocate(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s\n", __FILE__); - entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, - (dma_addr_t *)&entry->paddr, GFP_KERNEL); - if (!entry->paddr) { + buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size, + &buffer->dma_addr, GFP_KERNEL); + if (!buffer->kvaddr) { DRM_ERROR("failed to allocate buffer.\n"); return -ENOMEM; } - DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", - (unsigned int)entry->vaddr, entry->paddr, entry->size); + DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", + (unsigned long)buffer->kvaddr, + (unsigned long)buffer->dma_addr, + buffer->size); return 0; } static void lowlevel_buffer_deallocate(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s.\n", __FILE__); - if (entry->paddr && entry->vaddr && entry->size) - dma_free_writecombine(dev->dev, entry->size, entry->vaddr, - entry->paddr); + if (buffer->dma_addr && buffer->size) + dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr, + (dma_addr_t)buffer->dma_addr); else - DRM_DEBUG_KMS("entry data is null.\n"); + DRM_DEBUG_KMS("buffer data are invalid.\n"); } -struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, +struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, unsigned 
int size) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; DRM_DEBUG_KMS("%s.\n", __FILE__); + DRM_DEBUG_KMS("desired size = 0x%x\n", size); - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { - DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); return ERR_PTR(-ENOMEM); } - entry->size = size; + buffer->size = size; /* * allocate memory region with size and set the memory information - * to vaddr and paddr of a entry object. + * to vaddr and dma_addr of a buffer object. */ - if (lowlevel_buffer_allocate(dev, entry) < 0) { - kfree(entry); - entry = NULL; + if (lowlevel_buffer_allocate(dev, buffer) < 0) { + kfree(buffer); + buffer = NULL; return ERR_PTR(-ENOMEM); } - return entry; + return buffer; } void exynos_drm_buf_destroy(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s.\n", __FILE__); - if (!entry) { - DRM_DEBUG_KMS("entry is null.\n"); + if (!buffer) { + DRM_DEBUG_KMS("buffer is null.\n"); return; } - lowlevel_buffer_deallocate(dev, entry); + lowlevel_buffer_deallocate(dev, buffer); - kfree(entry); - entry = NULL; + kfree(buffer); + buffer = NULL; } MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h index 045d59eab01a..6e91f9caa5db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h @@ -26,28 +26,15 @@ #ifndef _EXYNOS_DRM_BUF_H_ #define _EXYNOS_DRM_BUF_H_ -/* - * exynos drm buffer entry structure. - * - * @paddr: physical address of allocated memory. - * @vaddr: kernel virtual address of allocated memory. - * @size: size of allocated memory. - */ -struct exynos_drm_buf_entry { - dma_addr_t paddr; - void __iomem *vaddr; - unsigned int size; -}; - /* allocate physical memory. */ -struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, +struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, unsigned int size); -/* get physical memory information of a drm framebuffer. */ -struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); +/* get memory information of a drm framebuffer. */ +struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); /* remove allocated physical memory. 
*/ void exynos_drm_buf_destroy(struct drm_device *dev, - struct exynos_drm_buf_entry *entry); + struct exynos_drm_gem_buf *buffer); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 985d9e768728..d620b0784257 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -37,6 +37,8 @@ struct exynos_drm_connector { struct drm_connector drm_connector; + uint32_t encoder_id; + struct exynos_drm_manager *manager; }; /* convert exynos_video_timings to drm_display_mode */ @@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode, DRM_DEBUG_KMS("%s\n", __FILE__); mode->clock = timing->pixclock / 1000; + mode->vrefresh = timing->refresh; mode->hdisplay = timing->xres; mode->hsync_start = mode->hdisplay + timing->left_margin; @@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode, mode->vsync_start = mode->vdisplay + timing->upper_margin; mode->vsync_end = mode->vsync_start + timing->vsync_len; mode->vtotal = mode->vsync_end + timing->lower_margin; + + if (timing->vmode & FB_VMODE_INTERLACED) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + + if (timing->vmode & FB_VMODE_DOUBLE) + mode->flags |= DRM_MODE_FLAG_DBLSCAN; } /* convert drm_display_mode to exynos_video_timings */ @@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing, memset(timing, 0, sizeof(*timing)); timing->pixclock = mode->clock * 1000; - timing->refresh = mode->vrefresh; + timing->refresh = drm_mode_vrefresh(mode); timing->xres = mode->hdisplay; timing->left_margin = mode->hsync_start - mode->hdisplay; @@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing, static int exynos_drm_connector_get_modes(struct drm_connector *connector) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = manager->display_ops; unsigned int count; DRM_DEBUG_KMS("%s\n", __FILE__); - if (!display) { - DRM_DEBUG_KMS("display is null.\n"); + if (!display_ops) { + DRM_DEBUG_KMS("display_ops is null.\n"); return 0; } @@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) * P.S. in case of lcd panel, count is always 1 if success * because lcd panel has only one mode. 
*/ - if (display->get_edid) { + if (display_ops->get_edid) { int ret; void *edid; @@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) return 0; } - ret = display->get_edid(manager->dev, connector, + ret = display_ops->get_edid(manager->dev, connector, edid, MAX_EDID); if (ret < 0) { DRM_ERROR("failed to get edid data.\n"); @@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode = drm_mode_create(connector->dev); struct fb_videomode *timing; - if (display->get_timing) - timing = display->get_timing(manager->dev); + if (display_ops->get_timing) + timing = display_ops->get_timing(manager->dev); else { drm_mode_destroy(connector->dev, mode); return 0; @@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) static int exynos_drm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = manager->display_ops; struct fb_videomode timing; int ret = MODE_BAD; @@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, convert_to_video_timing(&timing, mode); - if (display && display->check_timing) - if (!display->check_timing(manager->dev, (void *)&timing)) + if (display_ops && display_ops->check_timing) + if (!display_ops->check_timing(manager->dev, (void *)&timing)) ret = MODE_OK; return ret; @@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct drm_mode_object *obj; + struct drm_encoder *encoder; + DRM_DEBUG_KMS("%s\n", __FILE__); - return connector->encoder; + obj = drm_mode_object_find(dev, exynos_connector->encoder_id, + DRM_MODE_OBJECT_ENCODER); + if (!obj) { + DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", + exynos_connector->encoder_id); + return NULL; + } + + encoder = obj_to_encoder(obj); + + return encoder; } static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { @@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { static enum drm_connector_status exynos_drm_connector_detect(struct drm_connector *connector, bool force) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = + manager->display_ops; enum drm_connector_status status = connector_status_disconnected; DRM_DEBUG_KMS("%s\n", __FILE__); - if (display && display->is_connected) { - if (display->is_connected(manager->dev)) + if (display_ops && display_ops->is_connected) { + if (display_ops->is_connected(manager->dev)) status = connector_status_connected; else status = connector_status_disconnected; @@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, 
connector = &exynos_connector->drm_connector; - switch (manager->display->type) { + switch (manager->display_ops->type) { case EXYNOS_DISPLAY_TYPE_HDMI: type = DRM_MODE_CONNECTOR_HDMIA; + connector->interlace_allowed = true; + connector->polled = DRM_CONNECTOR_POLL_HPD; break; default: type = DRM_MODE_CONNECTOR_Unknown; @@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, if (err) goto err_connector; + exynos_connector->encoder_id = encoder->base.id; + exynos_connector->manager = manager; connector->encoder = encoder; + err = drm_mode_connector_attach_encoder(connector, encoder); if (err) { DRM_ERROR("failed to attach a connector to a encoder\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 9337e5e2dbb6..ee43cc220853 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -29,36 +29,17 @@ #include "drmP.h" #include "drm_crtc_helper.h" +#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ drm_crtc) /* - * Exynos specific crtc postion structure. - * - * @fb_x: offset x on a framebuffer to be displyed - * - the unit is screen coordinates. - * @fb_y: offset y on a framebuffer to be displayed - * - the unit is screen coordinates. - * @crtc_x: offset x on hardware screen. - * @crtc_y: offset y on hardware screen. - * @crtc_w: width of hardware screen. - * @crtc_h: height of hardware screen. - */ -struct exynos_drm_crtc_pos { - unsigned int fb_x; - unsigned int fb_y; - unsigned int crtc_x; - unsigned int crtc_y; - unsigned int crtc_w; - unsigned int crtc_h; -}; - -/* * Exynos specific crtc structure. * * @drm_crtc: crtc object. 
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc) exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_crtc_mode_set); - exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); } -static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, - struct exynos_drm_crtc_pos *pos) +int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, + struct drm_framebuffer *fb, + struct drm_display_mode *mode, + struct exynos_drm_crtc_pos *pos) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; unsigned int actual_w; unsigned int actual_h; - entry = exynos_drm_fb_get_buf(fb); - if (!entry) { - DRM_LOG_KMS("entry is null.\n"); + buffer = exynos_drm_fb_get_buf(fb); + if (!buffer) { + DRM_LOG_KMS("buffer is null.\n"); return -EFAULT; } - overlay->paddr = entry->paddr; - overlay->vaddr = entry->vaddr; + overlay->dma_addr = buffer->dma_addr; + overlay->vaddr = buffer->kvaddr; - DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", + DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", (unsigned long)overlay->vaddr, - (unsigned long)overlay->paddr); + (unsigned long)overlay->dma_addr); actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); @@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc) static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) { - DRM_DEBUG_KMS("%s\n", __FILE__); + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - /* TODO */ + DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); + + switch (mode) { + case DRM_MODE_DPMS_ON: + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* TODO */ + exynos_drm_fn_encoder(crtc, NULL, + exynos_drm_encoder_crtc_disable); + break; + default: + DRM_DEBUG_KMS("unspecified mode %d\n", mode); + break; + } } static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) @@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) static void exynos_drm_crtc_commit(struct drm_crtc *crtc) { + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + DRM_DEBUG_KMS("%s\n", __FILE__); - /* drm framework doesn't check NULL. */ + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); } static bool diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index c584042d6d2c..25f72a62cb88 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); +/* + * Exynos specific crtc postion structure. + * + * @fb_x: offset x on a framebuffer to be displyed + * - the unit is screen coordinates. + * @fb_y: offset y on a framebuffer to be displayed + * - the unit is screen coordinates. + * @crtc_x: offset x on hardware screen. + * @crtc_y: offset y on hardware screen. + * @crtc_w: width of hardware screen. + * @crtc_h: height of hardware screen. 
+ */ +struct exynos_drm_crtc_pos { + unsigned int fb_x; + unsigned int fb_y; + unsigned int crtc_x; + unsigned int crtc_y; + unsigned int crtc_w; + unsigned int crtc_h; +}; + +int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, + struct drm_framebuffer *fb, + struct drm_display_mode *mode, + struct exynos_drm_crtc_pos *pos); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 83810cbe3c17..53e2216de61d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -27,6 +27,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_crtc_helper.h" #include <drm/exynos_drm.h> @@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) drm_mode_config_init(dev); + /* init kms poll for handling hpd */ + drm_kms_helper_poll_init(dev); + exynos_drm_mode_config_init(dev); /* @@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev) exynos_drm_fbdev_fini(dev); exynos_drm_device_unregister(dev); drm_vblank_cleanup(dev); + drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); kfree(dev->dev_private); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c03683f2ae72..5e02e6ecc2e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -29,6 +29,7 @@ #ifndef _EXYNOS_DRM_DRV_H_ #define _EXYNOS_DRM_DRV_H_ +#include <linux/module.h> #include "drm.h" #define MAX_CRTC 2 @@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops { * @scan_flag: interlace or progressive way. * (it could be DRM_MODE_FLAG_*) * @bpp: pixel size.(in bit) - * @paddr: bus(accessed by dma) physical memory address to this overlay - * and this is physically continuous. + * @dma_addr: bus(accessed by dma) address to the memory region allocated + * for a overlay. * @vaddr: virtual memory addresss to this overlay. * @default_win: a window to be enabled. * @color_key: color key on or off. @@ -108,7 +109,7 @@ struct exynos_drm_overlay { unsigned int scan_flag; unsigned int bpp; unsigned int pitch; - dma_addr_t paddr; + dma_addr_t dma_addr; void __iomem *vaddr; bool default_win; @@ -130,7 +131,7 @@ struct exynos_drm_overlay { * @check_timing: check if timing is valid or not. * @power_on: display device on or off. */ -struct exynos_drm_display { +struct exynos_drm_display_ops { enum exynos_drm_output_type type; bool (*is_connected)(struct device *dev); int (*get_edid)(struct device *dev, struct drm_connector *connector, @@ -146,12 +147,14 @@ struct exynos_drm_display { * @mode_set: convert drm_display_mode to hw specific display mode and * would be called by encoder->mode_set(). * @commit: set current hw specific display mode to hw. + * @disable: disable hardware specific display mode. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
*/ struct exynos_drm_manager_ops { void (*mode_set)(struct device *subdrv_dev, void *mode); void (*commit)(struct device *subdrv_dev); + void (*disable)(struct device *subdrv_dev); int (*enable_vblank)(struct device *subdrv_dev); void (*disable_vblank)(struct device *subdrv_dev); }; @@ -178,7 +181,7 @@ struct exynos_drm_manager { int pipe; struct exynos_drm_manager_ops *ops; struct exynos_drm_overlay_ops *overlay_ops; - struct exynos_drm_display *display; + struct exynos_drm_display_ops *display_ops; }; /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 7cf6fa86a67e..153061415baf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); + struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); + switch (mode) { + case DRM_MODE_DPMS_ON: + if (manager_ops && manager_ops->commit) + manager_ops->commit(manager->dev); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* TODO */ + if (manager_ops && manager_ops->disable) + manager_ops->disable(manager->dev); + break; + default: + DRM_ERROR("unspecified mode %d\n", mode); + break; + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { - struct exynos_drm_display *display = manager->display; + struct exynos_drm_display_ops *display_ops = + manager->display_ops; - if (display && display->power_on) - display->power_on(manager->dev, mode); + DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", + connector->base.id, mode); + if (display_ops && display_ops->power_on) + display_ops->power_on(manager->dev, mode); } } } @@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) { struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; - struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->commit) manager_ops->commit(manager->dev); - - if (overlay_ops && overlay_ops->commit) - overlay_ops->commit(manager->dev); } static struct drm_crtc * @@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; + struct exynos_drm_private *private = dev->dev_private; + struct exynos_drm_manager *manager; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc != crtc) - continue; + /* + * if crtc is detached from encoder, check pipe, + * otherwise check crtc attached to encoder + */ + if (!encoder->crtc) { + manager = to_exynos_encoder(encoder)->manager; + if (manager->pipe < 0 || + private->crtc[manager->pipe] != crtc) + continue; + } else { + if (encoder->crtc != crtc) + continue; + } fn(encoder, data); } @@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + int crtc = *(int *)data; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + /* + * when crtc is detached from encoder, this pipe is used + * to 
select manager operation + */ + manager->pipe = crtc; - overlay_ops->commit(manager->dev); + if (overlay_ops && overlay_ops->commit) + overlay_ops->commit(manager->dev); } void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) @@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; struct exynos_drm_overlay *overlay = data; - overlay_ops->mode_set(manager->dev, overlay); + if (overlay_ops && overlay_ops->mode_set) + overlay_ops->mode_set(manager->dev, overlay); +} + +void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) +{ + struct exynos_drm_manager *manager = + to_exynos_encoder(encoder)->manager; + struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + + DRM_DEBUG_KMS("\n"); + + if (overlay_ops && overlay_ops->disable) + overlay_ops->disable(manager->dev); + + /* + * crtc is already detached from encoder and last + * function for detaching is properly done, so + * clear pipe from manager to prevent repeated call + */ + if (!encoder->crtc) + manager->pipe = -1; } MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h index 5ecd645d06a9..a22acfbf0e4e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 48d29cfd5240..5bf4a1ac7f82 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -29,7 +29,9 @@ #include "drmP.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" +#include "drm_fb_helper.h" +#include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_buf.h" #include "exynos_drm_gem.h" @@ -41,14 +43,14 @@ * * @fb: drm framebuffer obejct. * @exynos_gem_obj: exynos specific gem object containing a gem object. - * @entry: pointer to exynos drm buffer entry object. - * - containing only the information to physically continuous memory - * region allocated at default framebuffer creation. + * @buffer: pointer to exynos_drm_gem_buffer object. + * - contain the memory information to memory region allocated + * at default framebuffer creation. */ struct exynos_drm_fb { struct drm_framebuffer fb; struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; }; static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) @@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) * default framebuffer has no gem object so * a buffer of the default framebuffer should be released at here. 
*/ - if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) - exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); + if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer) + exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer); kfree(exynos_fb); exynos_fb = NULL; @@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, */ if (!mode_cmd->handle) { if (!file_priv) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; /* * in case that file_priv is NULL, it allocates * only buffer and this buffer would be used * for default framebuffer. */ - entry = exynos_drm_buf_create(dev, size); - if (IS_ERR(entry)) { - ret = PTR_ERR(entry); + buffer = exynos_drm_buf_create(dev, size); + if (IS_ERR(buffer)) { + ret = PTR_ERR(buffer); goto err_buffer; } - exynos_fb->entry = entry; + exynos_fb->buffer = buffer; - DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", - (unsigned long)entry->paddr, size); + DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n", + (unsigned long)buffer->dma_addr, size); goto out; } else { - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, - size, - &mode_cmd->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, + &mode_cmd->handle, + size); if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); goto err_buffer; @@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, * so that default framebuffer has no its own gem object, * only its own buffer object. */ - exynos_fb->entry = exynos_gem_obj->entry; + exynos_fb->buffer = exynos_gem_obj->buffer; - DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", - (unsigned long)exynos_fb->entry->paddr, size, + DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n", + (unsigned long)exynos_fb->buffer->dma_addr, size, (unsigned int)&exynos_gem_obj->base); out: @@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev, return exynos_drm_fb_init(file_priv, dev, mode_cmd); } -struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) +struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; DRM_DEBUG_KMS("%s\n", __FILE__); - entry = exynos_fb->entry; - if (!entry) + buffer = exynos_fb->buffer; + if (!buffer) return NULL; - DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", - (unsigned long)entry->vaddr, - (unsigned long)entry->paddr); + DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", + (unsigned long)buffer->kvaddr, + (unsigned long)buffer->dma_addr); - return entry; + return buffer; +} + +static void exynos_drm_output_poll_changed(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + struct drm_fb_helper *fb_helper = private->fb_helper; + + if (fb_helper) + drm_fb_helper_hotplug_event(fb_helper); } static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { .fb_create = exynos_drm_fb_create, + .output_poll_changed = exynos_drm_output_poll_changed, }; void exynos_drm_mode_config_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 1f4b3d1a7713..836f41008187 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -33,6 +33,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" #define 
MAX_CONNECTOR 4 @@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = { }; static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, - struct drm_framebuffer *fb, - unsigned int fb_width, - unsigned int fb_height) + struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; struct drm_device *dev = helper->dev; struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); - struct exynos_drm_buf_entry *entry; - unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); + struct exynos_drm_gem_buf *buffer; + unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); unsigned long offset; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, exynos_fb->fb = fb; drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); - drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); + drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); - entry = exynos_drm_fb_get_buf(fb); - if (!entry) { - DRM_LOG_KMS("entry is null.\n"); + buffer = exynos_drm_fb_get_buf(fb); + if (!buffer) { + DRM_LOG_KMS("buffer is null.\n"); return -EFAULT; } offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); offset += fbi->var.yoffset * fb->pitch; - dev->mode_config.fb_base = entry->paddr; - fbi->screen_base = entry->vaddr + offset; - fbi->fix.smem_start = entry->paddr + offset; + dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; + fbi->screen_base = buffer->kvaddr + offset; + fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); fbi->screen_size = size; fbi->fix.smem_len = size; @@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, goto out; } - ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, - sizes->fb_height); + ret = exynos_drm_fbdev_update(helper, helper->fb); if (ret < 0) fb_dealloc_cmap(&fbi->cmap); @@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper, } helper->fb = exynos_fbdev->fb; - return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, - sizes->fb_height); + return exynos_drm_fbdev_update(helper, helper->fb); } static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, @@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) fb_helper = private->fb_helper; if (fb_helper) { + struct list_head temp_list; + + INIT_LIST_HEAD(&temp_list); + + /* + * fb_helper is reintialized but kernel fb is reused + * so kernel_fb_list need to be backuped and restored + */ + if (!list_empty(&fb_helper->kernel_fb_list)) + list_replace_init(&fb_helper->kernel_fb_list, + &temp_list); + drm_fb_helper_fini(fb_helper); ret = drm_fb_helper_init(dev, fb_helper, @@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) return ret; } + if (!list_empty(&temp_list)) + list_replace(&temp_list, &fb_helper->kernel_fb_list); + ret = drm_fb_helper_single_add_all_connectors(fb_helper); if (ret < 0) { DRM_ERROR("failed to add fb helper to connectors\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 4659c88cdd9b..db3b3d9e731d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -64,7 +64,7 @@ struct fimd_win_data { unsigned int fb_width; unsigned int fb_height; unsigned int bpp; - dma_addr_t paddr; + dma_addr_t dma_addr; void __iomem *vaddr; unsigned int buf_offsize; unsigned int line_size; /* bytes */ @@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int 
mode) return 0; } -static struct exynos_drm_display fimd_display = { +static struct exynos_drm_display_ops fimd_display_ops = { .type = EXYNOS_DISPLAY_TYPE_LCD, .is_connected = fimd_display_is_connected, .get_timing = fimd_get_timing, @@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev) writel(val, ctx->regs + VIDCON0); } +static void fimd_disable(struct device *dev) +{ + struct fimd_context *ctx = get_fimd_context(dev); + struct exynos_drm_subdrv *subdrv = &ctx->subdrv; + struct drm_device *drm_dev = subdrv->drm_dev; + struct exynos_drm_manager *manager = &subdrv->manager; + u32 val; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + /* fimd dma off */ + val = readl(ctx->regs + VIDCON0); + val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F); + writel(val, ctx->regs + VIDCON0); + + /* + * if vblank is enabled status with dma off then + * it disables vsync interrupt. + */ + if (drm_dev->vblank_enabled[manager->pipe] && + atomic_read(&drm_dev->vblank_refcount[manager->pipe])) { + drm_vblank_put(drm_dev, manager->pipe); + + /* + * if vblank_disable_allowed is 0 then disable + * vsync interrupt right now else the vsync interrupt + * would be disabled by drm timer once a current process + * gives up ownershop of vblank event. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, manager->pipe); + } +} + static int fimd_enable_vblank(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); @@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev) static struct exynos_drm_manager_ops fimd_manager_ops = { .commit = fimd_commit, + .disable = fimd_disable, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, }; @@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev, win_data->ovl_height = overlay->crtc_height; win_data->fb_width = overlay->fb_width; win_data->fb_height = overlay->fb_height; - win_data->paddr = overlay->paddr + offset; + win_data->dma_addr = overlay->dma_addr + offset; win_data->vaddr = overlay->vaddr + offset; win_data->bpp = overlay->bpp; win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * @@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev, DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", win_data->ovl_width, win_data->ovl_height); DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", - (unsigned long)win_data->paddr, + (unsigned long)win_data->dma_addr, (unsigned long)win_data->vaddr); DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", overlay->fb_width, overlay->crtc_width); @@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev) writel(val, ctx->regs + SHADOWCON); /* buffer start address */ - val = win_data->paddr; + val = (unsigned long)win_data->dma_addr; writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); - val = win_data->paddr + size; + val = (unsigned long)(win_data->dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", - (unsigned long)win_data->paddr, val, size); + (unsigned long)win_data->dma_addr, val, size); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", win_data->ovl_width, win_data->ovl_height); @@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev) static void fimd_win_disable(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); - struct fimd_win_data *win_data; int win = ctx->default_win; u32 val; @@ -456,8 +490,6 @@ static void 
fimd_win_disable(struct device *dev) if (win < 0 || win > WINDOWS_NR) return; - win_data = &ctx->win_data[win]; - /* protect windows */ val = readl(ctx->regs + SHADOWCON); val |= SHADOWCON_WINx_PROTECT(win); @@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) /* VSYNC interrupt */ writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); + /* + * in case that vblank_disable_allowed is 1, it could induce + * the problem that manager->pipe could be -1 because with + * disable callback, vsync interrupt isn't disabled and at this moment, + * vsync interrupt could occur. the vsync interrupt would be disabled + * by timer handler later. + */ + if (manager->pipe == -1) + return IRQ_HANDLED; + drm_handle_vblank(drm_dev, manager->pipe); fimd_finish_pageflip(drm_dev, manager->pipe); @@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) */ drm_dev->irq_enabled = 1; - /* - * with vblank_disable_allowed = 1, vblank interrupt will be disabled - * by drm timer once a current process gives up ownership of - * vblank event.(drm_vblank_put function was called) - */ - drm_dev->vblank_disable_allowed = 1; - return 0; } @@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) subdrv->manager.pipe = -1; subdrv->manager.ops = &fimd_manager_ops; subdrv->manager.overlay_ops = &fimd_overlay_ops; - subdrv->manager.display = &fimd_display; + subdrv->manager.display_ops = &fimd_display_ops; subdrv->manager.dev = dev; platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index a8e7a88906ed..aba0fe47f7ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; } -struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, - struct drm_device *dev, unsigned int size, - unsigned int *handle) +static struct exynos_drm_gem_obj + *exynos_drm_gem_init(struct drm_device *drm_dev, + struct drm_file *file_priv, unsigned int *handle, + unsigned int size) { struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_buf_entry *entry; struct drm_gem_object *obj; int ret; - DRM_DEBUG_KMS("%s\n", __FILE__); - - size = roundup(size, PAGE_SIZE); - exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); if (!exynos_gem_obj) { DRM_ERROR("failed to allocate exynos gem object.\n"); return ERR_PTR(-ENOMEM); } - /* allocate the new buffer object and memory region. 
*/ - entry = exynos_drm_buf_create(dev, size); - if (!entry) { - kfree(exynos_gem_obj); - return ERR_PTR(-ENOMEM); - } - - exynos_gem_obj->entry = entry; - obj = &exynos_gem_obj->base; - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init(drm_dev, obj, size); if (ret < 0) { - DRM_ERROR("failed to initailize gem object.\n"); - goto err_obj_init; + DRM_ERROR("failed to initialize gem object.\n"); + ret = -EINVAL; + goto err_object_init; } DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); @@ -127,24 +115,50 @@ err_handle_create: err_create_mmap_offset: drm_gem_object_release(obj); -err_obj_init: - exynos_drm_buf_destroy(dev, exynos_gem_obj->entry); - +err_object_init: kfree(exynos_gem_obj); return ERR_PTR(ret); } +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, + struct drm_file *file_priv, + unsigned int *handle, unsigned long size) +{ + + struct exynos_drm_gem_obj *exynos_gem_obj = NULL; + struct exynos_drm_gem_buf *buffer; + + size = roundup(size, PAGE_SIZE); + + DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); + + buffer = exynos_drm_buf_create(dev, size); + if (IS_ERR(buffer)) { + return ERR_CAST(buffer); + } + + exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size); + if (IS_ERR(exynos_gem_obj)) { + exynos_drm_buf_destroy(dev, buffer); + return exynos_gem_obj; + } + + exynos_gem_obj->buffer = buffer; + + return exynos_gem_obj; +} + int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file_priv) { struct drm_exynos_gem_create *args = data; - struct exynos_drm_gem_obj *exynos_gem_obj; + struct exynos_drm_gem_obj *exynos_gem_obj = NULL; - DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); + DRM_DEBUG_KMS("%s\n", __FILE__); - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, - &args->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, + &args->handle, args->size); if (IS_ERR(exynos_gem_obj)) return PTR_ERR(exynos_gem_obj); @@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, { struct drm_gem_object *obj = filp->private_data; struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; unsigned long pfn, vm_size; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, vm_size = vma->vm_end - vma->vm_start; /* - * a entry contains information to physically continuous memory + * a buffer contains information to physically continuous memory * allocated by user request or at framebuffer creation. */ - entry = exynos_gem_obj->entry; + buffer = exynos_gem_obj->buffer; /* check if user-requested size is valid. */ - if (vm_size > entry->size) + if (vm_size > buffer->size) return -EINVAL; /* * get page frame number to physical memory to be mapped * to user space. 
*/ - pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; + pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT; DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); @@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj) exynos_gem_obj = to_exynos_gem_obj(gem_obj); - exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); + exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer); kfree(exynos_gem_obj); } @@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * args->bpp >> 3; args->size = args->pitch * args->height; - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, - &args->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle, + args->size); if (IS_ERR(exynos_gem_obj)) return PTR_ERR(exynos_gem_obj); @@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; + pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >> + PAGE_SHIFT) + page_offset; ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index e5fc0148277b..ef8797334e6d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -30,13 +30,29 @@ struct exynos_drm_gem_obj, base) /* + * exynos drm gem buffer structure. + * + * @kvaddr: kernel virtual address to allocated memory region. + * @dma_addr: bus address(accessed by dma) to allocated memory region. + * - this address could be physical address without IOMMU and + * device address with IOMMU. + * @size: size of allocated memory region. + */ +struct exynos_drm_gem_buf { + void __iomem *kvaddr; + dma_addr_t dma_addr; + unsigned long size; +}; + +/* * exynos drm buffer structure. * * @base: a gem object. * - a new handle to this gem object would be created * by drm_gem_handle_create(). - * @entry: pointer to exynos drm buffer entry object. - * - containing the information to physically + * @buffer: a pointer to exynos_drm_gem_buffer object. + * - contain the information to memory region allocated + * by user request or at framebuffer creation. * continuous memory region allocated by user request * or at framebuffer creation. * @@ -45,13 +61,13 @@ */ struct exynos_drm_gem_obj { struct drm_gem_object base; - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; }; /* create a new buffer and get a new gem handle. 
*/ -struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, - struct drm_device *dev, unsigned int size, - unsigned int *handle); +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, + struct drm_file *file_priv, + unsigned int *handle, unsigned long size); /* * request gem object creation and buffer allocation as the size diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 08792a740f18..07d55df6623e 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -24,6 +24,8 @@ * */ +#include <linux/module.h> + #include "ch7006_priv.h" /* DRM encoder functions */ diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index 0b6773290c08..b7d45ab4ba69 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -24,6 +24,8 @@ * */ +#include <linux/module.h> + #include "drmP.h" #include "drm_crtc_helper.h" #include "drm_encoder_slave.h" diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 6f98d059f68a..d4266bdf6fb4 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -30,6 +30,8 @@ * Gareth Hughes <gareth@valinux.com> */ +#include <linux/module.h> + #include "drmP.h" #include "drm.h" #include "i810_drm.h" diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8e95d66800b0..d09a6e02dc95 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -29,6 +29,7 @@ #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> +#include <linux/export.h> #include "drmP.h" #include "drm.h" #include "intel_drv.h" @@ -635,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; + int ret; ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (ring->size == 0) return 0; + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + seq_printf(m, "Ring %s:\n", ring->name); seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); @@ -653,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -841,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u16 crstanddelay = I915_READ16(CRSTANDVID); + u16 crstanddelay; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + crstanddelay = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); @@ -939,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 delayfreq; - int i; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for (i = 0; i < 16; i++) { delayfreq = I915_READ(PXVFREQ_BASE + i * 4); @@ -947,6 +968,8 @@ static int i915_delayfreq_table(struct 
seq_file *m, void *unused) (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -961,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 inttoext; - int i; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for (i = 1; i <= 32; i++) { inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -976,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); - u32 rstdbyctl = I915_READ(RSTDBYCTL); - u16 crstandvid = I915_READ16(CRSTANDVID); + u32 rgvmodectl, rstdbyctl; + u16 crstandvid; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + rgvmodectl = I915_READ(MEMMODECTL); + rstdbyctl = I915_READ(RSTDBYCTL); + crstandvid = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? "yes" : "no"); @@ -1166,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -1505,7 +1551,10 @@ drm_add_fake_info_node(struct drm_minor *minor, node->minor = minor; node->dent = ent; node->info_ent = (void *) key; - list_add(&node->list, &minor->debugfs_nodes.list); + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); return 0; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2eac955dee18..a9533c54c93c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -41,6 +41,7 @@ #include <linux/pnp.h> #include <linux/vga_switcheroo.h> #include <linux/slab.h> +#include <linux/module.h> #include <acpi/video.h> static void i915_write_hws_pga(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4c8d681c2151..15bfa9145d2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,6 +35,7 @@ #include "intel_drv.h" #include <linux/console.h> +#include <linux/module.h> #include "drm_crtc_helper.h" static int i915_modeset __read_mostly = -1; @@ -67,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6 (default: true)"); -unsigned int i915_enable_fbc __read_mostly = -1; +int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " @@ -79,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock, "Use panel (LVDS/eDP) downclocking for power savings " "(default: false)"); -unsigned int i915_panel_use_ssc __read_mostly = -1; +int i915_panel_use_ssc __read_mostly = -1; 
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); MODULE_PARM_DESC(lvds_use_ssc, "Use Spread Spectrum Clock with panels [LVDS/eDP] " @@ -106,7 +107,7 @@ static struct drm_driver driver; extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ - .class = PCI_CLASS_DISPLAY_VGA << 8, \ + .class = PCI_BASE_CLASS_DISPLAY << 16, \ .class_mask = 0xff0000, \ .vendor = 0x8086, \ .device = id, \ @@ -788,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = { }; static struct drm_driver driver = { - /* don't use mtrr's here, the Xserver or user space app should - * deal with them for intel hardware. + /* Don't use MTRRs here; the Xserver or userspace app should + * deal with them for Intel hardware. */ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 06a37f4fd74b..4a9c1b979804 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -126,6 +126,9 @@ struct drm_i915_master_private { struct _drm_i915_sarea *sarea_priv; }; #define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 16 +/* 16 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 5 struct drm_i915_fence_reg { struct list_head lru_list; @@ -168,7 +171,7 @@ struct drm_i915_error_state { u32 instdone1; u32 seqno; u64 bbaddr; - u64 fence[16]; + u64 fence[I915_MAX_NUM_FENCES]; struct timeval time; struct drm_i915_error_object { int page_count; @@ -182,7 +185,7 @@ struct drm_i915_error_state { u32 gtt_offset; u32 read_domains; u32 write_domain; - s32 fence_reg:5; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; s32 pinned:2; u32 tiling:2; u32 dirty:1; @@ -375,7 +378,7 @@ typedef struct drm_i915_private { struct notifier_block lid_notifier; int crt_ddc_pin; - struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ @@ -506,7 +509,7 @@ typedef struct drm_i915_private { u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; - uint64_t saveFENCE[16]; + uint64_t saveFENCE[I915_MAX_NUM_FENCES]; u32 saveCURACNTR; u32 saveCURAPOS; u32 saveCURABASE; @@ -777,10 +780,8 @@ struct drm_i915_gem_object { * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. * Protected by dev->struct_mutex. - * - * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) */ - signed int fence_reg:5; + signed int fence_reg:I915_MAX_NUM_FENCE_BITS; /** * Advice: are the backing pages purgeable? 
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly; extern unsigned int i915_powersave __read_mostly; extern unsigned int i915_semaphores __read_mostly; extern unsigned int i915_lvds_downclock __read_mostly; -extern unsigned int i915_panel_use_ssc __read_mostly; +extern int i915_panel_use_ssc __read_mostly; extern int i915_vbt_sdvo_panel_type __read_mostly; extern unsigned int i915_enable_rc6 __read_mostly; -extern unsigned int i915_enable_fbc __read_mostly; +extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6651c36b6e8a..8359dc777041 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file, if (obj->base.size > dev_priv->mm.gtt_mappable_end) { ret = -E2BIG; - goto unlock; + goto out; } if (obj->madv != I915_MADV_WILLNEED) { @@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - for (i = 0; i < 16; i++) { + for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; struct drm_i915_gem_object *obj = reg->obj; @@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * so emit a request to do so. */ request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request) + if (request) { ret = i915_add_request(obj->ring, NULL, request); - else + if (ret) + kfree(request); + } else ret = -ENOMEM; } @@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; - if (IS_GEN6(dev)) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { /* On Gen6, we can have the GPU use the LLC (the CPU * cache) for about a 10% performance improvement * compared to uncached. 
Graphics requests other than @@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.gtt_list); for (i = 0; i < I915_NUM_RINGS; i++) init_ring_lists(&dev_priv->ring[i]); - for (i = 0; i < 16; i++) + for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9ee2729fe5c6..b40004b55977 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev, /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a09416e611f..b080cc824001 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1553,12 +1553,21 @@ */ #define PP_READY (1 << 30) #define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_ON (1 << 28) -#define PP_SEQUENCE_OFF (2 << 28) -#define PP_SEQUENCE_MASK 0x30000000 +#define PP_SEQUENCE_POWER_UP (1 << 28) +#define PP_SEQUENCE_POWER_DOWN (2 << 28) +#define PP_SEQUENCE_MASK (3 << 28) +#define PP_SEQUENCE_SHIFT 28 #define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) #define PP_SEQUENCE_STATE_MASK 0x0000000f +#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) +#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) +#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) +#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) +#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) +#define PP_SEQUENCE_STATE_RESET (0xf << 0) #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 @@ -3444,6 +3453,10 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GEN6_UCGCTL2 0x9404 +# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) +# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f8f602d76650..7886e4fb60e3 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); @@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 981b1f1c04d8..e77a863a3833 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { u32 bpc 
= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); @@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, lvds_bpc = 6; if (lvds_bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); display_bpc = lvds_bpc; } continue; @@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, unsigned int edp_bpc = dev_priv->edp.bpp / 3; if (edp_bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); display_bpc = edp_bpc; } continue; @@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, /* Don't use an invalid EDID bpc value */ if (connector->display_info.bpc && connector->display_info.bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); display_bpc = connector->display_info.bpc; } } @@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, */ if (intel_encoder->type == INTEL_OUTPUT_HDMI) { if (display_bpc > 8 && display_bpc < 12) { - DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); + DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); display_bpc = 12; } else { - DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); + DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); display_bpc = 8; } } @@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, display_bpc = min(display_bpc, bpc); - DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", - bpc, display_bpc); + DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", + bpc, display_bpc); *pipe_bpp = display_bpc * 3; @@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; if ((is_lvds && dev_priv->lvds_dither) || dither) { pipeconf |= PIPECONF_DITHER_EN; - pipeconf |= PIPECONF_DITHER_TYPE_ST1; + pipeconf |= PIPECONF_DITHER_TYPE_SP; } if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { intel_dp_set_m_n(crtc, mode, adjusted_mode); @@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. 
+ */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + /* * According to the spec the following bits should be * set in order to enable memory self-refresh and fbc: diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fc1a0832af4f..4d0358fad937 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -27,6 +27,7 @@ #include <linux/i2c.h> #include <linux/slab.h> +#include <linux/export.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" @@ -58,7 +59,6 @@ struct intel_dp { struct i2c_algo_dp_aux_data algo; bool is_pch_edp; uint8_t train_set[4]; - uint8_t link_status[DP_LINK_STATUS_SIZE]; int panel_power_up_delay; int panel_power_down_delay; int panel_power_cycle_delay; @@ -67,7 +67,6 @@ struct intel_dp { struct drm_display_mode *panel_fixed_mode; /* for eDP */ struct delayed_work panel_vdd_work; bool want_panel_vdd; - unsigned long panel_off_jiffies; }; /** @@ -156,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder, static int intel_dp_max_lane_count(struct intel_dp *intel_dp) { - int max_lane_count = 4; - - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { - max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; - switch (max_lane_count) { - case 1: case 2: case 4: - break; - default: - max_lane_count = 4; - } + int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; } return max_lane_count; } @@ -767,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, continue; intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) + { lane_count = intel_dp->lane_count; break; - } else if (is_edp(intel_dp)) { - lane_count = dev_priv->edp.lanes; - break; } } @@ -809,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -821,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, ironlake_edp_pll_off(encoder); } - intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; - intel_dp->DP |= intel_dp->color_range; + /* + * There are three kinds of DP registers: + * + * IBX PCH + * CPU + * CPT PCH + * + * IBX PCH and CPU are the same for almost everything, + * except that the CPU DP PLL is configured in this + * register + * + * CPT PCH is quite different, having many bits moved + * to the TRANS_DP_CTL register instead. That + * configuration happens (oddly) in ironlake_pch_enable + */ - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - intel_dp->DP |= DP_SYNC_HS_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - intel_dp->DP |= DP_SYNC_VS_HIGH; + /* Preserve the BIOS-computed detected bit. This is + * supposed to be read-only. 
+ */ + intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; - if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) - intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - else - intel_dp->DP |= DP_LINK_TRAIN_OFF; + /* Handle DP bits in common between all three register formats */ + + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; switch (intel_dp->lane_count) { case 1: @@ -851,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } - memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); intel_dp->link_configuration[0] = intel_dp->link_bw; intel_dp->link_configuration[1] = intel_dp->lane_count; intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; - /* * Check for DPCD version > 1.1 and enhanced framing support */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - intel_dp->DP |= DP_ENHANCED_FRAMING; } - /* CPT DP's pipe select is decided in TRANS_DP_CTL */ - if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) - intel_dp->DP |= DP_PIPEB_SELECT; + /* Split out the IBX/CPU vs CPT settings */ - if (is_cpu_edp(intel_dp)) { - /* don't miss out required setting for eDP */ - intel_dp->DP |= DP_PLL_ENABLE; - if (adjusted_mode->clock < 200000) - intel_dp->DP |= DP_PLL_FREQ_160MHZ; - else - intel_dp->DP |= DP_PLL_FREQ_270MHZ; + if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { + intel_dp->DP |= intel_dp->color_range; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF; + + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + if (intel_crtc->pipe == 1) + intel_dp->DP |= DP_PIPEB_SELECT; + + if (is_cpu_edp(intel_dp)) { + /* don't miss out required setting for eDP */ + intel_dp->DP |= DP_PLL_ENABLE; + if (adjusted_mode->clock < 200000) + intel_dp->DP |= DP_PLL_FREQ_160MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + } + } else { + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; } } -static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) + +#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) +#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +static void ironlake_wait_panel_status(struct intel_dp *intel_dp, + u32 mask, + u32 value) { - unsigned long off_time; - unsigned long delay; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; - DRM_DEBUG_KMS("Wait for panel power off time\n"); + DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", + mask, value, + I915_READ(PCH_PP_STATUS), + I915_READ(PCH_PP_CONTROL)); - if (ironlake_edp_have_panel_power(intel_dp) || - ironlake_edp_have_panel_vdd(intel_dp)) - { - DRM_DEBUG_KMS("Panel still on, no delay needed\n"); - return; + if (_wait_for((I915_READ(PCH_PP_STATUS) 
& mask) == value, 5000, 10)) { + DRM_ERROR("Panel status timeout: status %08x control %08x\n", + I915_READ(PCH_PP_STATUS), + I915_READ(PCH_PP_CONTROL)); } +} - off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); - if (time_after(jiffies, off_time)) { - DRM_DEBUG_KMS("Time already passed"); - return; - } - delay = jiffies_to_msecs(off_time - jiffies); - if (delay > intel_dp->panel_power_down_delay) - delay = intel_dp->panel_power_down_delay; - DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); - msleep(delay); +static void ironlake_wait_panel_on(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power on\n"); + ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); +} + +static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power off time\n"); + ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); +} + +static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); +} + + +/* Read the current pp_control value, unlocking the register if it + * is locked + */ + +static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) +{ + u32 control = I915_READ(PCH_PP_CONTROL); + + control &= ~PANEL_UNLOCK_MASK; + control |= PANEL_UNLOCK_REGS; + return control; } static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) @@ -920,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) "eDP VDD already requested on\n"); intel_dp->want_panel_vdd = true; + if (ironlake_edp_have_panel_vdd(intel_dp)) { DRM_DEBUG_KMS("eDP VDD already on\n"); return; } - ironlake_wait_panel_off(intel_dp); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + if (!ironlake_edp_have_panel_power(intel_dp)) + ironlake_wait_panel_power_cycle(intel_dp); + + pp = ironlake_get_pp_control(dev_priv); pp |= EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -951,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) u32 pp; if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -961,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) /* Make sure sequencer is idle before allowing subsequent activity */ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); - intel_dp->panel_off_jiffies = jiffies; + + msleep(intel_dp->panel_power_down_delay); } } @@ -971,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) struct intel_dp, panel_vdd_work); struct drm_device *dev = intel_dp->base.base.dev; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); } static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) @@ -983,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); - + intel_dp->want_panel_vdd = false; if 
(sync) { @@ -999,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) } } -/* Returns true if the panel was already on when called */ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; + u32 pp; if (!is_edp(intel_dp)) return; - if (ironlake_edp_have_panel_power(intel_dp)) + + DRM_DEBUG_KMS("Turn eDP power on\n"); + + if (ironlake_edp_have_panel_power(intel_dp)) { + DRM_DEBUG_KMS("eDP power already on\n"); return; + } - ironlake_wait_panel_off(intel_dp); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + ironlake_wait_panel_power_cycle(intel_dp); + pp = ironlake_get_pp_control(dev_priv); if (IS_GEN5(dev)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; @@ -1024,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) } pp |= POWER_TARGET_ON; + if (!IS_GEN5(dev)) + pp |= PANEL_POWER_RESET; + I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, - 5000)) - DRM_ERROR("panel on wait timed out: 0x%08x\n", - I915_READ(PCH_PP_STATUS)); + ironlake_wait_panel_on(intel_dp); if (IS_GEN5(dev)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ @@ -1039,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) } } -static void ironlake_edp_panel_off(struct drm_encoder *encoder) +static void ironlake_edp_panel_off(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | - PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; + u32 pp; if (!is_edp(intel_dp)) return; - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; - if (IS_GEN5(dev)) { - /* ILK workaround: disable reset around power sequence */ - pp &= ~PANEL_POWER_RESET; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - } + DRM_DEBUG_KMS("Turn eDP power off\n"); - intel_dp->panel_off_jiffies = jiffies; + WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n"); - if (IS_GEN5(dev)) { - pp &= ~POWER_TARGET_ON; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - pp &= ~POWER_TARGET_ON; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(intel_dp->panel_power_cycle_delay); - - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) - DRM_ERROR("panel off wait timed out: 0x%08x\n", - I915_READ(PCH_PP_STATUS)); + pp = ironlake_get_pp_control(dev_priv); + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - pp |= PANEL_POWER_RESET; /* restore panel reset bit */ - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - } + ironlake_wait_panel_off(intel_dp); } static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) @@ -1098,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) * allowing it to appear. 
*/ msleep(intel_dp->backlight_on_delay); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -1116,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) return; DRM_DEBUG_KMS("\n"); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -1186,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + ironlake_edp_backlight_off(intel_dp); + ironlake_edp_panel_off(intel_dp); + /* Wake up the sink first */ ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_link_down(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); /* Make sure the panel is off before trying to * change the mode */ - ironlake_edp_backlight_off(intel_dp); - intel_dp_link_down(intel_dp); - ironlake_edp_panel_off(encoder); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1210,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); - intel_dp_complete_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); @@ -1229,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { + ironlake_edp_backlight_off(intel_dp); + ironlake_edp_panel_off(intel_dp); + ironlake_edp_panel_vdd_on(intel_dp); - if (is_edp(intel_dp)) - ironlake_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, mode); intel_dp_link_down(intel_dp); - ironlake_edp_panel_off(encoder); - if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) - ironlake_edp_pll_off(encoder); ironlake_edp_panel_vdd_off(intel_dp, false); + + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_off(encoder); } else { + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, mode); if (!(dp_reg & DP_PORT_EN)) { @@ -1246,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); - ironlake_edp_backlight_on(intel_dp); } else ironlake_edp_panel_vdd_off(intel_dp, false); ironlake_edp_backlight_on(intel_dp); @@ -1284,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, * link status information */ static bool -intel_dp_get_link_status(struct intel_dp *intel_dp) +intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { return intel_dp_aux_native_read_retry(intel_dp, DP_LANE0_1_STATUS, - intel_dp->link_status, + link_status, DP_LINK_STATUS_SIZE); } @@ -1300,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], } static uint8_t -intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], +intel_get_adjust_request_voltage(uint8_t adjust_request[2], int lane) { - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? 
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; } static uint8_t -intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], +intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], int lane) { - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; } @@ -1343,6 +1376,7 @@ static char *link_train_names[] = { * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB */ #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 +#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200 static uint8_t intel_dp_pre_emphasis_max(uint8_t voltage_swing) @@ -1361,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_dp *intel_dp) +intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { + struct drm_device *dev = intel_dp->base.base.dev; uint8_t v = 0; uint8_t p = 0; int lane; + uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); + int voltage_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); - uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); + uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); if (this_v > v) v = this_v; @@ -1377,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp) p = this_p; } - if (v >= I830_DP_VOLTAGE_MAX) - v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) + voltage_max = I830_DP_VOLTAGE_MAX_CPT; + else + voltage_max = I830_DP_VOLTAGE_MAX; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; if (p >= intel_dp_pre_emphasis_max(v)) p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; @@ -1388,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(uint8_t train_set, int lane_count) +intel_dp_signal_levels(uint8_t train_set) { uint32_t signal_levels = 0; @@ -1457,9 +1498,8 @@ static uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { - int i = DP_LANE0_1_STATUS + (lane >> 1); int s = (lane & 1) * 4; - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = link_status[lane>>1]; return (l >> s) & 0xf; } @@ -1484,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool -intel_channel_eq_ok(struct intel_dp *intel_dp) +intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { uint8_t lane_align; uint8_t lane_status; int lane; - lane_align = intel_dp_link_status(intel_dp->link_status, + lane_align = intel_dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; for (lane = 0; lane < intel_dp->lane_count; lane++) { - lane_status = intel_get_lane_status(intel_dp->link_status, lane); + lane_status = 
intel_get_lane_status(link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } @@ -1520,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, - intel_dp->train_set, 4); - if (ret != 4) + intel_dp->train_set, + intel_dp->lane_count); + if (ret != intel_dp->lane_count) return false; return true; @@ -1537,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) int i; uint8_t voltage; bool clock_recovery = false; - int tries; + int voltage_tries, loop_tries; u32 reg; uint32_t DP = intel_dp->DP; @@ -1564,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; - tries = 0; + voltage_tries = 0; + loop_tries = 0; clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ + uint8_t link_status[DP_LINK_STATUS_SIZE]; uint32_t signal_levels; - if (IS_GEN6(dev) && is_edp(intel_dp)) { + + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); + DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1589,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) /* Set training pattern 1 */ udelay(100); - if (!intel_dp_get_link_status(intel_dp)) + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); break; + } - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("clock recovery OK\n"); clock_recovery = true; break; } @@ -1601,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count) - break; + if (i == intel_dp->lane_count) { + ++loop_tries; + if (loop_tries == 5) { + DRM_DEBUG_KMS("too many full retries, give up\n"); + break; + } + memset(intel_dp->train_set, 0, 4); + voltage_tries = 0; + continue; + } /* Check to see if we've tried the same voltage 5 times */ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) + ++voltage_tries; + if (voltage_tries == 5) { + DRM_DEBUG_KMS("too many voltage retries, give up\n"); break; + } } else - tries = 0; + voltage_tries = 0; voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); + intel_get_adjust_train(intel_dp, link_status); } intel_dp->DP = DP; @@ -1637,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; + uint8_t link_status[DP_LINK_STATUS_SIZE]; if (cr_tries > 5) { DRM_ERROR("failed to train DP, aborting\n"); @@ -1644,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; } - if (IS_GEN6(dev) && is_edp(intel_dp)) { + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = 
intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1664,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; udelay(400); - if (!intel_dp_get_link_status(intel_dp)) + if (!intel_dp_get_link_status(intel_dp, link_status)) break; /* Make sure clock is still ok */ - if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } - if (intel_channel_eq_ok(intel_dp)) { + if (intel_channel_eq_ok(intel_dp, link_status)) { channel_eq = true; break; } @@ -1689,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) } /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); + intel_get_adjust_train(intel_dp, link_status); ++tries; } @@ -1734,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); - if (is_edp(intel_dp)) - DP |= DP_LINK_TRAIN_OFF; + if (is_edp(intel_dp)) { + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) + DP |= DP_LINK_TRAIN_OFF_CPT; + else + DP |= DP_LINK_TRAIN_OFF; + } if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { @@ -1821,6 +1884,7 @@ static void intel_dp_check_link_status(struct intel_dp *intel_dp) { u8 sink_irq_vector; + u8 link_status[DP_LINK_STATUS_SIZE]; if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1829,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) return; /* Try to read receiver status if the link appears to be up */ - if (!intel_dp_get_link_status(intel_dp)) { + if (!intel_dp_get_link_status(intel_dp, link_status)) { intel_dp_link_down(intel_dp); return; } @@ -1854,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - if (!intel_channel_eq_ok(intel_dp)) { + if (!intel_channel_eq_ok(intel_dp, link_status)) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); @@ -2178,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) continue; intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; } @@ -2320,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT; - + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> PANEL_LIGHT_OFF_DELAY_SHIFT; @@ -2353,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay; - ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_get_dpcd(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); + if (ret) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) dev_priv->no_aux_handshake = diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 9ed5380e5a53..d30ccccb9d73 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ 
b/drivers/gpu/drm/i915/intel_i2c.c @@ -28,6 +28,7 @@ */ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/export.h> #include "drmP.h" #include "drm.h" #include "intel_drv.h" diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 499d4c0dbeeb..21f60b7d69a3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd) static int intel_panel_get_brightness(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); - return intel_panel_get_backlight(dev); + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->backlight_level; } static const struct backlight_ops intel_panel_bl_ops = { diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6db3b1ccb6eb..3003fb25aefd 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -28,6 +28,7 @@ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/export.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 42d31874edf2..33daa29eea66 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -29,6 +29,8 @@ * Gareth Hughes <gareth@valinux.com> */ +#include <linux/module.h> + #include "drmP.h" #include "drm.h" #include "mga_drm.h" diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c index f172bd5c257f..722a91b69b0c 100644 --- a/drivers/gpu/drm/mga/mga_warp.c +++ b/drivers/gpu/drm/mga/mga_warp.c @@ -30,6 +30,7 @@ #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/platform_device.h> +#include <linux/module.h> #include "drmP.h" #include "drm.h" diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 032a82098136..5fc201b49d30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -640,10 +640,9 @@ static int nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t reg0 = nv_rd32(dev, reg + 0); - uint32_t reg1 = nv_rd32(dev, reg + 4); struct nouveau_pll_vals pll; struct pll_lims pll_limits; + u32 ctrl, mask, coef; int ret; ret = get_pll_limits(dev, reg, &pll_limits); @@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) if (!clk) return -ERANGE; - reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); - reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; - - if (dev_priv->vbios.execute) { - still_alive(); - nv_wr32(dev, reg + 4, reg1); - nv_wr32(dev, reg + 0, reg0); + coef = pll.N1 << 8 | pll.M1; + ctrl = pll.log2P << 16; + mask = 0x00070000; + if (reg == 0x004008) { + mask |= 0x01f80000; + ctrl |= (pll_limits.log2p_bias << 19); + ctrl |= (pll.log2P << 22); } + if (!dev_priv->vbios.execute) + return 0; + + nv_mask(dev, reg + 0, mask, ctrl); + nv_wr32(dev, reg + 4, coef); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 7226f419e178..7cc37e690860 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) if (dev_priv->card_type == NV_10 && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && - nvbo->bo.mem.num_pages < vram_pages / 2) { + 
nvbo->bo.mem.num_pages < vram_pages / 4) { /* * Make sure that the color and depth buffers are handled * by independent memory controller units. Up to a 9x diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a319d5646ea9..bb6ec9ef8676 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, INIT_LIST_HEAD(&chan->nvsw.vbl_wait); INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); + spin_lock_init(&chan->fence.lock); /* setup channel's memory and vm */ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index e0d275e1c96c..cea6696b1906 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, case OUTPUT_DP: max_clock = nv_encoder->dp.link_nr; max_clock *= nv_encoder->dp.link_bw; - clock = clock * nouveau_connector_bpp(connector) / 8; + clock = clock * nouveau_connector_bpp(connector) / 10; break; default: BUG_ON(1); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index ddbabefb4273..b12fd2c80812 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, spin_unlock_irqrestore(&dev->event_lock, flags); return 0; } + +int +nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct nouveau_bo *bo; + int ret; + + args->pitch = roundup(args->width * (args->bpp / 8), 256); + args->size = args->pitch * args->height; + args->size = roundup(args->size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo); + if (ret) + return ret; + + ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); + drm_gem_object_unreference_unlocked(bo->gem); + return ret; +} + +int +nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} + +int +nouveau_display_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *poffset) +{ + struct drm_gem_object *gem; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (gem) { + struct nouveau_bo *bo = gem->driver_private; + *poffset = bo->bo.addr_space_offset; + drm_gem_object_unreference_unlocked(gem); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index c1e01f37b9d1..9791d13c9e3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -23,6 +23,7 @@ */ #include <linux/console.h> +#include <linux/module.h> #include "drmP.h" #include "drm.h" @@ -432,6 +433,10 @@ static struct drm_driver driver = { .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, + .dumb_create = nouveau_display_dumb_create, + .dumb_map_offset = nouveau_display_dumb_map_offset, + .dumb_destroy = nouveau_display_dumb_destroy, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h 
b/drivers/gpu/drm/nouveau/nouveau_drv.h index 29837da1098b..4c0be3a4ed88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event); int nouveau_finish_page_flip(struct nouveau_channel *, struct nouveau_page_flip_state *); +int nouveau_display_dumb_create(struct drm_file *, struct drm_device *, + struct drm_mode_create_dumb *args); +int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, + uint32_t handle, uint64_t *offset); +int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *, + uint32_t handle); /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 14a8627efe4d..3a4cc32b9e44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fbdev *nfbdev; + int preferred_bpp; int ret; nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); @@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev) } drm_fb_helper_single_add_all_connectors(&nfbdev->helper); - drm_fb_helper_initial_config(&nfbdev->helper, 32); + + if (dev_priv->vram_size <= 32 * 1024 * 1024) + preferred_bpp = 8; + else if (dev_priv->vram_size <= 64 * 1024 * 1024) + preferred_bpp = 16; + else + preferred_bpp = 32; + + drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 81116cfea275..2f6daae68b9d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) return ret; } - INIT_LIST_HEAD(&chan->fence.pending); - spin_lock_init(&chan->fence.lock); atomic_set(&chan->fence.last_sequence_irq, 0); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index f6a27fabcfe0..d39b2202b197 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -22,6 +22,8 @@ * Authors: Ben Skeggs */ +#include <linux/module.h> + #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_i2c.h" @@ -331,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); - for (i = 0; info[i].addr; i++) { + for (i = 0; i2c && info[i].addr; i++) { if (nouveau_probe_i2c_addr(i2c, info[i].addr) && (!match || match(i2c, &info[i]))) { NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 02222c540aee..960c0ae0c0c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) return ret; } - ret = drm_mm_init(&chan->ramin_heap, base, size); + ret = drm_mm_init(&chan->ramin_heap, base, size - base); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); nouveau_gpuobj_ref(NULL, &chan->ramin); diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 
9f178aa94162..33d03fbf00df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev) if(version == 0x15) { memtimings->timing = kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); - if(!memtimings) { + if (!memtimings->timing) { NV_WARN(dev,"Could not allocate memtiming table\n"); return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index b75258a9fe44..c8a463b76c89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be) pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); } + nvbe->unmap_pages = false; } + + nvbe->pages = NULL; } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 82478e0998e5..d8831ab42bb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_display_early; + /* workaround an odd issue on nvc1 by disabling the device's + * nosnoop capability. hopefully won't cause issues until a + * better fix is found - assuming there is one... + */ + if (dev_priv->chipset == 0xc1) { + nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); + } + nouveau_pm_init(dev); ret = engine->vram.init(dev); @@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->noaccel = !!nouveau_noaccel; if (nouveau_noaccel == -1) { switch (dev_priv->chipset) { - case 0xc1: /* known broken */ - case 0xc8: /* never tested */ +#if 0 + case 0xXX: /* known broken */ NV_INFO(dev, "acceleration disabled by default, pass " "noaccel=0 to force enable\n"); dev_priv->noaccel = true; break; +#endif default: dev_priv->noaccel = false; break; diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 081ca7b03e8a..5a46446dd5a8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -22,6 +22,8 @@ * Authors: Martin Peres */ +#include <linux/module.h> + #include "drmP.h" #include "nouveau_drv.h" diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index bbc0b9c7e1f7..e676b0d53478 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c @@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg) int P = (ctrl & 0x00070000) >> 16; u32 ref = 27000, clk = 0; - if (ctrl & 0x80000000) + if ((ctrl & 0x80000000) && M1) { clk = ref * N1 / M1; - - if (!(ctrl & 0x00000100)) { - if (ctrl & 0x40000000) - clk = clk * N2 / M2; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + clk = clk * N2 / M2; + else + clk = 0; + } } return clk >> P; @@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) } /* memory clock */ + if (!perflvl->memory) { + info->mpll_ctrl = 0x00000000; + goto out; + } + ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, &N1, &M1, &N2, &M2, &log2P); if (ret < 0) @@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) mdelay(5); nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); + if (!info->mpll_ctrl) + goto resume; + /* wait for vblank start on active crtcs, disable memory access */ for (i = 0; i < 2; i++) { if (!(crtc_mask & (1 << i))) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c 
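/*
 * For illustration only (not part of the patch): the nv40_pm.c read_pll_2()
 * change above guards the divisions against M1/M2 being zero when a PLL
 * stage is powered down. The recovered frequency is ref * N1 / M1, optionally
 * cascaded through N2 / M2, then divided by 2^P. With the 27 MHz reference:
 * N1=100, M1=6 gives 450000 kHz; a second stage of N2=2, M2=1 doubles that to
 * 900000 kHz; P=1 shifts it back down to 450000 kHz. A sketch of the guarded
 * arithmetic:
 */
static unsigned long pll2_recover_khz(unsigned ref_khz, unsigned n1, unsigned m1,
				      int cascade_on, unsigned n2, unsigned m2,
				      unsigned p)
{
	unsigned long clk = 0;

	if (m1)				/* first stage running */
		clk = (unsigned long)ref_khz * n1 / m1;
	if (cascade_on)			/* second stage in the signal path */
		clk = m2 ? clk * n2 / m2 : 0;
	return clk >> p;
}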
b/drivers/gpu/drm/nouveau/nv50_display.c index d23ca00e7d62..06de250fe617 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), mc; - int i, crtc, or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); disp->irq.dcb = NULL; @@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev) struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; struct dcb_entry *dcb; - int i, crtc, or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); dcb = disp->irq.dcb; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8c979b31ff61..ac601f7c4e1a 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine) NV_DEBUG(dev, "\n"); /* master reset */ - nv_mask(dev, 0x000200, 0x00200100, 0x00000000); - nv_mask(dev, 0x000200, 0x00200100, 0x00200100); + nv_mask(dev, 0x000200, 0x00201000, 0x00000000); + nv_mask(dev, 0x000200, 0x00201000, 0x00201000); nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ /* reset/enable traps and interrupts */ diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index d05c2c3b2444..4b46d6968566 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, offset + 0x1c, 0x00880000); break; case 0x86: - gr_def(ctx, offset + 0x1c, 0x008c0000); + gr_def(ctx, offset + 0x1c, 0x018c0000); break; case 0x92: case 0x96: diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 9da23838e63e..2e45e57fd869 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev) colbits = (r4 & 0x0000f000) >> 12; rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = ((r4 & 0x01000000) ? 
8 : 4); + banks = 1 << (((r4 & 0x03000000) >> 24) + 2); rowsize = parts * banks * (1 << colbits) * 8; predicted = rowsize << rowbitsa; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 4b8d0b3f7d2b..ecfafd70cf0e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -23,6 +23,7 @@ */ #include <linux/firmware.h> +#include <linux/module.h> #include "drmP.h" @@ -156,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i = 0, gpc, tp, ret; - u32 magic; ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, &grch->unk408004); @@ -206,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) nv_wo32(grch->mmio, i++ * 4, 0x0041880c); nv_wo32(grch->mmio, i++ * 4, 0x80000018); - magic = 0x02180000; - nv_wo32(grch->mmio, i++ * 4, 0x00405830); - nv_wo32(grch->mmio, i++ * 4, magic); - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { - u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); - nv_wo32(grch->mmio, i++ * 4, reg); - nv_wo32(grch->mmio, i++ * 4, magic); + if (dev_priv->chipset != 0xc1) { + u32 magic = 0x02180000; + nv_wo32(grch->mmio, i++ * 4, 0x00405830); + nv_wo32(grch->mmio, i++ * 4, magic); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x520); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, magic); + magic += 0x0324; + } + } + } else { + u32 magic = 0x02180000; + nv_wo32(grch->mmio, i++ * 4, 0x00405830); + nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218); + nv_wo32(grch->mmio, i++ * 4, 0x004064c4); + nv_wo32(grch->mmio, i++ * 4, 0x0086ffff); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x520); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic); + magic += 0x0324; + } + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x544); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, magic); + magic += 0x0324; + } } } @@ -357,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) u8 tpnr[GPC_MAX]; int i, gpc, tpc; + nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */ + /* * TP ROP UNKVAL(magic_not_rop_nr) * 450: 4/0/0/0 2 3 diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index dd0e6a736b3b..96b0b93d94ca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) /* calculate first set of magics */ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); + gpc = -1; for (tp = 0; tp < priv->tp_total; tp++) { do { gpc = (gpc + 1) % priv->gpc_nr; @@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan) if (1) { u32 tp_mask = 0, tp_set = 0; - u8 tpnr[GPC_MAX]; + u8 tpnr[GPC_MAX], a, b; memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); for (gpc = 0; gpc < priv->gpc_nr; gpc++) tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); - gpc = -1; - for (i = 0, gpc = -1; i < 32; i++) { - int ltp = i * (priv->tp_total - 1) / 32; - - do { - gpc = (gpc + 1) % priv->gpc_nr; - } 
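/*
 * For illustration only (not part of the patch): in the nv50_vram.c hunk
 * above, the bank count is now decoded from a two-bit field instead of a
 * single bit: banks = 1 << (((r4 & 0x03000000) >> 24) + 2), so field values
 * 0, 1 and 2 give 4, 8 and 16 banks rather than only 4 or 8. For example,
 * r4 = 0x02xxxxxx decodes to 1 << (2 + 2) = 16 banks, and the row size that
 * feeds the rblock calculation becomes parts * 16 * (1 << colbits) * 8 bytes.
 */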
while (!tpnr[gpc]); - tp = priv->tp_nr[gpc] - tpnr[gpc]--; + for (i = 0, gpc = -1, b = -1; i < 32; i++) { + a = (i * (priv->tp_total - 1)) / 32; + if (a != b) { + b = a; + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpnr[gpc]); + tp = priv->tp_nr[gpc] - tpnr[gpc]--; - tp_set |= 1 << ((gpc * 8) + tp); + tp_set |= 1 << ((gpc * 8) + tp); + } - do { - nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); - tp_set ^= tp_mask; - nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); - tp_set ^= tp_mask; - } while (ltp == (++i * (priv->tp_total - 1) / 32)); - i--; + nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); + nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask); } } diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index edbfe9360ae2..ce984d573a51 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c @@ -43,7 +43,7 @@ static const u8 types[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, - 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, + 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 @@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev) u32 bsize = nv_rd32(dev, 0x10f20c); u32 offset, length; bool uniform = true; - int ret, i; + int ret, part; NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); /* read amount of vram attached to each memory controller */ - for (i = 0; i < parts; i++) { - u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); + part = 0; + while (parts) { + u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000)); + if (psize == 0) + continue; + parts--; + if (psize != bsize) { if (psize < bsize) bsize = psize; uniform = false; } - NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); - + NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); dev_priv->vram_size += (u64)psize << 20; } diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index 23d63b4b3d77..cb006a718e70 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode) continue; if (nv_partner != nv_encoder && - nv_partner->dcb->or == nv_encoder->or) { + nv_partner->dcb->or == nv_encoder->dcb->or) { if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) return; break; diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 570e190710bd..bcac90b543ad 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -32,6 +32,7 @@ #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/module.h> #include "drmP.h" #include "drm.h" diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index b9e8efd2b754..4c8796ba6dd8 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -29,6 +29,8 @@ * Gareth Hughes <gareth@valinux.com> */ +#include <linux/module.h> + #include "drmP.h" #include "drm.h" #include "r128_drm.h" diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 9f363e0c4b60..cf8b4bc3e73d 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -70,7 +70,7 @@ radeon-y += radeon_device.o 
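/*
 * For illustration only (not part of the patch): the rewritten nvc0_grctx.c
 * loop above spreads the TPs over 32 register slots. Slot i uses the
 * threshold a = i * (tp_total - 1) / 32; each time the threshold advances,
 * one more TP is taken round-robin from the GPCs and added to a cumulative
 * mask, and 0x406800 + i*0x20 is written with that mask while
 * 0x406c00 + i*0x20 gets its complement within tp_mask. A host-side sketch,
 * assuming at most 4 GPCs of up to 8 TPs each (the layout the 32-bit
 * gpc*8+tp mask implies):
 */
static void fill_tp_slots(const unsigned char *tp_nr, int gpc_nr,
			  unsigned tp_total, unsigned set[32], unsigned clr[32])
{
	unsigned char tpnr[4];
	unsigned tp_mask = 0, tp_set = 0;
	int i, gpc = -1, tp, a, b = -1;

	for (i = 0; i < gpc_nr; i++) {
		tpnr[i] = tp_nr[i];
		tp_mask |= ((1u << tp_nr[i]) - 1) << (i * 8);
	}

	for (i = 0; i < 32; i++) {
		a = (i * ((int)tp_total - 1)) / 32;
		if (a != b) {	/* threshold advanced: add one more TP */
			b = a;
			do {
				gpc = (gpc + 1) % gpc_nr;
			} while (!tpnr[gpc]);
			tp = tp_nr[gpc] - tpnr[gpc]--;
			tp_set |= 1u << (gpc * 8 + tp);
		}
		set[i] = tp_set;		/* written to 0x406800 + i*0x20 */
		clr[i] = tp_set ^ tp_mask;	/* written to 0x406c00 + i*0x20 */
	}
}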
radeon_asic.o radeon_kms.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ - radeon_trace_points.o ni.o cayman_blit_shaders.o + radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a515b2a09d85..2b97262e3ab1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, bpc = connector->display_info.bpc; encoder_mode = atombios_get_encoder_mode(encoder); if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || - radeon_encoder_is_dp_bridge(encoder)) { + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = @@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (ss_enabled && ss->percentage) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE; - if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || - radeon_encoder_is_dp_bridge(encoder)) { + if (ENCODER_MODE_IS_DP(encoder_mode)) { + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_COHERENT_MODE; + /* 16200 or 27000 */ + args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); + } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (encoder_mode == ATOM_ENCODER_MODE_DP) { + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) + /* deep color support */ + args.v3.sInput.usPixelClock = + cpu_to_le16((mode->clock * bpc / 8) / 10); + if (dig->coherent_mode) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; - /* 16200 or 27000 */ - args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); - } else { - if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { - /* deep color support */ - args.v3.sInput.usPixelClock = - cpu_to_le16((mode->clock * bpc / 8) / 10); - } - if (dig->coherent_mode) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_COHERENT_MODE; - if (mode->clock > 165000) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_DUAL_LINK; - } - } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (encoder_mode == ATOM_ENCODER_MODE_DP) { + if (mode->clock > 165000) args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_COHERENT_MODE; - /* 16200 or 27000 */ - args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); - } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { - if (mode->clock > 165000) - args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_DUAL_LINK; - } + DISPPLL_CONFIG_DUAL_LINK; } - if (radeon_encoder_is_dp_bridge(encoder)) { - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); - args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; - } else + if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE) + args.v3.sInput.ucExtTransmitterID = + radeon_encoder_get_dp_bridge_encoder_id(encoder); + else args.v3.sInput.ucExtTransmitterID = 0; atom_execute_table(rdev->mode_info.atom_context, @@ 
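/*
 * For illustration only (not part of the patch): AtomBIOS takes pixel clocks
 * in units of 10 kHz, which is why the atombios_adjust_pll() hunk above
 * divides everything by 10. For DP the link rate is used directly (162000 or
 * 270000 kHz -> 16200 or 27000), while HDMI deep colour scales the mode clock
 * by bpc/8 first: a 148500 kHz mode at 12 bpc becomes 148500 * 12 / 8 =
 * 222750 kHz, i.e. 22275 in AtomBIOS units. A sketch of that conversion:
 */
static unsigned short atom_pixel_clock_10khz(unsigned mode_clock_khz,
					     unsigned bpc, int is_hdmi)
{
	unsigned khz = mode_clock_khz;

	if (is_hdmi && bpc > 8)		/* deep colour needs a faster TMDS clock */
		khz = mode_clock_khz * bpc / 8;
	return (unsigned short)(khz / 10);
}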
-945,6 +930,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode bpc = connector->display_info.bpc; switch (encoder_mode) { + case ATOM_ENCODER_MODE_DP_MST: case ATOM_ENCODER_MODE_DP: /* DP/eDP */ dp_clock = dig_connector->dp_clock / 10; @@ -1121,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & RADEON_TILING_MACRO) + if (tiling_flags & RADEON_TILING_MACRO) { + if (rdev->family >= CHIP_CAYMAN) + tmp = rdev->config.cayman.tile_config; + else + tmp = rdev->config.evergreen.tile_config; + + switch ((tmp & 0xf0) >> 4) { + case 0: /* 4 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); + break; + case 1: /* 8 banks */ + default: + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); + break; + case 2: /* 16 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: /* 1KB rows */ + default: + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB); + break; + case 1: /* 2KB rows */ + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB); + break; + case 2: /* 4KB rows */ + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB); + break; + } + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); - else if (tiling_flags & RADEON_TILING_MICRO) + } else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); switch (radeon_crtc->crtc_id) { @@ -1450,7 +1467,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * PPLL/DCPLL programming and only program the DP DTO for the * crtc virtual pixel clock. */ - if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) return ATOM_PPLL_INVALID; } @@ -1536,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 79e8ebc05307..6fb335a4fdda 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } } - DRM_ERROR("aux i2c too many retries, giving up\n"); + DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); return -EREMOTEIO; } @@ -482,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, int bpp = convert_bpc_to_bpp(connector->display_info.bpc); int lane_num, max_pix_clock; - if (radeon_connector_encoder_is_dp_bridge(connector)) + if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) return 270000; lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); @@ -553,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); int panel_mode = 
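/*
 * For illustration only (not part of the patch): the dce4_crtc_do_set_base()
 * hunk above now programs the scanout bank count and tile split from the
 * ASIC's tile_config word instead of assuming fixed 2D-tiling parameters.
 * Bits 7:4 select the bank count (0 -> 4, 1 -> 8, 2 -> 16 banks) and bits
 * 15:12 the row/tile split (0 -> 1 KB, 1 -> 2 KB, 2 -> 4 KB rows). A
 * tile_config whose two fields both read 1 (e.g. 0x1010) therefore decodes
 * to 8 banks with 2 KB rows, i.e. EVERGREEN_ADDR_SURF_8_BANK plus
 * EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB in the GRPH control register.
 */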
DP_PANEL_MODE_EXTERNAL_DP_MODE; if (!ASIC_IS_DCE4(rdev)) return; - if (radeon_connector_encoder_is_dp_bridge(connector)) + if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_NUTMEG) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_TRAVIS) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + } atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP_PANEL_MODE, panel_mode); + + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) && + (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { + radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1); + } } void radeon_dp_set_link_config(struct drm_connector *connector, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c new file mode 100644 index 000000000000..39c04c1b8472 --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -0,0 +1,2369 @@ +/* + * Copyright 2007-11 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + */ +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "radeon_drm.h" +#include "radeon.h" +#include "atom.h" + +extern int atom_debug; + +/* evil but including atombios.h is much worse */ +bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, + struct drm_display_mode *mode); + + +static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + return true; + default: + return false; + } +} + +static struct drm_connector * +radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + radeon_connector = to_radeon_connector(connector); + if (radeon_encoder->devices & radeon_connector->devices) + return connector; + } + return NULL; +} + +static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + + /* set the active encoder to connector routing */ + radeon_encoder_set_active_device(encoder); + drm_mode_set_crtcinfo(adjusted_mode, 0); + + /* hw bug */ + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) + && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + + /* get the native mode for LVDS */ + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) + radeon_panel_mode_fixup(encoder, adjusted_mode); + + /* get the native mode for TV */ + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { + struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; + if (tv_dac) { + if (tv_dac->tv_std == TV_STD_NTSC || + tv_dac->tv_std == TV_STD_NTSC_J || + tv_dac->tv_std == TV_STD_PAL_M) + radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); + else + radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); + } + } + + if (ASIC_IS_DCE3(rdev) && + ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + radeon_dp_set_link_config(connector, mode); + } + + return true; +} + +static void +atombios_dac_setup(struct drm_encoder *encoder, int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + DAC_ENCODER_CONTROL_PS_ALLOCATION args; + int index = 0; + struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; + + memset(&args, 0, 
sizeof(args)); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); + break; + } + + args.ucAction = action; + + if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) + args.ucDacStandard = ATOM_DAC1_PS2; + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.ucDacStandard = ATOM_DAC1_CV; + else { + switch (dac_info->tv_std) { + case TV_STD_PAL: + case TV_STD_PAL_M: + case TV_STD_SCART_PAL: + case TV_STD_SECAM: + case TV_STD_PAL_CN: + args.ucDacStandard = ATOM_DAC1_PAL; + break; + case TV_STD_NTSC: + case TV_STD_NTSC_J: + case TV_STD_PAL_60: + default: + args.ucDacStandard = ATOM_DAC1_NTSC; + break; + } + } + args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + +} + +static void +atombios_tv_setup(struct drm_encoder *encoder, int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + TV_ENCODER_CONTROL_PS_ALLOCATION args; + int index = 0; + struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; + + memset(&args, 0, sizeof(args)); + + index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); + + args.sTVEncoder.ucAction = action; + + if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.sTVEncoder.ucTvStandard = ATOM_TV_CV; + else { + switch (dac_info->tv_std) { + case TV_STD_NTSC: + args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; + break; + case TV_STD_PAL: + args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; + break; + case TV_STD_PAL_M: + args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; + break; + case TV_STD_PAL_60: + args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; + break; + case TV_STD_NTSC_J: + args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; + break; + case TV_STD_SCART_PAL: + args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? 
*/ + break; + case TV_STD_SECAM: + args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; + break; + case TV_STD_PAL_CN: + args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; + break; + default: + args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; + break; + } + } + + args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + +} + +union dvo_encoder_control { + ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; + DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; + DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; +}; + +void +atombios_dvo_setup(struct drm_encoder *encoder, int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + union dvo_encoder_control args; + int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); + uint8_t frev, crev; + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + switch (crev) { + case 1: + /* R4xx, R5xx */ + args.ext_tmds.sXTmdsEncoder.ucEnable = action; + + if (radeon_encoder->pixel_clock > 165000) + args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; + + args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; + break; + case 2: + /* RS600/690/740 */ + args.dvo.sDVOEncoder.ucAction = action; + args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + /* DFP1, CRT1, TV1 depending on the type of port */ + args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; + + if (radeon_encoder->pixel_clock > 165000) + args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; + break; + case 3: + /* R6xx */ + args.dvo_v3.ucAction = action; + args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.dvo_v3.ucDVOConfig = 0; /* XXX */ + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +union lvds_encoder_control { + LVDS_ENCODER_CONTROL_PS_ALLOCATION v1; + LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; +}; + +void +atombios_digital_setup(struct drm_encoder *encoder, int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + union lvds_encoder_control args; + int index = 0; + int hdmi_detected = 0; + uint8_t frev, crev; + + if (!dig) + return; + + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + hdmi_detected = 1; + + memset(&args, 0, sizeof(args)); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); + else + index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); + break; + } + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 
&crev)) + return; + + switch (frev) { + case 1: + case 2: + switch (crev) { + case 1: + args.v1.ucMisc = 0; + args.v1.ucAction = action; + if (hdmi_detected) + args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; + args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) + args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; + } else { + if (dig->linkb) + args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; + if (radeon_encoder->pixel_clock > 165000) + args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; + /*if (pScrn->rgbBits == 8) */ + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; + } + break; + case 2: + case 3: + args.v2.ucMisc = 0; + args.v2.ucAction = action; + if (crev == 3) { + if (dig->coherent_mode) + args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; + } + if (hdmi_detected) + args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; + args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v2.ucTruncate = 0; + args.v2.ucSpatial = 0; + args.v2.ucTemporal = 0; + args.v2.ucFRC = 0; + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) + args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; + if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { + args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) + args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; + } + if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { + args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) + args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; + if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) + args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; + } + } else { + if (dig->linkb) + args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; + if (radeon_encoder->pixel_clock > 165000) + args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +int +atombios_get_encoder_mode(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + /* dp bridges are always DP */ + if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) + return ATOM_ENCODER_MODE_DP; + + /* DVO is always DVO */ + if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) + return ATOM_ENCODER_MODE_DVO; + + connector = radeon_get_connector_for_encoder(encoder); + /* if we don't have an active device yet, just use one of + * the connectors tied to the encoder. 
+ */ + if (!connector) + connector = radeon_get_connector_for_encoder_init(encoder); + radeon_connector = to_radeon_connector(connector); + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ + if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else if (radeon_connector->use_digital) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_CRT; + break; + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_HDMIA: + default: + if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else + return ATOM_ENCODER_MODE_DVI; + break; + case DRM_MODE_CONNECTOR_LVDS: + return ATOM_ENCODER_MODE_LVDS; + break; + case DRM_MODE_CONNECTOR_DisplayPort: + dig_connector = radeon_connector->con_priv; + if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + return ATOM_ENCODER_MODE_DP; + else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else + return ATOM_ENCODER_MODE_DVI; + break; + case DRM_MODE_CONNECTOR_eDP: + return ATOM_ENCODER_MODE_DP; + case DRM_MODE_CONNECTOR_DVIA: + case DRM_MODE_CONNECTOR_VGA: + return ATOM_ENCODER_MODE_CRT; + break; + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_9PinDIN: + /* fix me */ + return ATOM_ENCODER_MODE_TV; + /*return ATOM_ENCODER_MODE_CV;*/ + break; + } +} + +/* + * DIG Encoder/Transmitter Setup + * + * DCE 3.0/3.1 + * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. + * Supports up to 3 digital outputs + * - 2 DIG encoder blocks. + * DIG1 can drive UNIPHY link A or link B + * DIG2 can drive UNIPHY link B or LVTMA + * + * DCE 3.2 + * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). + * Supports up to 5 digital outputs + * - 2 DIG encoder blocks. + * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * + * DCE 4.0/5.0 + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). + * Supports up to 6 digital outputs + * - 6 DIG encoder blocks. + * - DIG to PHY mapping is hardcoded + * DIG1 drives UNIPHY0 link A, A+B + * DIG2 drives UNIPHY0 link B + * DIG3 drives UNIPHY1 link A, A+B + * DIG4 drives UNIPHY1 link B + * DIG5 drives UNIPHY2 link A, A+B + * DIG6 drives UNIPHY2 link B + * + * DCE 4.1 + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). + * Supports up to 6 digital outputs + * - 2 DIG encoder blocks. 
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * + * Routing + * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) + * Examples: + * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI + * crtc1 -> dig1 -> UNIPHY0 link B -> DP + * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS + * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI + */ + +union dig_encoder_control { + DIG_ENCODER_CONTROL_PS_ALLOCATION v1; + DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; + DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; + DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; +}; + +void +atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + union dig_encoder_control args; + int index = 0; + uint8_t frev, crev; + int dp_clock = 0; + int dp_lane_count = 0; + int hpd_id = RADEON_HPD_NONE; + int bpc = 8; + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + hpd_id = radeon_connector->hpd.hpd; + bpc = connector->display_info.bpc; + } + + /* no dig encoder assigned */ + if (dig->dig_encoder == -1) + return; + + memset(&args, 0, sizeof(args)); + + if (ASIC_IS_DCE4(rdev)) + index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); + else { + if (dig->dig_encoder) + index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); + else + index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); + } + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + switch (crev) { + case 1: + args.v1.ucAction = action; + args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) + args.v3.ucPanelMode = panel_mode; + else + args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) + args.v1.ucLaneNum = dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v1.ucLaneNum = 8; + else + args.v1.ucLaneNum = 4; + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; + break; + } + if (dig->linkb) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; + else + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; + break; + case 2: + case 3: + args.v3.ucAction = action; + args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) + args.v3.ucPanelMode = panel_mode; + else + args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) + args.v3.ucLaneNum = dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v3.ucLaneNum = 8; + else + 
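/*
 * For illustration only (not part of the patch): the lane-count selection in
 * atombios_dig_encoder_setup() above follows the link type rather than the
 * connector: DP uses the lane count negotiated from the DPCD
 * (dp_lane_count), a TMDS pixel clock above 165 MHz implies a dual-link
 * connection and therefore 8 lanes, and everything else uses 4. So a 268 MHz
 * dual-link DVI mode is programmed with 8 lanes while a 154 MHz single-link
 * mode gets 4, independent of what the DP path would have chosen.
 */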
args.v3.ucLaneNum = 4; + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; + args.v3.acConfig.ucDigSel = dig->dig_encoder; + switch (bpc) { + case 0: + args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } + break; + case 4: + args.v4.ucAction = action; + args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) + args.v4.ucPanelMode = panel_mode; + else + args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) + args.v4.ucLaneNum = dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v4.ucLaneNum = 8; + else + args.v4.ucLaneNum = 4; + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) { + if (dp_clock == 270000) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; + else if (dp_clock == 540000) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; + } + args.v4.acConfig.ucDigSel = dig->dig_encoder; + switch (bpc) { + case 0: + args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } + if (hpd_id == RADEON_HPD_NONE) + args.v4.ucHPD_ID = 0; + else + args.v4.ucHPD_ID = hpd_id + 1; + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + +} + +union dig_transmitter_control { + DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; +}; + +void +atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector; + union dig_transmitter_control args; + int index = 0; + uint8_t frev, crev; + bool is_dp = false; + int pll_id = 0; + int dp_clock = 0; + int dp_lane_count = 0; + int connector_object_id = 0; + int igp_lane_info = 0; + int dig_encoder = dig->dig_encoder; + + if (action == ATOM_TRANSMITTER_ACTION_INIT) { + connector = radeon_get_connector_for_encoder_init(encoder); + /* just needed to avoid bailing in the encoder check. 
the encoder + * isn't used for init + */ + dig_encoder = 0; + } else + connector = radeon_get_connector_for_encoder(encoder); + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + connector_object_id = + (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + igp_lane_info = dig_connector->igp_lane_info; + } + + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + pll_id = radeon_crtc->pll_id; + } + + /* no dig encoder assigned */ + if (dig_encoder == -1) + return; + + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder))) + is_dp = true; + + memset(&args, 0, sizeof(args)); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); + break; + } + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + switch (crev) { + case 1: + args.v1.ucAction = action; + if (action == ATOM_TRANSMITTER_ACTION_INIT) { + args.v1.usInitInfo = cpu_to_le16(connector_object_id); + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + args.v1.asMode.ucLaneSel = lane_num; + args.v1.asMode.ucLaneSet = lane_set; + } else { + if (is_dp) + args.v1.usPixelClock = + cpu_to_le16(dp_clock / 10); + else if (radeon_encoder->pixel_clock > 165000) + args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); + else + args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + } + + args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; + + if (dig_encoder) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; + + if ((rdev->flags & RADEON_IS_IGP) && + (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { + if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { + if (igp_lane_info & 0x1) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; + else if (igp_lane_info & 0x2) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; + else if (igp_lane_info & 0x4) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; + else if (igp_lane_info & 0x8) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; + } else { + if (igp_lane_info & 0x3) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; + else if (igp_lane_info & 0xc) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; + } + } + + if (dig->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; + + if (is_dp) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; + if (radeon_encoder->pixel_clock > 165000) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; + } + break; + case 2: + args.v2.ucAction = action; + if (action == ATOM_TRANSMITTER_ACTION_INIT) { + 
args.v2.usInitInfo = cpu_to_le16(connector_object_id); + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + args.v2.asMode.ucLaneSel = lane_num; + args.v2.asMode.ucLaneSet = lane_set; + } else { + if (is_dp) + args.v2.usPixelClock = + cpu_to_le16(dp_clock / 10); + else if (radeon_encoder->pixel_clock > 165000) + args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); + else + args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + } + + args.v2.acConfig.ucEncoderSel = dig_encoder; + if (dig->linkb) + args.v2.acConfig.ucLinkSel = 1; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v2.acConfig.ucTransmitterSel = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v2.acConfig.ucTransmitterSel = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v2.acConfig.ucTransmitterSel = 2; + break; + } + + if (is_dp) { + args.v2.acConfig.fCoherentMode = 1; + args.v2.acConfig.fDPConnector = 1; + } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v2.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v2.acConfig.fDualLinkConnector = 1; + } + break; + case 3: + args.v3.ucAction = action; + if (action == ATOM_TRANSMITTER_ACTION_INIT) { + args.v3.usInitInfo = cpu_to_le16(connector_object_id); + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + args.v3.asMode.ucLaneSel = lane_num; + args.v3.asMode.ucLaneSet = lane_set; + } else { + if (is_dp) + args.v3.usPixelClock = + cpu_to_le16(dp_clock / 10); + else if (radeon_encoder->pixel_clock > 165000) + args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); + else + args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + } + + if (is_dp) + args.v3.ucLaneNum = dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v3.ucLaneNum = 8; + else + args.v3.ucLaneNum = 4; + + if (dig->linkb) + args.v3.acConfig.ucLinkSel = 1; + if (dig_encoder & 1) + args.v3.acConfig.ucEncoderSel = 1; + + /* Select the PLL for the PHY + * DP PHY should be clocked from external src if there is + * one. 
+ */ + /* On DCE4, if there is an external clock, it generates the DP ref clock */ + if (is_dp && rdev->clock.dp_extclk) + args.v3.acConfig.ucRefClkSource = 2; /* external src */ + else + args.v3.acConfig.ucRefClkSource = pll_id; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v3.acConfig.ucTransmitterSel = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v3.acConfig.ucTransmitterSel = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v3.acConfig.ucTransmitterSel = 2; + break; + } + + if (is_dp) + args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v3.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v3.acConfig.fDualLinkConnector = 1; + } + break; + case 4: + args.v4.ucAction = action; + if (action == ATOM_TRANSMITTER_ACTION_INIT) { + args.v4.usInitInfo = cpu_to_le16(connector_object_id); + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + args.v4.asMode.ucLaneSel = lane_num; + args.v4.asMode.ucLaneSet = lane_set; + } else { + if (is_dp) + args.v4.usPixelClock = + cpu_to_le16(dp_clock / 10); + else if (radeon_encoder->pixel_clock > 165000) + args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); + else + args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + } + + if (is_dp) + args.v4.ucLaneNum = dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v4.ucLaneNum = 8; + else + args.v4.ucLaneNum = 4; + + if (dig->linkb) + args.v4.acConfig.ucLinkSel = 1; + if (dig_encoder & 1) + args.v4.acConfig.ucEncoderSel = 1; + + /* Select the PLL for the PHY + * DP PHY should be clocked from external src if there is + * one. 
+ */ + /* On DCE5 DCPLL usually generates the DP ref clock */ + if (is_dp) { + if (rdev->clock.dp_extclk) + args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; + else + args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; + } else + args.v4.acConfig.ucRefClkSource = pll_id; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v4.acConfig.ucTransmitterSel = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v4.acConfig.ucTransmitterSel = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v4.acConfig.ucTransmitterSel = 2; + break; + } + + if (is_dp) + args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */ + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v4.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v4.acConfig.fDualLinkConnector = 1; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + break; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +bool +atombios_set_edp_panel_power(struct drm_connector *connector, int action) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + union dig_transmitter_control args; + int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); + uint8_t frev, crev; + + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) + goto done; + + if (!ASIC_IS_DCE4(rdev)) + goto done; + + if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && + (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) + goto done; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + goto done; + + memset(&args, 0, sizeof(args)); + + args.v1.ucAction = action; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + /* wait for the panel to power up */ + if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { + int i; + + for (i = 0; i < 300; i++) { + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) + return true; + mdelay(1); + } + return false; + } +done: + return true; +} + +union external_encoder_control { + EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; + EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; +}; + +static void +atombios_external_encoder_setup(struct drm_encoder *encoder, + struct drm_encoder *ext_encoder, + int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); + union external_encoder_control args; + struct drm_connector *connector; + int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); + u8 frev, crev; + int dp_clock = 0; + int dp_lane_count = 0; + int connector_object_id = 0; + u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; + int bpc = 8; + + if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) + connector = radeon_get_connector_for_encoder_init(encoder); + else + connector = radeon_get_connector_for_encoder(encoder); + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = 
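/*
 * For illustration only (not part of the patch): every
 * atombios_dig_transmitter_setup() variant above encodes usPixelClock the
 * same way, in 10 kHz units: DP passes the link rate (270000 kHz -> 27000),
 * a dual-link TMDS mode above 165 MHz passes half the pixel clock since each
 * link carries half the pixels (e.g. 269000 kHz -> 13450), and single-link
 * modes pass the pixel clock directly (e.g. 108000 kHz -> 10800).
 */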
dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + connector_object_id = + (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + bpc = connector->display_info.bpc; + } + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + /* no params on frev 1 */ + break; + case 2: + switch (crev) { + case 1: + case 2: + args.v1.sDigEncoder.ucAction = action; + args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) { + if (dp_clock == 270000) + args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + args.v1.sDigEncoder.ucLaneNum = dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v1.sDigEncoder.ucLaneNum = 8; + else + args.v1.sDigEncoder.ucLaneNum = 4; + break; + case 3: + args.v3.sExtEncoder.ucAction = action; + if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) + args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); + else + args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) { + if (dp_clock == 270000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; + else if (dp_clock == 540000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; + args.v3.sExtEncoder.ucLaneNum = dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v3.sExtEncoder.ucLaneNum = 8; + else + args.v3.sExtEncoder.ucLaneNum = 4; + switch (ext_enum) { + case GRAPH_OBJECT_ENUM_ID1: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; + break; + case GRAPH_OBJECT_ENUM_ID2: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; + break; + case GRAPH_OBJECT_ENUM_ID3: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; + break; + } + switch (bpc) { + case 0: + args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } + break; + default: + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); + return; + } + break; + default: + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); + return; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +static void +atombios_yuv_setup(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + ENABLE_YUV_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); + uint32_t temp, reg; + + memset(&args, 0, sizeof(args)); + + if (rdev->family >= CHIP_R600) + reg = R600_BIOS_3_SCRATCH; + else + reg = RADEON_BIOS_3_SCRATCH; + + /* XXX: fix up scratch reg 
handling */ + temp = RREG32(reg); + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + WREG32(reg, (ATOM_S3_TV1_ACTIVE | + (radeon_crtc->crtc_id << 18))); + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); + else + WREG32(reg, 0); + + if (enable) + args.ucEnable = ATOM_ENABLE; + args.ucCRTC = radeon_crtc->crtc_id; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + WREG32(reg, temp); +} + +static void +radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; + int index = 0; + + memset(&args, 0, sizeof(args)); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); + else + index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); + else + index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); + else + index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); + break; + default: + return; + } + + switch (mode) { + case DRM_MODE_DPMS_ON: + args.ucAction = ATOM_ENABLE; + /* workaround for DVOOutputControl on some RS690 systems */ + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { + u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); + WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + WREG32(RADEON_BIOS_3_SCRATCH, reg); + } else + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.ucAction = ATOM_LCD_BLON; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + args.ucAction = ATOM_DISABLE; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.ucAction = ATOM_LCD_BLOFF; + atom_execute_table(rdev->mode_info.atom_context, index, 
(uint32_t *)&args); + } + break; + } +} + +static void +radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_connector *radeon_connector = NULL; + struct radeon_connector_atom_dig *radeon_dig_connector = NULL; + + if (connector) { + radeon_connector = to_radeon_connector(connector); + radeon_dig_connector = radeon_connector->con_priv; + } + + switch (mode) { + case DRM_MODE_DPMS_ON: + /* some early dce3.2 boards have a bug in their transmitter control table */ + if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + else + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + radeon_dig_connector->edp_on = true; + } + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); + radeon_dp_link_train(encoder, connector); + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); + } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_OFF); + radeon_dig_connector->edp_on = false; + } + } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); + break; + } +} + +static void +radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder, + struct drm_encoder *ext_encoder, + int mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + + switch (mode) { + case DRM_MODE_DPMS_ON: + default: + if (ASIC_IS_DCE41(rdev)) { + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); + } else + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + if (ASIC_IS_DCE41(rdev)) { + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); + } else + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); + break; + } +} + +static void +radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder 
*radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + + DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", + radeon_encoder->encoder_id, mode, radeon_encoder->devices, + radeon_encoder->active_device); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + radeon_atom_encoder_dpms_avivo(encoder, mode); + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + radeon_atom_encoder_dpms_dig(encoder, mode); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + if (ASIC_IS_DCE5(rdev)) { + switch (mode) { + case DRM_MODE_DPMS_ON: + atombios_dvo_setup(encoder, ATOM_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + atombios_dvo_setup(encoder, ATOM_DISABLE); + break; + } + } else if (ASIC_IS_DCE3(rdev)) + radeon_atom_encoder_dpms_dig(encoder, mode); + else + radeon_atom_encoder_dpms_avivo(encoder, mode); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + if (ASIC_IS_DCE5(rdev)) { + switch (mode) { + case DRM_MODE_DPMS_ON: + atombios_dac_setup(encoder, ATOM_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + atombios_dac_setup(encoder, ATOM_DISABLE); + break; + } + } else + radeon_atom_encoder_dpms_avivo(encoder, mode); + break; + default: + return; + } + + if (ext_encoder) + radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode); + + radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + +} + +union crtc_source_param { + SELECT_CRTC_SOURCE_PS_ALLOCATION v1; + SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; +}; + +static void +atombios_set_encoder_crtc_source(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + union crtc_source_param args; + int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); + uint8_t frev, crev; + struct radeon_encoder_atom_dig *dig; + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + switch (crev) { + case 1: + default: + if (ASIC_IS_AVIVO(rdev)) + args.v1.ucCRTC = radeon_crtc->crtc_id; + else { + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { + args.v1.ucCRTC = radeon_crtc->crtc_id; + } else { + args.v1.ucCRTC = radeon_crtc->crtc_id << 2; + } + } + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; + break; + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) + args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; + else + args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; + break; + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; + else + args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; + else + args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; + break; + } + break; + case 2: + args.v2.ucCRTC = radeon_crtc->crtc_id; + if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; + else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } else + args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + dig = radeon_encoder->enc_priv; + switch (dig->dig_encoder) { + case 0: + args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; + break; + case 1: + args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; + break; + case 2: + args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; + break; + case 3: + args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; + break; + case 4: + 
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; + break; + case 5: + args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; + break; + } + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; + else + args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; + else + args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; + break; + } + break; + } + break; + default: + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); + return; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + /* update scratch regs with new routing */ + radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); +} + +static void +atombios_apply_encoder_quirks(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + + /* Funky macbooks */ + if ((dev->pdev->device == 0x71C5) && + (dev->pdev->subsystem_vendor == 0x106b) && + (dev->pdev->subsystem_device == 0x0080)) { + if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { + uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL); + + lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN; + lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN; + + WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control); + } + } + + /* set scaler clears this on some chips */ + if (ASIC_IS_AVIVO(rdev) && + (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { + if (ASIC_IS_DCE4(rdev)) { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, + EVERGREEN_INTERLEAVE_EN); + else + WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); + } else { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, + AVIVO_D1MODE_INTERLEAVE_EN); + else + WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); + } + } +} + +static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *test_encoder; + struct radeon_encoder_atom_dig *dig; + uint32_t dig_enc_in_use = 0; + + /* DCE4/5 */ + if (ASIC_IS_DCE4(rdev)) { + dig = radeon_encoder->enc_priv; + if (ASIC_IS_DCE41(rdev)) { + /* ontario follows DCE4 */ + if (rdev->family == CHIP_PALM) { + if (dig->linkb) + return 1; + else + return 0; + } else + /* llano follows DCE3.2 */ + return radeon_crtc->crtc_id; + } else { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + 
break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } + } + } + + /* on DCE32 an encoder can drive any block so just use the crtc id */ + if (ASIC_IS_DCE32(rdev)) { + return radeon_crtc->crtc_id; + } + + /* on DCE3 - LVTMA can only be driven by DIGB */ + list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_test_encoder; + + if (encoder == test_encoder) + continue; + + if (!radeon_encoder_is_digital(test_encoder)) + continue; + + radeon_test_encoder = to_radeon_encoder(test_encoder); + dig = radeon_test_encoder->enc_priv; + + if (dig->dig_encoder >= 0) + dig_enc_in_use |= (1 << dig->dig_encoder); + } + + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { + if (dig_enc_in_use & 0x2) + DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); + return 1; + } + if (!(dig_enc_in_use & 1)) + return 0; + return 1; +} + +/* This only needs to be called once at startup */ +void +radeon_atom_encoder_init(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); + break; + default: + break; + } + + if (ext_encoder && ASIC_IS_DCE41(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); + } +} + +static void +radeon_atom_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + + radeon_encoder->pixel_clock = adjusted_mode->clock; + + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { + if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) + atombios_yuv_setup(encoder, true); + else + atombios_yuv_setup(encoder, false); + } + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, 
ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } + break; + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + atombios_dvo_setup(encoder, ATOM_ENABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + atombios_dac_setup(encoder, ATOM_ENABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + atombios_tv_setup(encoder, ATOM_ENABLE); + else + atombios_tv_setup(encoder, ATOM_DISABLE); + } + break; + } + + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); + else + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + } + + atombios_apply_encoder_quirks(encoder, adjusted_mode); + + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + r600_hdmi_enable(encoder); + r600_hdmi_setmode(encoder, adjusted_mode); + } +} + +static bool +atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | + ATOM_DEVICE_CV_SUPPORT | + ATOM_DEVICE_CRT_SUPPORT)) { + DAC_LOAD_DETECTION_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); + uint8_t frev, crev; + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return false; + + args.sDacload.ucMisc = 0; + + if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || + (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) + args.sDacload.ucDacType = ATOM_DAC_A; + else + args.sDacload.ucDacType = ATOM_DAC_B; + + if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) + args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); + else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) + args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); + else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { + args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); + if (crev >= 3) + args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; + } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { + args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); + if (crev >= 3) + args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + return true; + } else + return false; +} + +static enum drm_connector_status +radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = 
to_radeon_encoder(encoder); + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + uint32_t bios_0_scratch; + + if (!atombios_dac_load_detect(encoder, connector)) { + DRM_DEBUG_KMS("detect returned false \n"); + return connector_status_unknown; + } + + if (rdev->family >= CHIP_R600) + bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); + else + bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); + + DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); + if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { + if (bios_0_scratch & ATOM_S0_CRT1_MASK) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { + if (bios_0_scratch & ATOM_S0_CRT2_MASK) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { + if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { + if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) + return connector_status_connected; /* CTV */ + else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) + return connector_status_connected; /* STV */ + } + return connector_status_disconnected; +} + +static enum drm_connector_status +radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + u32 bios_0_scratch; + + if (!ASIC_IS_DCE4(rdev)) + return connector_status_unknown; + + if (!ext_encoder) + return connector_status_unknown; + + if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) + return connector_status_unknown; + + /* load detect on the dp bridge */ + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); + + bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); + + DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); + if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { + if (bios_0_scratch & ATOM_S0_CRT1_MASK) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { + if (bios_0_scratch & ATOM_S0_CRT2_MASK) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { + if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) + return connector_status_connected; + } + if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { + if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) + return connector_status_connected; /* CTV */ + else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) + return connector_status_connected; /* STV */ + } + return connector_status_disconnected; +} + +void +radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) +{ + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + + if (ext_encoder) + /* ddc_setup on the dp bridge */ + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); + +} + +static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector = 
radeon_get_connector_for_encoder(encoder); + + if ((radeon_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE)) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + if (dig) + dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); + } + + radeon_atom_output_lock(encoder, true); + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + /* select the clock/data port if it uses a router */ + if (radeon_connector->router.cd_valid) + radeon_router_select_cd_port(radeon_connector); + + /* turn eDP panel on for mode set */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } + + /* this is needed for the pll/ss setup to work correctly in some cases */ + atombios_set_encoder_crtc_source(encoder); +} + +static void radeon_atom_encoder_commit(struct drm_encoder *encoder) +{ + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + radeon_atom_output_lock(encoder, false); +} + +static void radeon_atom_encoder_disable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig; + + /* check for pre-DCE3 cards with shared encoders; + * can't really use the links individually, so don't disable + * the encoder if it's in use by another connector + */ + if (!ASIC_IS_DCE3(rdev)) { + struct drm_encoder *other_encoder; + struct radeon_encoder *other_radeon_encoder; + + list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { + other_radeon_encoder = to_radeon_encoder(other_encoder); + if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && + drm_helper_encoder_in_use(other_encoder)) + goto disable_done; + } + } + + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + if (ASIC_IS_DCE4(rdev)) + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } + break; + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + atombios_dvo_setup(encoder, ATOM_DISABLE); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + atombios_dac_setup(encoder, ATOM_DISABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + atombios_tv_setup(encoder, ATOM_DISABLE); + break; + } + +disable_done: + if (radeon_encoder_is_digital(encoder)) { + if (atombios_get_encoder_mode(encoder) == 
ATOM_ENCODER_MODE_HDMI) + r600_hdmi_disable(encoder); + dig = radeon_encoder->enc_priv; + dig->dig_encoder = -1; + } + radeon_encoder->active_device = 0; +} + +/* these are handled by the primary encoders */ +static void radeon_atom_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void radeon_atom_ext_commit(struct drm_encoder *encoder) +{ + +} + +static void +radeon_atom_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void radeon_atom_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { + .dpms = radeon_atom_ext_dpms, + .mode_fixup = radeon_atom_ext_mode_fixup, + .prepare = radeon_atom_ext_prepare, + .mode_set = radeon_atom_ext_mode_set, + .commit = radeon_atom_ext_commit, + .disable = radeon_atom_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + +static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { + .dpms = radeon_atom_encoder_dpms, + .mode_fixup = radeon_atom_mode_fixup, + .prepare = radeon_atom_encoder_prepare, + .mode_set = radeon_atom_encoder_mode_set, + .commit = radeon_atom_encoder_commit, + .disable = radeon_atom_encoder_disable, + .detect = radeon_atom_dig_detect, +}; + +static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { + .dpms = radeon_atom_encoder_dpms, + .mode_fixup = radeon_atom_mode_fixup, + .prepare = radeon_atom_encoder_prepare, + .mode_set = radeon_atom_encoder_mode_set, + .commit = radeon_atom_encoder_commit, + .detect = radeon_atom_dac_detect, +}; + +void radeon_enc_destroy(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + kfree(radeon_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(radeon_encoder); +} + +static const struct drm_encoder_funcs radeon_atom_enc_funcs = { + .destroy = radeon_enc_destroy, +}; + +struct radeon_encoder_atom_dac * +radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) +{ + struct drm_device *dev = radeon_encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); + + if (!dac) + return NULL; + + dac->tv_std = radeon_atombios_get_tv_info(rdev); + return dac; +} + +struct radeon_encoder_atom_dig * +radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) +{ + int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; + struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); + + if (!dig) + return NULL; + + /* coherent mode by default */ + dig->coherent_mode = true; + dig->dig_encoder = -1; + + if (encoder_enum == 2) + dig->linkb = true; + else + dig->linkb = false; + + return dig; +} + +void +radeon_add_atom_encoder(struct drm_device *dev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + radeon_encoder = to_radeon_encoder(encoder); + if 
(radeon_encoder->encoder_enum == encoder_enum) { + radeon_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) + return; + + encoder = &radeon_encoder->base; + switch (rdev->num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + radeon_encoder->enc_priv = NULL; + + radeon_encoder->encoder_enum = encoder_enum; + radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + radeon_encoder->devices = supported_device; + radeon_encoder->rmx_type = RMX_OFF; + radeon_encoder->underscan_type = UNDERSCAN_OFF; + radeon_encoder->is_ext_encoder = false; + radeon_encoder->caps = caps; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + radeon_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); + } else { + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); + } + drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC1: + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); + drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_DAC2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); + radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); + drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + radeon_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); + } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); + } else { + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); + } + drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + case 
ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: + /* these are handled by the primary encoders */ + radeon_encoder->is_ext_encoder = true; + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + else + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); + break; + } +} diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index ed406e8404a3..5e00d1670aa9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= EVERGREEN_GRPH_UPDATE_LOCK; @@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ @@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev) return actual_temp * 1000; } +void sumo_pm_init_profile(struct radeon_device *rdev) +{ + int idx; + + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + + /* low,mid sh/mh */ + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + + /* high sh/mh */ + idx = radeon_pm_get_type_index(rdev, 
POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = + rdev->pm.power_state[idx].num_clock_modes - 1; + + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = + rdev->pm.power_state[idx].num_clock_modes - 1; +} + void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -353,6 +409,7 @@ void evergreen_hpd_init(struct radeon_device *rdev) default: break; } + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } if (rdev->irq.installed) evergreen_irq_set(rdev); @@ -893,7 +950,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) u32 tmp; int r; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -945,7 +1002,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) void evergreen_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int r; /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); @@ -965,14 +1021,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void evergreen_pcie_gart_fini(struct radeon_device *rdev) @@ -1226,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); } - WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); if (rdev->flags & RADEON_IS_IGP) { tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; @@ -3031,6 +3080,10 @@ static int evergreen_startup(struct radeon_device *rdev) } } + r = r600_vram_scratch_init(rdev); + if (r) + return r; + evergreen_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { evergreen_agp_enable(rdev); @@ -3235,6 +3288,7 @@ void evergreen_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); + r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_agp_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index dcf11bbc06d9..914e5af84163 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -94,6 +94,15 @@ cp_set_surface_sync(struct radeon_device *rdev, else cp_coher_size = ((size + 255) >> 8); + if (rdev->family >= CHIP_CAYMAN) { + /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync + * to the RB directly. For IBs, the CP programs this as part of the + * surface_sync packet. 
+ */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */ + } radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(rdev, sync_type); radeon_ring_write(rdev, cp_coher_size); @@ -174,7 +183,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, - u64 gpu_addr) + u64 gpu_addr, u32 size) { u32 sq_tex_resource_word0, sq_tex_resource_word1; u32 sq_tex_resource_word4, sq_tex_resource_word7; @@ -196,6 +205,9 @@ set_tex_resource(struct radeon_device *rdev, sq_tex_resource_word7 = format | S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, size, gpu_addr); + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, sq_tex_resource_word0); @@ -613,11 +625,13 @@ int evergreen_blit_init(struct radeon_device *rdev) rdev->r600_blit.primitives.set_default_state = set_default_state; rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ - rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ + rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ rdev->r600_blit.ring_size_common += 5; /* done copy */ - rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ + rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ rdev->r600_blit.ring_size_per_loop = 74; + if (rdev->family >= CHIP_CAYMAN) + rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */ rdev->r600_blit.max_dim = 16384; diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7fdfa8ea7570..cd4590aae154 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -38,6 +38,7 @@ struct evergreen_cs_track { u32 group_size; u32 nbanks; u32 npipes; + u32 row_size; /* value we track */ u32 nsamples; u32 cb_color_base_last[12]; @@ -77,6 +78,44 @@ struct evergreen_cs_track { struct radeon_bo *db_s_write_bo; }; +static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) +{ + if (tiling_flags & RADEON_TILING_MACRO) + return ARRAY_2D_TILED_THIN1; + else if (tiling_flags & RADEON_TILING_MICRO) + return ARRAY_1D_TILED_THIN1; + else + return ARRAY_LINEAR_GENERAL; +} + +static u32 evergreen_cs_get_num_banks(u32 nbanks) +{ + switch (nbanks) { + case 2: + return ADDR_SURF_2_BANK; + case 4: + return ADDR_SURF_4_BANK; + case 8: + default: + return ADDR_SURF_8_BANK; + case 16: + return ADDR_SURF_16_BANK; + } +} + +static u32 evergreen_cs_get_tile_split(u32 row_size) +{ + switch (row_size) { + case 1: + default: + return ADDR_SURF_TILE_SPLIT_1KB; + case 2: + return ADDR_SURF_TILE_SPLIT_2KB; + case 4: + return ADDR_SURF_TILE_SPLIT_4KB; + } +} + static void evergreen_cs_track_init(struct evergreen_cs_track *track) { int i; @@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } break; case DB_Z_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } track->db_z_info = radeon_get_ib_value(p, idx); - ib[idx] &= ~Z_ARRAY_MODE(0xf); - track->db_z_info &= ~Z_ARRAY_MODE(0xf); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->db_z_info |= 
Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else { - ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] &= ~Z_ARRAY_MODE(0xf); + track->db_z_info &= ~Z_ARRAY_MODE(0xf); + ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + } } break; case DB_STENCIL_INFO: @@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR5_INFO: case CB_COLOR6_INFO: case CB_COLOR7_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } tmp = (reg - CB_COLOR0_INFO) / 0x3c; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR8_INFO: case CB_COLOR9_INFO: case CB_COLOR10_INFO: case CB_COLOR11_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR0_PITCH: @@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR9_ATTRIB: case CB_COLOR10_ATTRIB: case CB_COLOR11_ATTRIB: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + } break; case CB_COLOR0_DIM: 
case CB_COLOR1_DIM: @@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + ib[idx+1+(i*8)+1] |= + TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx+1+(i*8)+6] |= + TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + ib[idx+1+(i*8)+7] |= + TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + } + } texture = reloc->robj; /* tex mip base */ r = evergreen_cs_packet_next_reloc(p, &reloc); @@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct evergreen_cs_track *track; + u32 tmp; int r; if (p->track == NULL) { @@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) if (track == NULL) return -ENOMEM; evergreen_cs_track_init(track); - track->npipes = p->rdev->config.evergreen.tiling_npipes; - track->nbanks = p->rdev->config.evergreen.tiling_nbanks; - track->group_size = p->rdev->config.evergreen.tiling_group_size; + if (p->rdev->family >= CHIP_CAYMAN) + tmp = p->rdev->config.cayman.tile_config; + else + tmp = p->rdev->config.evergreen.tile_config; + + switch (tmp & 0xf) { + case 0: + track->npipes = 1; + break; + case 1: + default: + track->npipes = 2; + break; + case 2: + track->npipes = 4; + break; + case 3: + track->npipes = 8; + break; + } + + switch ((tmp & 0xf0) >> 4) { + case 0: + track->nbanks = 4; + break; + case 1: + default: + track->nbanks = 8; + break; + case 2: + track->nbanks = 16; + break; + } + + switch ((tmp & 0xf00) >> 8) { + case 0: + track->group_size = 256; + break; + case 1: + default: + track->group_size = 512; + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: + track->row_size = 1; + break; + case 1: + default: + track->row_size = 2; + break; + case 2: + track->row_size = 4; + break; + } + p->track = track; } do { diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index c781c92c3451..7d7f2155e34c 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -42,6 +42,17 @@ # define EVERGREEN_GRPH_DEPTH_8BPP 0 # define EVERGREEN_GRPH_DEPTH_16BPP 1 # define EVERGREEN_GRPH_DEPTH_32BPP 2 +# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) +# define EVERGREEN_ADDR_SURF_2_BANK 0 +# define EVERGREEN_ADDR_SURF_4_BANK 1 +# define EVERGREEN_ADDR_SURF_8_BANK 2 +# define EVERGREEN_ADDR_SURF_16_BANK 3 +# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4) +# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3 # define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) /* 8 BPP */ # define EVERGREEN_GRPH_FORMAT_INDEXED 0 @@ -61,6 +72,24 @@ # define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 # define EVERGREEN_GRPH_FORMAT_RGB111110 6 # define EVERGREEN_GRPH_FORMAT_BGR101111 7 +# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2 +# 
define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3 +# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6 +# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3 # define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) # define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 # define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b937c49054d9..e00039e59a75 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -899,6 +899,10 @@ #define DB_HTILE_DATA_BASE 0x28014 #define DB_Z_INFO 0x28040 # define Z_ARRAY_MODE(x) ((x) << 4) +# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) +# define DB_NUM_BANKS(x) (((x) & 0x3) << 12) +# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) +# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) #define DB_STENCIL_INFO 0x28044 #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c @@ -951,6 +955,29 @@ # define CB_SF_EXPORT_FULL 0 # define CB_SF_EXPORT_NORM 1 #define CB_COLOR0_ATTRIB 0x28c74 +# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) +# define ADDR_SURF_TILE_SPLIT_64B 0 +# define ADDR_SURF_TILE_SPLIT_128B 1 +# define ADDR_SURF_TILE_SPLIT_256B 2 +# define ADDR_SURF_TILE_SPLIT_512B 3 +# define ADDR_SURF_TILE_SPLIT_1KB 4 +# define ADDR_SURF_TILE_SPLIT_2KB 5 +# define ADDR_SURF_TILE_SPLIT_4KB 6 +# define CB_NUM_BANKS(x) (((x) & 0x3) << 10) +# define ADDR_SURF_2_BANK 0 +# define ADDR_SURF_4_BANK 1 +# define ADDR_SURF_8_BANK 2 +# define ADDR_SURF_16_BANK 3 +# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13) +# define ADDR_SURF_BANK_WIDTH_1 0 +# define ADDR_SURF_BANK_WIDTH_2 1 +# define ADDR_SURF_BANK_WIDTH_4 2 +# define ADDR_SURF_BANK_WIDTH_8 3 +# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16) +# define ADDR_SURF_BANK_HEIGHT_1 0 +# define ADDR_SURF_BANK_HEIGHT_2 1 +# define ADDR_SURF_BANK_HEIGHT_4 2 +# define ADDR_SURF_BANK_HEIGHT_8 3 #define CB_COLOR0_DIM 0x28c78 /* only CB0-7 blocks have these regs */ #define CB_COLOR0_CMASK 0x28c7c @@ -1137,7 +1164,11 @@ # define SQ_SEL_1 5 #define SQ_TEX_RESOURCE_WORD5_0 0x30014 #define SQ_TEX_RESOURCE_WORD6_0 0x30018 +# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) #define SQ_TEX_RESOURCE_WORD7_0 0x3001c +# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) +# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) +# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) #define SQ_VTX_CONSTANT_WORD0_0 0x30000 #define SQ_VTX_CONSTANT_WORD1_0 0x30004 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 556b7bc3418b..0e5799857465 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/module.h> #include "drmP.h" #include "radeon.h" #include "radeon_asic.h" @@ -261,8 +262,11 @@ int ni_mc_load_microcode(struct radeon_device *rdev) WREG32(MC_SEQ_SUP_CNTL, 0x00000001); /* wait for training to complete */ - while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)) - 
udelay(10); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD) + break; + udelay(1); + } if (running) WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); @@ -932,7 +936,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -977,8 +981,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) void cayman_pcie_gart_disable(struct radeon_device *rdev) { - int r; - /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -994,14 +996,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev) WREG32(VM_L2_CNTL2, 0); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void cayman_pcie_gart_fini(struct radeon_device *rdev) @@ -1361,6 +1356,10 @@ static int cayman_startup(struct radeon_device *rdev) return r; } + r = r600_vram_scratch_init(rdev); + if (r) + return r; + evergreen_mc_program(rdev); r = cayman_pcie_gart_enable(rdev); if (r) @@ -1556,6 +1555,7 @@ void cayman_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); cayman_pcie_gart_fini(rdev); + r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_bo_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8f8b8fa14357..bfc08f6320f8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -41,6 +41,7 @@ #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/module.h> #include "r100_reg_safe.h" #include "rn50_reg_safe.h" @@ -186,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; + int i; /* Lock the graphics update lock */ /* update the scanout addresses */ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); /* Wait for update_pending to go high. */ - while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ @@ -536,6 +542,7 @@ void r100_hpd_init(struct radeon_device *rdev) default: break; } + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } if (rdev->irq.installed) r100_irq_set(rdev); @@ -576,7 +583,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) { int r; - if (rdev->gart.table.ram.ptr) { + if (rdev->gart.ptr) { WARN(1, "R100 PCI GART already initialized\n"); return 0; } @@ -635,10 +642,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev) int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { + u32 *gtt = rdev->gart.ptr; + if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); + gtt[i] = cpu_to_le32(lower_32_bits(addr)); return 0; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 33f2b68c680b..c93bc64707e1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + void __iomem *ptr = rdev->gart.ptr; if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; @@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj) { + if (rdev->gart.robj) { WARN(1, "RV370 PCIE GART already initialized\n"); return 0; } @@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) uint32_t tmp; int r; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) void rv370_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int r; WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); @@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void rv370_pcie_gart_fini(struct radeon_device *rdev) @@ -709,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return r; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_TXO_MACRO_TILE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_TXO_MICRO_TILE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_TXO_MICRO_TILE_SQUARE; - - tmp = idx_value + ((u32)reloc->lobj.gpu_offset); - tmp |= tile_flags; - ib[idx] = tmp; + if (p->keep_tiling_flags) { + ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ + ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); + } else { + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_TXO_MACRO_TILE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_TXO_MICRO_TILE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + 
tile_flags |= R300_TXO_MICRO_TILE_SQUARE; + + tmp = idx_value + ((u32)reloc->lobj.gpu_offset); + tmp |= tile_flags; + ib[idx] = tmp; + } track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -768,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); - r100_cs_dump_packet(p, pkt); - return r; - } + if (!p->keep_tiling_flags) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_COLOR_TILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_COLOR_MICROTILE_ENABLE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_COLOR_MICROTILE_ENABLE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; - tmp = idx_value & ~(0x7 << 16); - tmp |= tile_flags; - ib[idx] = tmp; + tmp = idx_value & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + } i = (reg - 0x4E38) >> 2; track->cb[i].pitch = idx_value & 0x3FFE; switch (((idx_value >> 21) & 0xF)) { @@ -851,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4F24: /* ZB_DEPTHPITCH */ - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); - r100_cs_dump_packet(p, pkt); - return r; - } - - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_DEPTHMACROTILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_DEPTHMICROTILE_TILED; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; + if (!p->keep_tiling_flags) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } - tmp = idx_value & ~(0x7 << 16); - tmp |= tile_flags; - ib[idx] = tmp; + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_DEPTHMACROTILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_DEPTHMICROTILE_TILED; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; + tmp = idx_value & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + } track->zb.pitch = idx_value & 0x3FFC; track->zb_dirty = true; break; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 12470b090ddf..9cdda0b3b081 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -29,6 +29,7 @@ #include <linux/seq_file.h> #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/module.h> #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" @@ -287,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) pcie_lanes); } -static int r600_pm_get_type_index(struct radeon_device *rdev, - enum radeon_pm_state_type ps_type, - int instance) -{ - int i; - int found_instance = -1; - - for (i = 0; i < 
rdev->pm.num_power_states; i++) { - if (rdev->pm.power_state[i].type == ps_type) { - found_instance++; - if (found_instance == instance) - return i; - } - } - /* return default if no match */ - return rdev->pm.default_power_state_index; -} - void rs780_pm_init_profile(struct radeon_device *rdev) { if (rdev->pm.num_power_states == 2) { @@ -420,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev) void r600_pm_init_profile(struct radeon_device *rdev) { + int idx; + if (rdev->family == CHIP_R600) { /* XXX */ /* default */ @@ -501,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; /* low sh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; - } else { - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; - } + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; /* mid sh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; - } else { - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; - } + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; /* high sh */ - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; /* low mh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; - } else { - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; - } + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; /* mid mh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; - } else { - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; - } + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; /* high mh */ - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; } @@ -762,13 +709,14 @@ void r600_hpd_init(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; struct drm_connector *connector; - if (ASIC_IS_DCE3(rdev)) { - u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); - if (ASIC_IS_DCE32(rdev)) - tmp |= DC_HPDx_EN; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (ASIC_IS_DCE3(rdev)) { + u32 tmp = 
DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); + if (ASIC_IS_DCE32(rdev)) + tmp |= DC_HPDx_EN; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); @@ -798,10 +746,7 @@ void r600_hpd_init(struct radeon_device *rdev) default: break; } - } - } else { - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); + } else { switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); @@ -819,6 +764,7 @@ void r600_hpd_init(struct radeon_device *rdev) break; } } + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } if (rdev->irq.installed) r600_irq_set(rdev); @@ -896,7 +842,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) /* flush hdp cache so updates hit vram */ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && !(rdev->flags & RADEON_IS_AGP)) { - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + void __iomem *ptr = (void *)rdev->gart.ptr; u32 tmp; /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read @@ -931,7 +877,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj) { + if (rdev->gart.robj) { WARN(1, "R600 PCIE GART already initialized\n"); return 0; } @@ -948,7 +894,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) u32 tmp; int r, i; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -1003,7 +949,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) void r600_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i, r; + int i; /* Disable all tables */ for (i = 0; i < 7; i++) @@ -1030,14 +976,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void r600_pcie_gart_fini(struct radeon_device *rdev) @@ -1137,7 +1076,7 @@ static void r600_mc_program(struct radeon_device *rdev) WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); } - WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); @@ -1276,6 +1215,53 @@ int r600_mc_init(struct radeon_device *rdev) return 0; } +int r600_vram_scratch_init(struct radeon_device *rdev) +{ + int r; + + if (rdev->vram_scratch.robj == NULL) { + r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, + PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->vram_scratch.robj); + if (r) { + return r; + } + } + + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->vram_scratch.robj, + RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); + 
if (r) { + radeon_bo_unreserve(rdev->vram_scratch.robj); + return r; + } + r = radeon_bo_kmap(rdev->vram_scratch.robj, + (void **)&rdev->vram_scratch.ptr); + if (r) + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + + return r; +} + +void r600_vram_scratch_fini(struct radeon_device *rdev) +{ + int r; + + if (rdev->vram_scratch.robj == NULL) { + return; + } + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->vram_scratch.robj); + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + } + radeon_bo_unref(&rdev->vram_scratch.robj); +} + /* We doesn't check that the GPU really needs a reset we simply do the * reset, it's up to the caller to determine if the GPU needs one. We * might add an helper function to check that. @@ -2331,6 +2317,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev, if (rdev->wb.use_event) { u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); + /* flush read cache over gart */ + radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); + radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA | + PACKET3_VC_ACTION_ENA | + PACKET3_SH_ACTION_ENA); + radeon_ring_write(rdev, 0xFFFFFFFF); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 10); /* poll interval */ /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); @@ -2339,6 +2333,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, fence->seq); radeon_ring_write(rdev, 0); } else { + /* flush read cache over gart */ + radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); + radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA | + PACKET3_VC_ACTION_ENA | + PACKET3_SH_ACTION_ENA); + radeon_ring_write(rdev, 0xFFFFFFFF); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 10); /* poll interval */ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); /* wait for 3D idle clean */ @@ -2420,6 +2422,10 @@ int r600_startup(struct radeon_device *rdev) } } + r = r600_vram_scratch_init(rdev); + if (r) + return r; + r600_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); @@ -2640,6 +2646,7 @@ void r600_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); + r600_vram_scratch_fini(rdev); radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index c4cf1308d4a1..e09d2818f949 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -201,7 +201,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, - u64 gpu_addr) + u64 gpu_addr, u32 size) { uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; @@ -222,6 +222,9 @@ set_tex_resource(struct radeon_device *rdev, S_038010_DST_SEL_Z(SQ_SEL_Z) | S_038010_DST_SEL_W(SQ_SEL_W); + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, size, gpu_addr); + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, sq_tex_resource_word0); @@ -500,9 
+503,9 @@ int r600_blit_init(struct radeon_device *rdev) rdev->r600_blit.primitives.set_default_state = set_default_state; rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ - rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ + rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ rdev->r600_blit.ring_size_common += 5; /* done copy */ - rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ + rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ rdev->r600_blit.ring_size_per_loop = 76; /* set_render_target emits 2 extra dwords on rv6xx */ @@ -760,10 +763,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb[11] = i2f(h); rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, - w, h, w, src_gpu_addr); - rdev->r600_blit.primitives.cp_set_surface_sync(rdev, - PACKET3_TC_ACTION_ENA, - size_in_bytes, src_gpu_addr); + w, h, w, src_gpu_addr, size_in_bytes); rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, w, h, dst_gpu_addr); rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 45fd592f9606..c9db4931913f 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -26,6 +26,8 @@ * Alex Deucher <alexander.deucher@amd.com> */ +#include <linux/module.h> + #include "drmP.h" #include "drm.h" #include "radeon_drm.h" diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0a2e023c1557..cb1acffd2430 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->db_depth_control = radeon_get_ib_value(p, idx); break; case R_028010_DB_DEPTH_INFO: - if (r600_cs_packet_next_is_pkt3_nop(p)) { + if (!p->keep_tiling_flags && + r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " @@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_0280B4_CB_COLOR5_INFO: case R_0280B8_CB_COLOR6_INFO: case R_0280BC_CB_COLOR7_INFO: - if (r600_cs_packet_next_is_pkt3_nop(p)) { + if (!p->keep_tiling_flags && + r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); @@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, mip_offset <<= 8; word0 = radeon_get_ib_value(p, idx + 0); - if (tiling_flags & RADEON_TILING_MACRO) - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); - else if (tiling_flags & RADEON_TILING_MICRO) - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + if (tiling_flags & RADEON_TILING_MACRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (tiling_flags & RADEON_TILING_MICRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + } word1 = radeon_get_ib_value(p, idx + 1); w0 = G_038000_TEX_WIDTH(word0) + 1; h0 = G_038004_TEX_HEIGHT(word1) + 1; @@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - ib[idx+1+(i*7)+0] 
|= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + } texture = reloc->robj; /* tex mip base */ r = r600_cs_packet_next_reloc(p, &reloc); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e3170c794c1d..8227e76b5c70 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -93,6 +93,7 @@ extern int radeon_audio; extern int radeon_disp_priority; extern int radeon_hw_i2c; extern int radeon_pcie_gen2; +extern int radeon_msi; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -306,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv, */ struct radeon_mc; -struct radeon_gart_table_ram { - volatile uint32_t *ptr; -}; - -struct radeon_gart_table_vram { - struct radeon_bo *robj; - volatile uint32_t *ptr; -}; - -union radeon_gart_table { - struct radeon_gart_table_ram ram; - struct radeon_gart_table_vram vram; -}; - #define RADEON_GPU_PAGE_SIZE 4096 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) #define RADEON_GPU_PAGE_SHIFT 12 struct radeon_gart { dma_addr_t table_addr; + struct radeon_bo *robj; + void *ptr; unsigned num_gpu_pages; unsigned num_cpu_pages; unsigned table_size; - union radeon_gart_table table; struct page **pages; dma_addr_t *pages_addr; bool *ttm_alloced; @@ -340,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev); void radeon_gart_table_ram_free(struct radeon_device *rdev); int radeon_gart_table_vram_alloc(struct radeon_device *rdev); void radeon_gart_table_vram_free(struct radeon_device *rdev); +int radeon_gart_table_vram_pin(struct radeon_device *rdev); +void radeon_gart_table_vram_unpin(struct radeon_device *rdev); int radeon_gart_init(struct radeon_device *rdev); void radeon_gart_fini(struct radeon_device *rdev); void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, @@ -347,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int pages, struct page **pagelist, dma_addr_t *dma_addr); +void radeon_gart_restore(struct radeon_device *rdev); /* @@ -437,25 +428,26 @@ union radeon_irq_stat_regs { struct evergreen_irq_stat_regs evergreen; }; +#define RADEON_MAX_HPD_PINS 6 +#define RADEON_MAX_CRTCS 6 +#define RADEON_MAX_HDMI_BLOCKS 2 + struct radeon_irq { bool installed; bool sw_int; - /* FIXME: use a define max crtc rather than hardcode it */ - bool crtc_vblank_int[6]; - bool pflip[6]; + bool crtc_vblank_int[RADEON_MAX_CRTCS]; + bool pflip[RADEON_MAX_CRTCS]; wait_queue_head_t vblank_queue; - /* FIXME: use defines for max hpd/dacs */ - bool hpd[6]; + bool hpd[RADEON_MAX_HPD_PINS]; bool gui_idle; bool gui_idle_acked; wait_queue_head_t idle_queue; - /* FIXME: use defines for max HDMI blocks */ - bool hdmi[2]; + bool hdmi[RADEON_MAX_HDMI_BLOCKS]; spinlock_t sw_lock; int sw_refcount; union radeon_irq_stat_regs stat_regs; - spinlock_t pflip_lock[6]; - int pflip_refcount[6]; + spinlock_t pflip_lock[RADEON_MAX_CRTCS]; + int pflip_refcount[RADEON_MAX_CRTCS]; }; int radeon_irq_kms_init(struct radeon_device *rdev); @@ -533,7 +525,7 @@ struct r600_blit_cp_primitives { void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); void 
(*set_tex_resource)(struct radeon_device *rdev, int format, int w, int h, int pitch, - u64 gpu_addr); + u64 gpu_addr, u32 size); void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, int x2, int y2); void (*draw_auto)(struct radeon_device *rdev); @@ -619,7 +611,8 @@ struct radeon_cs_parser { struct radeon_ib *ib; void *track; unsigned family; - int parser_error; + int parser_error; + bool keep_tiling_flags; }; extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); @@ -792,8 +785,7 @@ struct radeon_pm_clock_info { struct radeon_power_state { enum radeon_pm_state_type type; - /* XXX: use a define for num clock modes */ - struct radeon_pm_clock_info clock_info[8]; + struct radeon_pm_clock_info *clock_info; /* number of valid clock modes in this power state */ int num_clock_modes; struct radeon_pm_clock_info *default_clock_mode; @@ -863,6 +855,9 @@ struct radeon_pm { struct device *int_hwmon_dev; }; +int radeon_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance); /* * Benchmarking @@ -1143,12 +1138,55 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -/* VRAM scratch page for HDP bug */ -struct r700_vram_scratch { +/* VRAM scratch page for HDP bug, default vram page */ +struct r600_vram_scratch { struct radeon_bo *robj; volatile uint32_t *ptr; + u64 gpu_addr; }; + +/* + * Mutex which allows recursive locking from the same process. + */ +struct radeon_mutex { + struct mutex mutex; + struct task_struct *owner; + int level; +}; + +static inline void radeon_mutex_init(struct radeon_mutex *mutex) +{ + mutex_init(&mutex->mutex); + mutex->owner = NULL; + mutex->level = 0; +} + +static inline void radeon_mutex_lock(struct radeon_mutex *mutex) +{ + if (mutex_trylock(&mutex->mutex)) { + /* The mutex was unlocked before, so it's ours now */ + mutex->owner = current; + } else if (mutex->owner != current) { + /* Another process locked the mutex, take it */ + mutex_lock(&mutex->mutex); + mutex->owner = current; + } + /* Otherwise the mutex was already locked by this process */ + + mutex->level++; +} + +static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) +{ + if (--mutex->level > 0) + return; + + mutex->owner = NULL; + mutex_unlock(&mutex->mutex); +} + + /* * Core structure, functions and helpers. 
*/ @@ -1204,7 +1242,7 @@ struct radeon_device { struct radeon_gem gem; struct radeon_pm pm; uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; - struct mutex cs_mutex; + struct radeon_mutex cs_mutex; struct radeon_wb wb; struct radeon_dummy_page dummy_page; bool gpu_lockup; @@ -1218,7 +1256,7 @@ struct radeon_device { const struct firmware *rlc_fw; /* r6/700 RLC firmware */ const struct firmware *mc_fw; /* NI MC firmware */ struct r600_blit r600_blit; - struct r700_vram_scratch vram_scratch; + struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ struct work_struct hotplug_work; @@ -1442,8 +1480,6 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v); /* AGP */ extern int radeon_gpu_reset(struct radeon_device *rdev); extern void radeon_agp_disable(struct radeon_device *rdev); -extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); -extern void radeon_gart_restore(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev); @@ -1467,6 +1503,12 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); /* + * R600 vram scratch functions + */ +int r600_vram_scratch_init(struct radeon_device *rdev); +void r600_vram_scratch_fini(struct radeon_device *rdev); + +/* * r600 functions used by radeon_encoder.c */ extern void r600_hdmi_enable(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 3f6636bb2d7f..3516a6081dcf 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle) /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); + DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", + acpi_format_exception(status)); kfree(buffer.pointer); return 1; } @@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev) acpi_handle handle; int ret; - /* No need to proceed if we're sure that ATIF is not supported */ - if (!ASIC_IS_AVIVO(rdev) || !rdev->bios) - return 0; - /* Get the device handle */ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + /* No need to proceed if we're sure that ATIF is not supported */ + if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) + return 0; + /* Call the ATIF method */ ret = radeon_atif_call(handle); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e2944566ffea..a2e1eae114ef 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = { .pm_misc = &evergreen_pm_misc, .pm_prepare = &evergreen_pm_prepare, .pm_finish = &evergreen_pm_finish, - .pm_init_profile = &rs780_pm_init_profile, + .pm_init_profile = &sumo_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 85f14f0337e4..59914842a729 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -413,6 
+413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); +extern void sumo_pm_init_profile(struct radeon_device *rdev); extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08d0b94332e6..d24baf30efcb 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -62,6 +62,87 @@ union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; +static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, + ATOM_GPIO_I2C_ASSIGMENT *gpio, + u8 index) +{ + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ + if ((rdev->family == CHIP_R420) || + (rdev->family == CHIP_R423) || + (rdev->family == CHIP_RV410)) { + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { + gpio->ucClkMaskShift = 0x19; + gpio->ucDataMaskShift = 0x18; + } + } + + /* some evergreen boards have bad data for this entry */ + if (ASIC_IS_DCE4(rdev)) { + if ((index == 7) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && + (gpio->sucI2cId.ucAccess == 0)) { + gpio->sucI2cId.ucAccess = 0x97; + gpio->ucDataMaskShift = 8; + gpio->ucDataEnShift = 8; + gpio->ucDataY_Shift = 8; + gpio->ucDataA_Shift = 8; + } + } + + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((index == 4) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } +} + +static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) +{ + struct radeon_i2c_bus_rec i2c; + + memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); + + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); + i2c.en_data_mask = (1 << gpio->ucDataEnShift); + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); + + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) + i2c.hw_capable = true; + else + i2c.hw_capable = false; + + if (gpio->sucI2cId.ucAccess == 0xa0) + i2c.mm_i2c = true; + else + i2c.mm_i2c = false; + + i2c.i2c_id = gpio->sucI2cId.ucAccess; + + if (i2c.mask_clk_reg) + i2c.valid = true; + else + i2c.valid = false; + + return i2c; +} + static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct 
radeon_device *rdev, uint8_t id) { @@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; - /* some evergreen boards have bad data for this entry */ - if (ASIC_IS_DCE4(rdev)) { - if ((i == 7) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && - (gpio->sucI2cId.ucAccess == 0)) { - gpio->sucI2cId.ucAccess = 0x97; - gpio->ucDataMaskShift = 8; - gpio->ucDataEnShift = 8; - gpio->ucDataY_Shift = 8; - gpio->ucDataA_Shift = 8; - } - } - - /* some DCE3 boards have bad data for this entry */ - if (ASIC_IS_DCE3(rdev)) { - if ((i == 4) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && - (gpio->sucI2cId.ucAccess == 0x94)) - gpio->sucI2cId.ucAccess = 0x14; - } + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); if (gpio->sucI2cId.ucAccess == id) { - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); - i2c.en_data_mask = (1 << gpio->ucDataEnShift); - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); - - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) - i2c.hw_capable = true; - else - i2c.hw_capable = false; - - if (gpio->sucI2cId.ucAccess == 0xa0) - i2c.mm_i2c = true; - else - i2c.mm_i2c = false; - - i2c.i2c_id = gpio->sucI2cId.ucAccess; - - if (i2c.mask_clk_reg) - i2c.valid = true; + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); break; } } @@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) int i, num_indices; char stmp[32]; - memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); - if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); @@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; - i2c.valid = false; - - /* some evergreen boards have bad data for this entry */ - if (ASIC_IS_DCE4(rdev)) { - if ((i == 7) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && - (gpio->sucI2cId.ucAccess == 0)) { - gpio->sucI2cId.ucAccess = 0x97; - gpio->ucDataMaskShift = 8; - gpio->ucDataEnShift = 8; - gpio->ucDataY_Shift = 8; - gpio->ucDataA_Shift = 8; - } - } - /* some DCE3 boards have bad data for this entry */ - if (ASIC_IS_DCE3(rdev)) { - if ((i == 4) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && - (gpio->sucI2cId.ucAccess == 0x94)) - gpio->sucI2cId.ucAccess = 0x14; - } + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; - i2c.y_clk_reg = 
le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); - i2c.en_data_mask = (1 << gpio->ucDataEnShift); - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); - - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) - i2c.hw_capable = true; - else - i2c.hw_capable = false; - - if (gpio->sucI2cId.ucAccess == 0xa0) - i2c.mm_i2c = true; - else - i2c.mm_i2c = false; + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); - i2c.i2c_id = gpio->sucI2cId.ucAccess; - - if (i2c.mask_clk_reg) { - i2c.valid = true; + if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } @@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) return state_index; /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (!rdev->pm.power_state[state_index].clock_info) + return state_index; + rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; switch (frev) { case 1: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) state_index++; break; case 2: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) state_index++; break; case 3: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; - if (ASIC_IS_DCE5(rdev)) { + if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { /* NI chips post without MC ucode, so default clocks are strobe mode only */ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; @@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + (power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); - for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { - clock_info = (union pplib_clock_info *) - 
(mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + - (power_state->v1.ucClockStateIndices[j] * - power_info->pplib.ucClockInfoSize)); - valid = radeon_atombios_parse_pplib_clock_info(rdev, - state_index, mode_index, - clock_info); - if (valid) - mode_index++; + rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * + ((power_info->pplib.ucStateEntrySize - 1) ? + (power_info->pplib.ucStateEntrySize - 1) : 1), + GFP_KERNEL); + if (!rdev->pm.power_state[i].clock_info) + return state_index; + if (power_info->pplib.ucStateEntrySize - 1) { + for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { + clock_info = (union pplib_clock_info *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + + (power_state->v1.ucClockStateIndices[j] * + power_info->pplib.ucClockInfoSize)); + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + } else { + rdev->pm.power_state[state_index].clock_info[0].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = + rdev->clock.default_sclk; + mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { @@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; - /* XXX this might be an inagua bug... */ - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index]; - valid = radeon_atombios_parse_pplib_clock_info(rdev, - state_index, mode_index, - clock_info); - if (valid) - mode_index++; + rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * + (power_state->v2.ucNumDPMLevels ? + power_state->v2.ucNumDPMLevels : 1), + GFP_KERNEL); + if (!rdev->pm.power_state[i].clock_info) + return state_index; + if (power_state->v2.ucNumDPMLevels) { + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + /* XXX this might be an inagua bug... 
*/ + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index]; + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + } else { + rdev->pm.power_state[state_index].clock_info[0].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = + rdev->clock.default_sclk; + mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { @@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) } else { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { - /* add the default mode */ - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - rdev->pm.power_state[state_index].pcie_lanes = 16; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].flags = 0; - state_index++; + rdev->pm.power_state[0].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (rdev->pm.power_state[0].clock_info) { + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + rdev->pm.power_state[state_index].pcie_lanes = 16; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].flags = 0; + state_index++; + } } } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 5cafc90de7f8..17e1a9b2d8fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, struct radeon_bo *sobj = NULL; uint64_t saddr, daddr; int r, n; - unsigned int time; + int time; n = RADEON_BENCHMARK_ITERATIONS; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 8bf83c4b4147..81fc100be7e1 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) /* allocate 2 power states */ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); - if (!rdev->pm.power_state) { - rdev->pm.default_power_state_index = state_index; - rdev->pm.num_power_states = 0; - - rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; - rdev->pm.current_clock_mode_index = 0; - return; - } + if (rdev->pm.power_state) { + /* allocate 1 clock 
mode per state */ + rdev->pm.power_state[0].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + rdev->pm.power_state[1].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (!rdev->pm.power_state[0].clock_info || + !rdev->pm.power_state[1].clock_info) + goto pm_failed; + } else + goto pm_failed; /* check for a thermal chip */ offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); @@ -2735,6 +2738,14 @@ default_mode: rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; + return; + +pm_failed: + rdev->pm.default_power_state_index = state_index; + rdev->pm.num_power_states = 0; + + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; } void radeon_external_tmds_setup(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index dec6cbe6a0a6..e7cb3ab09243 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -44,8 +44,6 @@ extern void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct drm_connector *drm_connector); -bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); - void radeon_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -432,55 +430,6 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr return 0; } -/* - * Some integrated ATI Radeon chipset implementations (e. g. - * Asus M2A-VM HDMI) may indicate the availability of a DDC, - * even when there's no monitor connected. For these connectors - * following DDC probe extension will be applied: check also for the - * availability of EDID with at least a correct EDID header. Only then, - * DDC is assumed to be available. This prevents drm_get_edid() and - * drm_edid_block_valid() from periodically dumping data and kernel - * errors into the logs and onto the terminal. - */ -static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, - uint32_t supported_device, - int connector_type) -{ - /* Asus M2A-VM HDMI board sends data to i2c bus even, - * if HDMI add-on card is not plugged in or HDMI is disabled in - * BIOS. 
Valid DDC can only be assumed, if also a valid EDID header - * can be retrieved via i2c bus during DDC probe */ - if ((dev->pdev->device == 0x791e) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0x826d)) { - if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && - (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) - return true; - } - /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus - * for a DVI connector that is not implemented */ - if ((dev->pdev->device == 0x796e) && - (dev->pdev->subsystem_vendor == 0x1019) && - (dev->pdev->subsystem_device == 0x2615)) { - if ((connector_type == DRM_MODE_CONNECTOR_DVID) && - (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) - return true; - } - /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 - * (RS690M) sends data to i2c bus for a HDMI connector that - * is not implemented */ - if ((dev->pdev->device == 0x791f) && - (dev->pdev->subsystem_vendor == 0x1179) && - (dev->pdev->subsystem_device == 0xff68)) { - if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && - (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) - return true; - } - - /* Default: no EDID header probe required for DDC probing */ - return false; -} - static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -721,8 +670,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe); + dret = radeon_ddc_probe(radeon_connector); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -764,7 +712,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); - if (ret == connector_status_connected) + if (ret != connector_status_disconnected) radeon_connector->detected_by_load = true; } } @@ -904,8 +852,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) bool dret = false; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe); + dret = radeon_ddc_probe(radeon_connector); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -1005,8 +952,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = encoder_funcs->detect(encoder, connector); if (ret == connector_status_connected) { radeon_connector->use_digital = false; - radeon_connector->detected_by_load = true; } + if (ret != connector_status_disconnected) + radeon_connector->detected_by_load = true; } break; } @@ -1203,7 +1151,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) } } else { /* need to setup ddc on the bridge */ - if (radeon_connector_encoder_is_dp_bridge(connector)) { + if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != + ENCODER_OBJECT_ID_NONE) { if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } @@ -1213,13 +1162,12 @@ static int radeon_dp_get_modes(struct drm_connector *connector) return ret; } -bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) +u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) { struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; - bool found = false; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) 
{ if (connector->encoder_ids[i] == 0) @@ -1235,14 +1183,13 @@ bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: - found = true; - break; + return radeon_encoder->encoder_id; default: break; } } - return found; + return ENCODER_OBJECT_ID_NONE; } bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) @@ -1319,7 +1266,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); - } else if (radeon_connector_encoder_is_dp_bridge(connector)) { + } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != + ENCODER_OBJECT_ID_NONE) { /* DP bridges are always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; /* get the DPCD from the bridge */ @@ -1328,8 +1276,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (encoder) { /* setup ddc on the bridge */ radeon_atom_ext_encoder_setup_ddc(encoder); - if (radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe)) /* try DDC */ + if (radeon_ddc_probe(radeon_connector)) /* try DDC */ ret = connector_status_connected; else if (radeon_connector->dac_load_detect) { /* try load detection */ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -1347,8 +1294,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { - if (radeon_ddc_probe(radeon_connector, - radeon_connector->requires_extended_probe)) + if (radeon_ddc_probe(radeon_connector)) ret = connector_status_connected; } } @@ -1493,9 +1439,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_connector->shared_ddc = shared_ddc; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; - radeon_connector->requires_extended_probe = - radeon_connector_needs_extended_probe(rdev, supported_device, - connector_type); + radeon_connector->router = *router; if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); @@ -1842,9 +1786,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->devices = supported_device; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; - radeon_connector->requires_extended_probe = - radeon_connector_needs_extended_probe(rdev, supported_device, - connector_type); + switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 045ec59478f9..72ae8266b8e9 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -29,6 +29,8 @@ * Gareth Hughes <gareth@valinux.com> */ +#include <linux/module.h> + #include "drmP.h" #include "drm.h" #include "drm_sarea.h" diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index fae00c0d75aa..29afd71e0840 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; - unsigned size, i; + unsigned size, i, flags = 0; if (!cs->num_chunks) { 
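
Earlier in the radeon_connectors.c changes above, the boolean helper radeon_connector_encoder_is_dp_bridge() becomes radeon_connector_encoder_get_dp_bridge_encoder_id(), which returns the bridge's encoder id (TRAVIS or NUTMEG) and uses ENCODER_OBJECT_ID_NONE as the "no bridge" sentinel, so the existing presence checks keep working while callers also learn which bridge is attached. A minimal sketch of that return-an-id-with-a-sentinel pattern, with made-up names rather than the driver's object ids:

#include <stdio.h>

enum bridge_id { BRIDGE_NONE = 0, BRIDGE_TRAVIS, BRIDGE_NUTMEG };

/* Hypothetical stand-in for the connector scan: report which external DP
 * bridge (if any) hangs off the connector, instead of a bare yes/no. */
static enum bridge_id get_dp_bridge(int has_travis, int has_nutmeg)
{
    if (has_travis)
        return BRIDGE_TRAVIS;
    if (has_nutmeg)
        return BRIDGE_NUTMEG;
    return BRIDGE_NONE;          /* plays the role of ENCODER_OBJECT_ID_NONE */
}

int main(void)
{
    enum bridge_id id = get_dp_bridge(0, 1);

    if (id != BRIDGE_NONE)       /* the old boolean-style check still reads the same */
        printf("DP bridge present, id %d\n", (int)id);
    return 0;
}
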
return 0; @@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) if (p->chunks[i].length_dw == 0) return -EINVAL; } + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && + !p->chunks[i].length_dw) { + return -EINVAL; + } p->chunks[i].length_dw = user_chunk.length_dw; p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; @@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[i].user_ptr, size)) { return -EFAULT; } + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { + flags = p->chunks[i].kdata[0]; + } } else { p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); @@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } + + p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; return 0; } @@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct radeon_cs_chunk *ib_chunk; int r; - mutex_lock(&rdev->cs_mutex); + radeon_mutex_lock(&rdev->cs_mutex); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; @@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_get(rdev, &parser.ib); if (r) { DRM_ERROR("Failed to get ib !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); @@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } /* Copy the packet into the IB, the parser will read from the @@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r || parser.parser_error) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_schedule(rdev, parser.ib); @@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_ERROR("Failed to schedule IB !\n"); } radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c33bc914d93d..c4d00a171411 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - mutex_init(&rdev->cs_mutex); + radeon_mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ib_pool.mutex); mutex_init(&rdev->cp.mutex); mutex_init(&rdev->dc_hw_i2c_mutex); @@ -955,6 +955,9 @@ int radeon_gpu_reset(struct 
radeon_device *rdev) int r; int resched; + /* Prevent CS ioctl from interfering */ + radeon_mutex_lock(&rdev->cs_mutex); + radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); @@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_restore_bios_scratch_regs(rdev); drm_helper_resume_force_mode(rdev->ddev); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); - return 0; } - /* bad news, how to tell it to userspace ? */ - dev_info(rdev->dev, "GPU reset failed\n"); + + radeon_mutex_unlock(&rdev->cs_mutex); + + if (r) { + /* bad news, how to tell it to userspace ? */ + dev_info(rdev->dev, "GPU reset failed\n"); + } + return r; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6adb3e58affd..a22d6e6a49a2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -33,8 +33,6 @@ #include "drm_crtc_helper.h" #include "drm_edid.h" -static int radeon_ddc_dump(struct drm_connector *connector); - static void avivo_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -669,7 +667,6 @@ static void radeon_print_display_setup(struct drm_device *dev) static bool radeon_setup_enc_conn(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; - struct drm_connector *drm_connector; bool ret = false; if (rdev->bios) { @@ -689,8 +686,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) if (ret) { radeon_setup_encoder_clones(dev); radeon_print_display_setup(dev); - list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) - radeon_ddc_dump(drm_connector); } return ret; @@ -708,7 +703,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || - radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { + (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != + ENCODER_OBJECT_ID_NONE)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || @@ -743,34 +739,6 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) return 0; } -static int radeon_ddc_dump(struct drm_connector *connector) -{ - struct edid *edid; - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - int ret = 0; - - /* on hw with routers, select right port */ - if (radeon_connector->router.ddc_valid) - radeon_router_select_ddc_port(radeon_connector); - - if (!radeon_connector->ddc_bus) - return -1; - edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); - /* Log EDID retrieval status here. 
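
The radeon_cs.c and radeon_device.c hunks above switch every user of rdev->cs_mutex from plain mutex_lock()/mutex_unlock() to radeon_mutex_lock()/radeon_mutex_unlock(), and then take that same lock in radeon_gpu_reset() ("Prevent CS ioctl from interfering"). The wrapper's definition is not part of this section; presumably it lets a thread that already holds the lock, for example a CS submission that detects a lockup and triggers a reset, take it again without deadlocking against itself. In plain userspace C the same call pattern is what a POSIX recursive mutex provides; a self-contained sketch, with invented function names standing in for the driver paths:

#define _XOPEN_SOURCE 700        /* for PTHREAD_MUTEX_RECURSIVE on strict setups */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cs_lock;          /* stands in for rdev->cs_mutex */

static void gpu_reset(void)
{
    pthread_mutex_lock(&cs_lock);        /* nested acquisition by the same thread */
    puts("resetting while the submission path still holds the lock");
    pthread_mutex_unlock(&cs_lock);
}

static void cs_submit(void)
{
    pthread_mutex_lock(&cs_lock);
    gpu_reset();                         /* e.g. a lockup detected mid-submission */
    pthread_mutex_unlock(&cs_lock);
}

int main(void)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&cs_lock, &attr);

    cs_submit();

    pthread_mutex_destroy(&cs_lock);
    pthread_mutexattr_destroy(&attr);
    return 0;
}
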
In particular with regard to - * connectors with requires_extended_probe flag set, that will prevent - * function radeon_dvi_detect() to fetch EDID on this connector, - * as long as there is no valid EDID header found */ - if (edid) { - DRM_INFO("Radeon display connector %s: Found valid EDID", - drm_get_connector_name(connector)); - kfree(edid); - } else { - DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID", - drm_get_connector_name(connector)); - } - return ret; -} - /* avivo */ static void avivo_get_fb_div(struct radeon_pll *pll, u32 target_clock, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e71d2ed7fa11..71499fc3daf5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -36,6 +36,7 @@ #include "drm_pciids.h" #include <linux/console.h> +#include <linux/module.h> /* @@ -52,9 +53,10 @@ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query * 2.10.0 - fusion 2D tiling * 2.11.0 - backend map, initial compute support for the CS checker + * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 11 +#define KMS_DRIVER_MINOR 12 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -118,6 +120,7 @@ int radeon_audio = 0; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; int radeon_pcie_gen2 = 0; +int radeon_msi = -1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -164,6 +167,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); +MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(msi, radeon_msi, int, 0444); + static int radeon_suspend(struct drm_device *dev, pm_message_t state) { drm_radeon_private_t *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index eb3f6dc6df83..06e413e6a920 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -29,12 +29,6 @@ #include "radeon.h" #include "atom.h" -extern int atom_debug; - -/* evil but including atombios.h is much worse */ -bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, - struct drm_display_mode *mode); - static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -156,27 +150,6 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 return ret; } -static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - return true; - default: - return false; - } -} - void radeon_link_encoder_connector(struct 
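
The radeon_drv.c hunk above bumps the KMS driver version to 2.12.0 specifically so userspace can tell whether the new RADEON_CS_KEEP_TILING_FLAGS flag (carried in the FLAGS chunk parsed by the radeon_cs.c hunk earlier) will be honoured. A plausible client-side gate using libdrm's drmGetVersion(); the helper name is invented for illustration, and the 2.12 threshold comes from the version comment in the hunk:

#include <stdbool.h>
#include <stdio.h>
#include <xf86drm.h>              /* libdrm: drmOpen()/drmGetVersion()/drmFreeVersion() */

/* Hypothetical helper: true if the radeon KMS driver behind drm_fd is new
 * enough (>= 2.12.0) to honour RADEON_CS_KEEP_TILING_FLAGS. */
static bool cs_keep_tiling_flags_supported(int drm_fd)
{
    bool ok = false;
    drmVersionPtr v = drmGetVersion(drm_fd);

    if (v) {
        ok = (v->version_major > 2) ||
             (v->version_major == 2 && v->version_minor >= 12);
        drmFreeVersion(v);
    }
    return ok;
}

int main(void)
{
    int fd = drmOpen("radeon", NULL);    /* -1 if no radeon device is present */

    if (fd >= 0) {
        printf("KEEP_TILING_FLAGS %s\n",
               cs_keep_tiling_flags_supported(fd) ? "supported" : "not supported");
        drmClose(fd);
    }
    return 0;
}
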
drm_device *dev) { @@ -229,23 +202,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) return NULL; } -static struct drm_connector * -radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - radeon_connector = to_radeon_connector(connector); - if (radeon_encoder->devices & radeon_connector->devices) - return connector; - } - return NULL; -} - -struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) +struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -266,9 +223,9 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder return NULL; } -bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) +u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) { - struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); + struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder); if (other_encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); @@ -332,2105 +289,3 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder, } -static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); - drm_mode_set_crtcinfo(adjusted_mode, 0); - - /* hw bug */ - if ((mode->flags & DRM_MODE_FLAG_INTERLACE) - && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) - adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; - - /* get the native mode for LVDS */ - if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) - radeon_panel_mode_fixup(encoder, adjusted_mode); - - /* get the native mode for TV */ - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { - struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; - if (tv_dac) { - if (tv_dac->tv_std == TV_STD_NTSC || - tv_dac->tv_std == TV_STD_NTSC_J || - tv_dac->tv_std == TV_STD_PAL_M) - radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); - else - radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); - } - } - - if (ASIC_IS_DCE3(rdev) && - ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || - radeon_encoder_is_dp_bridge(encoder))) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - radeon_dp_set_link_config(connector, mode); - } - - return true; -} - -static void -atombios_dac_setup(struct drm_encoder *encoder, int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DAC_ENCODER_CONTROL_PS_ALLOCATION args; - int index = 0; - struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - - memset(&args, 0, sizeof(args)); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - case 
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); - break; - } - - args.ucAction = action; - - if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) - args.ucDacStandard = ATOM_DAC1_PS2; - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.ucDacStandard = ATOM_DAC1_CV; - else { - switch (dac_info->tv_std) { - case TV_STD_PAL: - case TV_STD_PAL_M: - case TV_STD_SCART_PAL: - case TV_STD_SECAM: - case TV_STD_PAL_CN: - args.ucDacStandard = ATOM_DAC1_PAL; - break; - case TV_STD_NTSC: - case TV_STD_NTSC_J: - case TV_STD_PAL_60: - default: - args.ucDacStandard = ATOM_DAC1_NTSC; - break; - } - } - args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} - -static void -atombios_tv_setup(struct drm_encoder *encoder, int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - TV_ENCODER_CONTROL_PS_ALLOCATION args; - int index = 0; - struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - - memset(&args, 0, sizeof(args)); - - index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); - - args.sTVEncoder.ucAction = action; - - if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.sTVEncoder.ucTvStandard = ATOM_TV_CV; - else { - switch (dac_info->tv_std) { - case TV_STD_NTSC: - args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; - break; - case TV_STD_PAL: - args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; - break; - case TV_STD_PAL_M: - args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; - break; - case TV_STD_PAL_60: - args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; - break; - case TV_STD_NTSC_J: - args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; - break; - case TV_STD_SCART_PAL: - args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? 
*/ - break; - case TV_STD_SECAM: - args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; - break; - case TV_STD_PAL_CN: - args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; - break; - default: - args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; - break; - } - } - - args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} - -union dvo_encoder_control { - ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; - DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; - DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; -}; - -void -atombios_dvo_setup(struct drm_encoder *encoder, int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - union dvo_encoder_control args; - int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); - - memset(&args, 0, sizeof(args)); - - if (ASIC_IS_DCE3(rdev)) { - /* DCE3+ */ - args.dvo_v3.ucAction = action; - args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - args.dvo_v3.ucDVOConfig = 0; /* XXX */ - } else if (ASIC_IS_DCE2(rdev)) { - /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */ - args.dvo.sDVOEncoder.ucAction = action; - args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - /* DFP1, CRT1, TV1 depending on the type of port */ - args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; - - if (radeon_encoder->pixel_clock > 165000) - args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; - } else { - /* R4xx, R5xx */ - args.ext_tmds.sXTmdsEncoder.ucEnable = action; - - if (radeon_encoder->pixel_clock > 165000) - args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; - - /*if (pScrn->rgbBits == 8)*/ - args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); -} - -union lvds_encoder_control { - LVDS_ENCODER_CONTROL_PS_ALLOCATION v1; - LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; -}; - -void -atombios_digital_setup(struct drm_encoder *encoder, int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - union lvds_encoder_control args; - int index = 0; - int hdmi_detected = 0; - uint8_t frev, crev; - - if (!dig) - return; - - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) - hdmi_detected = 1; - - memset(&args, 0, sizeof(args)); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); - else - index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); - break; - } - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return; - - switch (frev) { - case 1: - case 2: - switch (crev) { - case 1: - args.v1.ucMisc = 0; - args.v1.ucAction = action; - if (hdmi_detected) - args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; - args.v1.usPixelClock = 
cpu_to_le16(radeon_encoder->pixel_clock / 10); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) - args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) - args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; - } else { - if (dig->linkb) - args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; - /*if (pScrn->rgbBits == 8) */ - args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; - } - break; - case 2: - case 3: - args.v2.ucMisc = 0; - args.v2.ucAction = action; - if (crev == 3) { - if (dig->coherent_mode) - args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; - } - if (hdmi_detected) - args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; - args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - args.v2.ucTruncate = 0; - args.v2.ucSpatial = 0; - args.v2.ucTemporal = 0; - args.v2.ucFRC = 0; - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) - args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { - args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; - if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) - args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; - } - if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { - args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; - if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) - args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; - if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) - args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; - } - } else { - if (dig->linkb) - args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; - if (radeon_encoder->pixel_clock > 165000) - args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; - } - break; - default: - DRM_ERROR("Unknown table version %d, %d\n", frev, crev); - break; - } - break; - default: - DRM_ERROR("Unknown table version %d, %d\n", frev, crev); - break; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); -} - -int -atombios_get_encoder_mode(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; - - /* dp bridges are always DP */ - if (radeon_encoder_is_dp_bridge(encoder)) - return ATOM_ENCODER_MODE_DP; - - /* DVO is always DVO */ - if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) - return ATOM_ENCODER_MODE_DVO; - - connector = radeon_get_connector_for_encoder(encoder); - /* if we don't have an active device yet, just use one of - * the connectors tied to the encoder. 
- */ - if (!connector) - connector = radeon_get_connector_for_encoder_init(encoder); - radeon_connector = to_radeon_connector(connector); - - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ - if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { - /* fix me */ - if (ASIC_IS_DCE4(rdev)) - return ATOM_ENCODER_MODE_DVI; - else - return ATOM_ENCODER_MODE_HDMI; - } else if (radeon_connector->use_digital) - return ATOM_ENCODER_MODE_DVI; - else - return ATOM_ENCODER_MODE_CRT; - break; - case DRM_MODE_CONNECTOR_DVID: - case DRM_MODE_CONNECTOR_HDMIA: - default: - if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { - /* fix me */ - if (ASIC_IS_DCE4(rdev)) - return ATOM_ENCODER_MODE_DVI; - else - return ATOM_ENCODER_MODE_HDMI; - } else - return ATOM_ENCODER_MODE_DVI; - break; - case DRM_MODE_CONNECTOR_LVDS: - return ATOM_ENCODER_MODE_LVDS; - break; - case DRM_MODE_CONNECTOR_DisplayPort: - dig_connector = radeon_connector->con_priv; - if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || - (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) - return ATOM_ENCODER_MODE_DP; - else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { - /* fix me */ - if (ASIC_IS_DCE4(rdev)) - return ATOM_ENCODER_MODE_DVI; - else - return ATOM_ENCODER_MODE_HDMI; - } else - return ATOM_ENCODER_MODE_DVI; - break; - case DRM_MODE_CONNECTOR_eDP: - return ATOM_ENCODER_MODE_DP; - case DRM_MODE_CONNECTOR_DVIA: - case DRM_MODE_CONNECTOR_VGA: - return ATOM_ENCODER_MODE_CRT; - break; - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_9PinDIN: - /* fix me */ - return ATOM_ENCODER_MODE_TV; - /*return ATOM_ENCODER_MODE_CV;*/ - break; - } -} - -/* - * DIG Encoder/Transmitter Setup - * - * DCE 3.0/3.1 - * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. - * Supports up to 3 digital outputs - * - 2 DIG encoder blocks. - * DIG1 can drive UNIPHY link A or link B - * DIG2 can drive UNIPHY link B or LVTMA - * - * DCE 3.2 - * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). - * Supports up to 5 digital outputs - * - 2 DIG encoder blocks. - * DIG1/2 can drive UNIPHY0/1/2 link A or link B - * - * DCE 4.0/5.0 - * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). - * Supports up to 6 digital outputs - * - 6 DIG encoder blocks. - * - DIG to PHY mapping is hardcoded - * DIG1 drives UNIPHY0 link A, A+B - * DIG2 drives UNIPHY0 link B - * DIG3 drives UNIPHY1 link A, A+B - * DIG4 drives UNIPHY1 link B - * DIG5 drives UNIPHY2 link A, A+B - * DIG6 drives UNIPHY2 link B - * - * DCE 4.1 - * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). - * Supports up to 6 digital outputs - * - 2 DIG encoder blocks. 
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B - * - * Routing - * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) - * Examples: - * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI - * crtc1 -> dig1 -> UNIPHY0 link B -> DP - * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS - * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI - */ - -union dig_encoder_control { - DIG_ENCODER_CONTROL_PS_ALLOCATION v1; - DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; - DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; - DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; -}; - -void -atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - union dig_encoder_control args; - int index = 0; - uint8_t frev, crev; - int dp_clock = 0; - int dp_lane_count = 0; - int hpd_id = RADEON_HPD_NONE; - int bpc = 8; - - if (connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *dig_connector = - radeon_connector->con_priv; - - dp_clock = dig_connector->dp_clock; - dp_lane_count = dig_connector->dp_lane_count; - hpd_id = radeon_connector->hpd.hpd; - bpc = connector->display_info.bpc; - } - - /* no dig encoder assigned */ - if (dig->dig_encoder == -1) - return; - - memset(&args, 0, sizeof(args)); - - if (ASIC_IS_DCE4(rdev)) - index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); - else { - if (dig->dig_encoder) - index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); - else - index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); - } - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return; - - args.v1.ucAction = action; - args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) - args.v3.ucPanelMode = panel_mode; - else - args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); - - if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || - (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) - args.v1.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) - args.v1.ucLaneNum = 8; - else - args.v1.ucLaneNum = 4; - - if (ASIC_IS_DCE5(rdev)) { - if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || - (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) { - if (dp_clock == 270000) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; - else if (dp_clock == 540000) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; - } - args.v4.acConfig.ucDigSel = dig->dig_encoder; - switch (bpc) { - case 0: - args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; - break; - case 6: - args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; - break; - case 8: - default: - args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; - break; - case 10: - args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; - break; - case 12: - args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; - break; - case 16: - args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; - break; - } - if (hpd_id == RADEON_HPD_NONE) - args.v4.ucHPD_ID = 0; - else - args.v4.ucHPD_ID = hpd_id + 1; - } else if (ASIC_IS_DCE4(rdev)) { - if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; - args.v3.acConfig.ucDigSel = 
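
The block comment removed a little above documents the DIG encoder/transmitter topology per DCE generation; on DCE 4.0/5.0 the DIG-to-PHY mapping is hardcoded, while DCE 3.x and 4.1 can route either DIG encoder to any PHY. Restated as a small lookup table, indexed the way dig->dig_encoder is used in this file (0 = DIG1 ... 5 = DIG6); purely illustrative, not driver code:

#include <stdio.h>

struct dig_route { const char *phy; const char *link; };

/* Hardcoded DCE 4.0/5.0 mapping, as described in the removed comment above. */
static const struct dig_route dce4_routing[6] = {
    { "UNIPHY0", "A (or A+B)" },     /* DIG1 */
    { "UNIPHY0", "B"          },     /* DIG2 */
    { "UNIPHY1", "A (or A+B)" },     /* DIG3 */
    { "UNIPHY1", "B"          },     /* DIG4 */
    { "UNIPHY2", "A (or A+B)" },     /* DIG5 */
    { "UNIPHY2", "B"          },     /* DIG6 */
};

int main(void)
{
    for (int i = 0; i < 6; i++)
        printf("DIG%d -> %s link %s\n",
               i + 1, dce4_routing[i].phy, dce4_routing[i].link);
    return 0;
}
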
dig->dig_encoder; - switch (bpc) { - case 0: - args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; - break; - case 6: - args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; - break; - case 8: - default: - args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; - break; - case 10: - args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; - break; - case 12: - args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; - break; - case 16: - args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; - break; - } - } else { - if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; - break; - } - if (dig->linkb) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; - else - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} - -union dig_transmitter_control { - DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; - DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; - DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; - DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; -}; - -void -atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct drm_connector *connector; - union dig_transmitter_control args; - int index = 0; - uint8_t frev, crev; - bool is_dp = false; - int pll_id = 0; - int dp_clock = 0; - int dp_lane_count = 0; - int connector_object_id = 0; - int igp_lane_info = 0; - int dig_encoder = dig->dig_encoder; - - if (action == ATOM_TRANSMITTER_ACTION_INIT) { - connector = radeon_get_connector_for_encoder_init(encoder); - /* just needed to avoid bailing in the encoder check. 
the encoder - * isn't used for init - */ - dig_encoder = 0; - } else - connector = radeon_get_connector_for_encoder(encoder); - - if (connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *dig_connector = - radeon_connector->con_priv; - - dp_clock = dig_connector->dp_clock; - dp_lane_count = dig_connector->dp_lane_count; - connector_object_id = - (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - igp_lane_info = dig_connector->igp_lane_info; - } - - /* no dig encoder assigned */ - if (dig_encoder == -1) - return; - - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) - is_dp = true; - - memset(&args, 0, sizeof(args)); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); - break; - } - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return; - - args.v1.ucAction = action; - if (action == ATOM_TRANSMITTER_ACTION_INIT) { - args.v1.usInitInfo = cpu_to_le16(connector_object_id); - } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { - args.v1.asMode.ucLaneSel = lane_num; - args.v1.asMode.ucLaneSet = lane_set; - } else { - if (is_dp) - args.v1.usPixelClock = - cpu_to_le16(dp_clock / 10); - else if (radeon_encoder->pixel_clock > 165000) - args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); - else - args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - } - if (ASIC_IS_DCE4(rdev)) { - if (is_dp) - args.v3.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) - args.v3.ucLaneNum = 8; - else - args.v3.ucLaneNum = 4; - - if (dig->linkb) - args.v3.acConfig.ucLinkSel = 1; - if (dig_encoder & 1) - args.v3.acConfig.ucEncoderSel = 1; - - /* Select the PLL for the PHY - * DP PHY should be clocked from external src if there is - * one. 
- */ - if (encoder->crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - pll_id = radeon_crtc->pll_id; - } - - if (ASIC_IS_DCE5(rdev)) { - /* On DCE5 DCPLL usually generates the DP ref clock */ - if (is_dp) { - if (rdev->clock.dp_extclk) - args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK; - else - args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL; - } else - args.v4.acConfig.ucRefClkSource = pll_id; - } else { - /* On DCE4, if there is an external clock, it generates the DP ref clock */ - if (is_dp && rdev->clock.dp_extclk) - args.v3.acConfig.ucRefClkSource = 2; /* external src */ - else - args.v3.acConfig.ucRefClkSource = pll_id; - } - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v3.acConfig.ucTransmitterSel = 0; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - args.v3.acConfig.ucTransmitterSel = 1; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - args.v3.acConfig.ucTransmitterSel = 2; - break; - } - - if (is_dp) - args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ - else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { - if (dig->coherent_mode) - args.v3.acConfig.fCoherentMode = 1; - if (radeon_encoder->pixel_clock > 165000) - args.v3.acConfig.fDualLinkConnector = 1; - } - } else if (ASIC_IS_DCE32(rdev)) { - args.v2.acConfig.ucEncoderSel = dig_encoder; - if (dig->linkb) - args.v2.acConfig.ucLinkSel = 1; - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v2.acConfig.ucTransmitterSel = 0; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - args.v2.acConfig.ucTransmitterSel = 1; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - args.v2.acConfig.ucTransmitterSel = 2; - break; - } - - if (is_dp) { - args.v2.acConfig.fCoherentMode = 1; - args.v2.acConfig.fDPConnector = 1; - } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { - if (dig->coherent_mode) - args.v2.acConfig.fCoherentMode = 1; - if (radeon_encoder->pixel_clock > 165000) - args.v2.acConfig.fDualLinkConnector = 1; - } - } else { - args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; - - if (dig_encoder) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; - - if ((rdev->flags & RADEON_IS_IGP) && - (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { - if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { - if (igp_lane_info & 0x1) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else if (igp_lane_info & 0x2) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; - else if (igp_lane_info & 0x4) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; - else if (igp_lane_info & 0x8) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; - } else { - if (igp_lane_info & 0x3) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; - else if (igp_lane_info & 0xc) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; - } - } - - if (dig->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; - - if (is_dp) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; - else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { - if (dig->coherent_mode) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; - } - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t 
*)&args); -} - -bool -atombios_set_edp_panel_power(struct drm_connector *connector, int action) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_device *dev = radeon_connector->base.dev; - struct radeon_device *rdev = dev->dev_private; - union dig_transmitter_control args; - int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); - uint8_t frev, crev; - - if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) - goto done; - - if (!ASIC_IS_DCE4(rdev)) - goto done; - - if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && - (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) - goto done; - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - goto done; - - memset(&args, 0, sizeof(args)); - - args.v1.ucAction = action; - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - /* wait for the panel to power up */ - if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { - int i; - - for (i = 0; i < 300; i++) { - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) - return true; - mdelay(1); - } - return false; - } -done: - return true; -} - -union external_encoder_control { - EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; - EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3; -}; - -static void -atombios_external_encoder_setup(struct drm_encoder *encoder, - struct drm_encoder *ext_encoder, - int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); - union external_encoder_control args; - struct drm_connector *connector; - int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); - u8 frev, crev; - int dp_clock = 0; - int dp_lane_count = 0; - int connector_object_id = 0; - u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; - int bpc = 8; - - if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) - connector = radeon_get_connector_for_encoder_init(encoder); - else - connector = radeon_get_connector_for_encoder(encoder); - - if (connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *dig_connector = - radeon_connector->con_priv; - - dp_clock = dig_connector->dp_clock; - dp_lane_count = dig_connector->dp_lane_count; - connector_object_id = - (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - bpc = connector->display_info.bpc; - } - - memset(&args, 0, sizeof(args)); - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return; - - switch (frev) { - case 1: - /* no params on frev 1 */ - break; - case 2: - switch (crev) { - case 1: - case 2: - args.v1.sDigEncoder.ucAction = action; - args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); - - if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dp_clock == 270000) - args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; - args.v1.sDigEncoder.ucLaneNum = dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) - args.v1.sDigEncoder.ucLaneNum = 8; - else - args.v1.sDigEncoder.ucLaneNum = 4; - break; - case 3: - args.v3.sExtEncoder.ucAction = action; - if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) - args.v3.sExtEncoder.usConnectorId = 
cpu_to_le16(connector_object_id); - else - args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); - - if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dp_clock == 270000) - args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; - else if (dp_clock == 540000) - args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; - args.v3.sExtEncoder.ucLaneNum = dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) - args.v3.sExtEncoder.ucLaneNum = 8; - else - args.v3.sExtEncoder.ucLaneNum = 4; - switch (ext_enum) { - case GRAPH_OBJECT_ENUM_ID1: - args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; - break; - case GRAPH_OBJECT_ENUM_ID2: - args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; - break; - case GRAPH_OBJECT_ENUM_ID3: - args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; - break; - } - switch (bpc) { - case 0: - args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE; - break; - case 6: - args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR; - break; - case 8: - default: - args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; - break; - case 10: - args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR; - break; - case 12: - args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR; - break; - case 16: - args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR; - break; - } - break; - default: - DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); - return; - } - break; - default: - DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); - return; - } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); -} - -static void -atombios_yuv_setup(struct drm_encoder *encoder, bool enable) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - ENABLE_YUV_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); - uint32_t temp, reg; - - memset(&args, 0, sizeof(args)); - - if (rdev->family >= CHIP_R600) - reg = R600_BIOS_3_SCRATCH; - else - reg = RADEON_BIOS_3_SCRATCH; - - /* XXX: fix up scratch reg handling */ - temp = RREG32(reg); - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - WREG32(reg, (ATOM_S3_TV1_ACTIVE | - (radeon_crtc->crtc_id << 18))); - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); - else - WREG32(reg, 0); - - if (enable) - args.ucEnable = ATOM_ENABLE; - args.ucCRTC = radeon_crtc->crtc_id; - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - WREG32(reg, temp); -} - -static void -radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; - int index = 0; - bool is_dig = false; - bool is_dce5_dac = false; - bool is_dce5_dvo = false; - - memset(&args, 0, sizeof(args)); - - DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", - radeon_encoder->encoder_id, mode, 
radeon_encoder->devices, - radeon_encoder->active_device); - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - is_dig = true; - break; - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - if (ASIC_IS_DCE5(rdev)) - is_dce5_dvo = true; - else if (ASIC_IS_DCE3(rdev)) - is_dig = true; - else - index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); - else - index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - if (ASIC_IS_DCE5(rdev)) - is_dce5_dac = true; - else { - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); - else - index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); - } - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); - else - index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); - break; - } - - if (is_dig) { - switch (mode) { - case DRM_MODE_DPMS_ON: - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - else - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - - if (connector && - (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *radeon_dig_connector = - radeon_connector->con_priv; - atombios_set_edp_panel_power(connector, - ATOM_TRANSMITTER_ACTION_POWER_ON); - radeon_dig_connector->edp_on = true; - } - if (ASIC_IS_DCE4(rdev)) - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); - radeon_dp_link_train(encoder, connector); - if (ASIC_IS_DCE4(rdev)) - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); - } - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - atombios_dig_transmitter_setup(encoder, 
ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - - if (ASIC_IS_DCE4(rdev)) - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); - if (connector && - (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *radeon_dig_connector = - radeon_connector->con_priv; - atombios_set_edp_panel_power(connector, - ATOM_TRANSMITTER_ACTION_POWER_OFF); - radeon_dig_connector->edp_on = false; - } - } - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); - break; - } - } else if (is_dce5_dac) { - switch (mode) { - case DRM_MODE_DPMS_ON: - atombios_dac_setup(encoder, ATOM_ENABLE); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - atombios_dac_setup(encoder, ATOM_DISABLE); - break; - } - } else if (is_dce5_dvo) { - switch (mode) { - case DRM_MODE_DPMS_ON: - atombios_dvo_setup(encoder, ATOM_ENABLE); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - atombios_dvo_setup(encoder, ATOM_DISABLE); - break; - } - } else { - switch (mode) { - case DRM_MODE_DPMS_ON: - args.ucAction = ATOM_ENABLE; - /* workaround for DVOOutputControl on some RS690 systems */ - if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { - u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); - WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - WREG32(RADEON_BIOS_3_SCRATCH, reg); - } else - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - args.ucAction = ATOM_DISABLE; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLOFF; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } - break; - } - } - - if (ext_encoder) { - switch (mode) { - case DRM_MODE_DPMS_ON: - default: - if (ASIC_IS_DCE41(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); - break; - } - } - - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); - -} - -union crtc_source_param { - SELECT_CRTC_SOURCE_PS_ALLOCATION v1; - SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; -}; - -static void -atombios_set_encoder_crtc_source(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - union crtc_source_param args; - int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); - uint8_t frev, crev; - struct radeon_encoder_atom_dig *dig; - - memset(&args, 0, sizeof(args)); - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return; - - switch (frev) { - case 1: - switch (crev) { - case 1: - default: - if (ASIC_IS_AVIVO(rdev)) - args.v1.ucCRTC = radeon_crtc->crtc_id; - else { - if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { - args.v1.ucCRTC = radeon_crtc->crtc_id; - } else { - args.v1.ucCRTC = radeon_crtc->crtc_id << 2; - } - } - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; - break; - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) - args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; - else - args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; - break; - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; - else - args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; - else - args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; - break; - } - break; - case 2: - args.v2.ucCRTC = radeon_crtc->crtc_id; - if (radeon_encoder_is_dp_bridge(encoder)) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) - args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; - else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) - args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; - else - args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); - } else - args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - dig = radeon_encoder->enc_priv; - switch (dig->dig_encoder) { - case 0: - args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; - break; - case 1: - args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; - break; - case 2: - args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; - break; - case 3: - args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; - break; - case 4: - args.v2.ucEncoderID = 
ASIC_INT_DIG5_ENCODER_ID; - break; - case 5: - args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; - break; - } - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; - else - args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; - else - args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; - break; - } - break; - } - break; - default: - DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); - return; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - /* update scratch regs with new routing */ - radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); -} - -static void -atombios_apply_encoder_quirks(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - - /* Funky macbooks */ - if ((dev->pdev->device == 0x71C5) && - (dev->pdev->subsystem_vendor == 0x106b) && - (dev->pdev->subsystem_device == 0x0080)) { - if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { - uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL); - - lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN; - lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN; - - WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control); - } - } - - /* set scaler clears this on some chips */ - if (ASIC_IS_AVIVO(rdev) && - (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { - if (ASIC_IS_DCE4(rdev)) { - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, - EVERGREEN_INTERLEAVE_EN); - else - WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - } else { - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, - AVIVO_D1MODE_INTERLEAVE_EN); - else - WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - } - } -} - -static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig; - uint32_t dig_enc_in_use = 0; - - /* DCE4/5 */ - if (ASIC_IS_DCE4(rdev)) { - dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) { - /* ontario follows DCE4 */ - if (rdev->family == CHIP_PALM) { - if (dig->linkb) - return 1; - else - return 0; - } else - /* llano follows DCE3.2 */ - return radeon_crtc->crtc_id; - } else { - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return 1; - else - return 0; - break; - case 
ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return 3; - else - return 2; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return 5; - else - return 4; - break; - } - } - } - - /* on DCE32 and encoder can driver any block so just crtc id */ - if (ASIC_IS_DCE32(rdev)) { - return radeon_crtc->crtc_id; - } - - /* on DCE3 - LVTMA can only be driven by DIGB */ - list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { - struct radeon_encoder *radeon_test_encoder; - - if (encoder == test_encoder) - continue; - - if (!radeon_encoder_is_digital(test_encoder)) - continue; - - radeon_test_encoder = to_radeon_encoder(test_encoder); - dig = radeon_test_encoder->enc_priv; - - if (dig->dig_encoder >= 0) - dig_enc_in_use |= (1 << dig->dig_encoder); - } - - if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { - if (dig_enc_in_use & 0x2) - DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); - return 1; - } - if (!(dig_enc_in_use & 1)) - return 0; - return 1; -} - -/* This only needs to be called once at startup */ -void -radeon_atom_encoder_init(struct radeon_device *rdev) -{ - struct drm_device *dev = rdev->ddev; - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); - break; - default: - break; - } - - if (ext_encoder && ASIC_IS_DCE41(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); - } -} - -static void -radeon_atom_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - - radeon_encoder->pixel_clock = adjusted_mode->clock; - - if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { - if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) - atombios_yuv_setup(encoder, true); - else - atombios_yuv_setup(encoder, false); - } - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, 
ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } - break; - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - atombios_dvo_setup(encoder, ATOM_ENABLE); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - atombios_dac_setup(encoder, ATOM_ENABLE); - if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - atombios_tv_setup(encoder, ATOM_ENABLE); - else - atombios_tv_setup(encoder, ATOM_DISABLE); - } - break; - } - - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); - else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - } - - atombios_apply_encoder_quirks(encoder, adjusted_mode); - - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { - r600_hdmi_enable(encoder); - r600_hdmi_setmode(encoder, adjusted_mode); - } -} - -static bool -atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - - if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | - ATOM_DEVICE_CV_SUPPORT | - ATOM_DEVICE_CRT_SUPPORT)) { - DAC_LOAD_DETECTION_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); - uint8_t frev, crev; - - memset(&args, 0, sizeof(args)); - - if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) - return false; - - args.sDacload.ucMisc = 0; - - if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || - (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) - args.sDacload.ucDacType = ATOM_DAC_A; - else - args.sDacload.ucDacType = ATOM_DAC_B; - - if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) - args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); - else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) - args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); - else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { - args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); - if (crev >= 3) - args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; - } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { - args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); - if (crev >= 3) - args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - return true; - } else - return false; -} - -static enum drm_connector_status -radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = 
to_radeon_encoder(encoder); - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - uint32_t bios_0_scratch; - - if (!atombios_dac_load_detect(encoder, connector)) { - DRM_DEBUG_KMS("detect returned false \n"); - return connector_status_unknown; - } - - if (rdev->family >= CHIP_R600) - bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); - else - bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); - - DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); - if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT1_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT2_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) - return connector_status_connected; /* CTV */ - else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) - return connector_status_connected; /* STV */ - } - return connector_status_disconnected; -} - -static enum drm_connector_status -radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - u32 bios_0_scratch; - - if (!ASIC_IS_DCE4(rdev)) - return connector_status_unknown; - - if (!ext_encoder) - return connector_status_unknown; - - if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) - return connector_status_unknown; - - /* load detect on the dp bridge */ - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); - - bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); - - DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); - if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT1_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { - if (bios_0_scratch & ATOM_S0_CRT2_MASK) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) - return connector_status_connected; - } - if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { - if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) - return connector_status_connected; /* CTV */ - else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) - return connector_status_connected; /* STV */ - } - return connector_status_disconnected; -} - -void -radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) -{ - struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); - - if (ext_encoder) - /* ddc_setup on the dp bridge */ - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); - -} - -static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector 
*connector = radeon_get_connector_for_encoder(encoder); - - if ((radeon_encoder->active_device & - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || - radeon_encoder_is_dp_bridge(encoder)) { - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (dig) - dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); - } - - radeon_atom_output_lock(encoder, true); - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - if (connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - - /* select the clock/data port if it uses a router */ - if (radeon_connector->router.cd_valid) - radeon_router_select_cd_port(radeon_connector); - - /* turn eDP panel on for mode set */ - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - atombios_set_edp_panel_power(connector, - ATOM_TRANSMITTER_ACTION_POWER_ON); - } - - /* this is needed for the pll/ss setup to work correctly in some cases */ - atombios_set_encoder_crtc_source(encoder); -} - -static void radeon_atom_encoder_commit(struct drm_encoder *encoder) -{ - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); - radeon_atom_output_lock(encoder, false); -} - -static void radeon_atom_encoder_disable(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig; - - /* check for pre-DCE3 cards with shared encoders; - * can't really use the links individually, so don't disable - * the encoder if it's in use by another connector - */ - if (!ASIC_IS_DCE3(rdev)) { - struct drm_encoder *other_encoder; - struct radeon_encoder *other_radeon_encoder; - - list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { - other_radeon_encoder = to_radeon_encoder(other_encoder); - if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && - drm_helper_encoder_in_use(other_encoder)) - goto disable_done; - } - } - - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } - break; - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - atombios_dvo_setup(encoder, ATOM_DISABLE); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - atombios_dac_setup(encoder, ATOM_DISABLE); - if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - atombios_tv_setup(encoder, ATOM_DISABLE); - break; - } - -disable_done: - if (radeon_encoder_is_digital(encoder)) { - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) - 
r600_hdmi_disable(encoder); - dig = radeon_encoder->enc_priv; - dig->dig_encoder = -1; - } - radeon_encoder->active_device = 0; -} - -/* these are handled by the primary encoders */ -static void radeon_atom_ext_prepare(struct drm_encoder *encoder) -{ - -} - -static void radeon_atom_ext_commit(struct drm_encoder *encoder) -{ - -} - -static void -radeon_atom_ext_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - -} - -static void radeon_atom_ext_disable(struct drm_encoder *encoder) -{ - -} - -static void -radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) -{ - -} - -static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { - .dpms = radeon_atom_ext_dpms, - .mode_fixup = radeon_atom_ext_mode_fixup, - .prepare = radeon_atom_ext_prepare, - .mode_set = radeon_atom_ext_mode_set, - .commit = radeon_atom_ext_commit, - .disable = radeon_atom_ext_disable, - /* no detect for TMDS/LVDS yet */ -}; - -static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { - .dpms = radeon_atom_encoder_dpms, - .mode_fixup = radeon_atom_mode_fixup, - .prepare = radeon_atom_encoder_prepare, - .mode_set = radeon_atom_encoder_mode_set, - .commit = radeon_atom_encoder_commit, - .disable = radeon_atom_encoder_disable, - .detect = radeon_atom_dig_detect, -}; - -static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { - .dpms = radeon_atom_encoder_dpms, - .mode_fixup = radeon_atom_mode_fixup, - .prepare = radeon_atom_encoder_prepare, - .mode_set = radeon_atom_encoder_mode_set, - .commit = radeon_atom_encoder_commit, - .detect = radeon_atom_dac_detect, -}; - -void radeon_enc_destroy(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - kfree(radeon_encoder->enc_priv); - drm_encoder_cleanup(encoder); - kfree(radeon_encoder); -} - -static const struct drm_encoder_funcs radeon_atom_enc_funcs = { - .destroy = radeon_enc_destroy, -}; - -struct radeon_encoder_atom_dac * -radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) -{ - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); - - if (!dac) - return NULL; - - dac->tv_std = radeon_atombios_get_tv_info(rdev); - return dac; -} - -struct radeon_encoder_atom_dig * -radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) -{ - int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; - struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); - - if (!dig) - return NULL; - - /* coherent mode by default */ - dig->coherent_mode = true; - dig->dig_encoder = -1; - - if (encoder_enum == 2) - dig->linkb = true; - else - dig->linkb = false; - - return dig; -} - -void -radeon_add_atom_encoder(struct drm_device *dev, - uint32_t encoder_enum, - uint32_t supported_device, - u16 caps) -{ - struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *encoder; - struct radeon_encoder *radeon_encoder; - - /* see if we already added it */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->encoder_enum == encoder_enum) { - 
radeon_encoder->devices |= supported_device; - return; - } - - } - - /* add a new one */ - radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); - if (!radeon_encoder) - return; - - encoder = &radeon_encoder->base; - switch (rdev->num_crtc) { - case 1: - encoder->possible_crtcs = 0x1; - break; - case 2: - default: - encoder->possible_crtcs = 0x3; - break; - case 4: - encoder->possible_crtcs = 0xf; - break; - case 6: - encoder->possible_crtcs = 0x3f; - break; - } - - radeon_encoder->enc_priv = NULL; - - radeon_encoder->encoder_enum = encoder_enum; - radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - radeon_encoder->devices = supported_device; - radeon_encoder->rmx_type = RMX_OFF; - radeon_encoder->underscan_type = UNDERSCAN_OFF; - radeon_encoder->is_ext_encoder = false; - radeon_encoder->caps = caps; - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - radeon_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); - radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); - } else { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); - radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); - } - drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC1: - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); - radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); - drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); - break; - case ENCODER_OBJECT_ID_INTERNAL_DAC2: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); - radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); - drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); - break; - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - radeon_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); - radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); - } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); - radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); - } else { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); - radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); - } - drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); - break; - case ENCODER_OBJECT_ID_SI170B: - case ENCODER_OBJECT_ID_CH7303: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: - case ENCODER_OBJECT_ID_TITFP513: - case ENCODER_OBJECT_ID_VT1623: - case ENCODER_OBJECT_ID_HDMI_SI1930: - case ENCODER_OBJECT_ID_TRAVIS: - case 
ENCODER_OBJECT_ID_NUTMEG: - /* these are handled by the primary encoders */ - radeon_encoder->is_ext_encoder = true; - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); - else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); - else - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); - break; - } -} diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index fdc3a9a54bf8..ba7ab79e12c1 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) rdev->gart.table_size >> PAGE_SHIFT); } #endif - rdev->gart.table.ram.ptr = ptr; - memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size); + rdev->gart.ptr = ptr; + memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size); return 0; } void radeon_gart_table_ram_free(struct radeon_device *rdev) { - if (rdev->gart.table.ram.ptr == NULL) { + if (rdev->gart.ptr == NULL) { return; } #ifdef CONFIG_X86 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { - set_memory_wb((unsigned long)rdev->gart.table.ram.ptr, + set_memory_wb((unsigned long)rdev->gart.ptr, rdev->gart.table_size >> PAGE_SHIFT); } #endif pci_free_consistent(rdev->pdev, rdev->gart.table_size, - (void *)rdev->gart.table.ram.ptr, + (void *)rdev->gart.ptr, rdev->gart.table_addr); - rdev->gart.table.ram.ptr = NULL; + rdev->gart.ptr = NULL; rdev->gart.table_addr = 0; } @@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - &rdev->gart.table.vram.robj); + &rdev->gart.robj); if (r) { return r; } @@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) uint64_t gpu_addr; int r; - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + r = radeon_bo_reserve(rdev->gart.robj, false); if (unlikely(r != 0)) return r; - r = radeon_bo_pin(rdev->gart.table.vram.robj, + r = radeon_bo_pin(rdev->gart.robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); if (r) { - radeon_bo_unreserve(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.robj); return r; } - r = radeon_bo_kmap(rdev->gart.table.vram.robj, - (void **)&rdev->gart.table.vram.ptr); + r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr); if (r) - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.robj); + radeon_bo_unreserve(rdev->gart.robj); rdev->gart.table_addr = gpu_addr; return r; } -void radeon_gart_table_vram_free(struct radeon_device *rdev) +void radeon_gart_table_vram_unpin(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { return; } - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + r = radeon_bo_reserve(rdev->gart.robj, false); if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); + radeon_bo_kunmap(rdev->gart.robj); + radeon_bo_unpin(rdev->gart.robj); + 
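Aside: the possible_crtcs switch in radeon_add_atom_encoder() above just builds a contiguous CRTC bitmask from the CRTC count. A minimal standalone sketch of that relationship; the helper name and the fallback for unlisted counts are my own, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Bitmask of CRTCs an encoder may drive: one bit per CRTC, so
 * n CRTCs -> lowest n bits set.  The driver's switch enumerates
 * 1, 2, 4 and 6 explicitly and falls back to 0x3 otherwise. */
static uint32_t possible_crtcs_mask(unsigned num_crtc)
{
    switch (num_crtc) {
    case 1:
    case 2:
    case 4:
    case 6:
        return (1u << num_crtc) - 1;
    default:
        return 0x3;   /* same fallback as the driver's default case */
    }
}

int main(void)
{
    assert(possible_crtcs_mask(1) == 0x1);
    assert(possible_crtcs_mask(2) == 0x3);
    assert(possible_crtcs_mask(4) == 0xf);
    assert(possible_crtcs_mask(6) == 0x3f);
    return 0;
}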
radeon_bo_unreserve(rdev->gart.robj); + rdev->gart.ptr = NULL; } - radeon_bo_unref(&rdev->gart.table.vram.robj); +} + +void radeon_gart_table_vram_free(struct radeon_device *rdev) +{ + if (rdev->gart.robj == NULL) { + return; + } + radeon_gart_table_vram_unpin(rdev); + radeon_bo_unref(&rdev->gart.robj); } @@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, if (rdev->gart.pages[p]) { if (!rdev->gart.ttm_alloced[p]) pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); rdev->gart.pages[p] = NULL; rdev->gart.pages_addr[p] = rdev->dummy_page.addr; page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base); + if (rdev->gart.ptr) { + radeon_gart_set_page(rdev, t, page_base); + } page_base += RADEON_GPU_PAGE_SIZE; } } @@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, } } rdev->gart.pages[p] = pagelist[i]; - page_base = rdev->gart.pages_addr[p]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base); - page_base += RADEON_GPU_PAGE_SIZE; + if (rdev->gart.ptr) { + page_base = rdev->gart.pages_addr[p]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + radeon_gart_set_page(rdev, t, page_base); + page_base += RADEON_GPU_PAGE_SIZE; + } } } mb(); @@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev) int i, j, t; u64 page_base; + if (!rdev->gart.ptr) { + return; + } for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { page_base = rdev->gart.pages_addr[i]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 02cb7da4124d..7bb1b079f480 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -23,6 +23,8 @@ * Authors: Dave Airlie * Alex Deucher */ +#include <linux/export.h> + #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" @@ -32,7 +34,7 @@ * radeon_ddc_probe * */ -bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) +bool radeon_ddc_probe(struct radeon_connector *radeon_connector) { u8 out = 0x0; u8 buf[8]; @@ -47,15 +49,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e { .addr = 0x50, .flags = I2C_M_RD, - .len = 1, + .len = 8, .buf = buf, } }; - /* Read 8 bytes from i2c for extended probe of EDID header */ - if (requires_extended_probe) - msgs[1].len = 8; - /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); @@ -64,17 +62,15 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e if (ret != 2) /* Couldn't find an accessible DDC on this connector */ return false; - if (requires_extended_probe) { - /* Probe also for valid EDID header - * EDID header starts with: - * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. - * Only the first 6 bytes must be valid as - * drm_edid_block_valid() can fix the last 2 bytes */ - if (drm_edid_header_is_valid(buf) < 6) { - /* Couldn't find an accessible EDID on this - * connector */ - return false; - } + /* Probe also for valid EDID header + * EDID header starts with: + * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. 
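Aside: the radeon_gart.c rework above collapses the old table.ram/table.vram union into a single gart.ptr and skips page-table writes while that pointer is NULL, relying on radeon_gart_restore() to replay the remembered mappings once the table is pinned again. A rough userspace model of that bookkeeping, with invented names (gart_model, gart_set_page_model) and a trivial one-to-one page mapping, just to show the replay idea:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_PAGES 8

struct gart_model {
    uint64_t pages_addr[NUM_PAGES]; /* always tracked, even when unmapped */
    uint64_t *ptr;                  /* CPU mapping of the table, may be NULL */
};

/* Record one mapping, but only write the table entry if the table is
 * currently mapped (the real code also ORs in read/write flag bits). */
static void gart_set_page_model(struct gart_model *g, unsigned i, uint64_t addr)
{
    g->pages_addr[i] = addr;
    if (g->ptr)
        g->ptr[i] = addr;
}

/* Replay all remembered mappings once the table is accessible again. */
static void gart_restore_model(struct gart_model *g)
{
    unsigned i;

    if (!g->ptr)
        return;
    for (i = 0; i < NUM_PAGES; i++)
        g->ptr[i] = g->pages_addr[i];
}

int main(void)
{
    struct gart_model g = { .ptr = NULL };

    gart_set_page_model(&g, 3, 0x1000);         /* recorded, table unpinned */
    g.ptr = calloc(NUM_PAGES, sizeof(*g.ptr));  /* "pin" the table */
    gart_restore_model(&g);
    printf("entry 3 = 0x%llx\n", (unsigned long long)g.ptr[3]);
    free(g.ptr);
    return 0;
}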
+ * Only the first 6 bytes must be valid as + * drm_edid_block_valid() can fix the last 2 bytes */ + if (drm_edid_header_is_valid(buf) < 6) { + /* Couldn't find an accessible EDID on this + * connector */ + return false; } return true; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 9ec830c77af0..8f86aeb26693 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -67,10 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) /* Disable *all* interrupts */ rdev->irq.sw_int = false; rdev->irq.gui_idle = false; - for (i = 0; i < rdev->num_crtc; i++) - rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) { + for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; + for (i = 0; i < RADEON_MAX_CRTCS; i++) { + rdev->irq.crtc_vblank_int[i] = false; rdev->irq.pflip[i] = false; } radeon_irq_set(rdev); @@ -99,15 +99,55 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) /* Disable *all* interrupts */ rdev->irq.sw_int = false; rdev->irq.gui_idle = false; - for (i = 0; i < rdev->num_crtc; i++) - rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) { + for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; + for (i = 0; i < RADEON_MAX_CRTCS; i++) { + rdev->irq.crtc_vblank_int[i] = false; rdev->irq.pflip[i] = false; } radeon_irq_set(rdev); } +static bool radeon_msi_ok(struct radeon_device *rdev) +{ + /* RV370/RV380 was first asic with MSI support */ + if (rdev->family < CHIP_RV380) + return false; + + /* MSIs don't work on AGP */ + if (rdev->flags & RADEON_IS_AGP) + return false; + + /* force MSI on */ + if (radeon_msi == 1) + return true; + else if (radeon_msi == 0) + return false; + + /* Quirks */ + /* HP RS690 only seems to work with MSIs. */ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x103c) && + (rdev->pdev->subsystem_device == 0x30c2)) + return true; + + /* Dell RS690 only seems to work with MSIs. */ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x1028) && + (rdev->pdev->subsystem_device == 0x01fd)) + return true; + + if (rdev->flags & RADEON_IS_IGP) { + /* APUs work fine with MSIs */ + if (rdev->family >= CHIP_PALM) + return true; + /* lots of IGPs have problems with MSIs */ + return false; + } + + return true; +} + int radeon_irq_kms_init(struct radeon_device *rdev) { int i; @@ -124,12 +164,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev) } /* enable msi */ rdev->msi_enabled = 0; - /* MSIs don't seem to work reliably on all IGP - * chips. Disable MSI on them for now. 
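Aside: the simplified radeon_ddc_probe() above always reads 8 bytes from the DDC address and accepts the display only if at least 6 of them match the fixed EDID header. A self-contained sketch of that scoring check; edid_header_score is an invented name, while drm_edid_header_is_valid() in drm_edid.c does essentially the same counting.

#include <stdio.h>
#include <stdint.h>

/* Every EDID block 0 starts with this fixed 8-byte signature. */
static const uint8_t edid_header[8] = {
    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* Count how many header bytes match; 8 is a perfect header. */
static int edid_header_score(const uint8_t *buf)
{
    int i, score = 0;

    for (i = 0; i < 8; i++)
        score += (buf[i] == edid_header[i]);
    return score;
}

int main(void)
{
    /* A header with the last two bytes corrupted still scores 6, which
     * the probe accepts because drm_edid_block_valid() can repair the
     * trailing bytes later. */
    uint8_t damaged[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xad };

    printf("score = %d -> %s\n", edid_header_score(damaged),
           edid_header_score(damaged) >= 6 ? "accept" : "reject");
    return 0;
}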
- */ - if ((rdev->family >= CHIP_RV380) && - ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && - (!(rdev->flags & RADEON_IS_AGP))) { + + if (radeon_msi_ok(rdev)) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { rdev->msi_enabled = 1; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 41a5d48e657b..daadf2111040 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ed0178f03235..2c2e75ef8a37 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -438,9 +438,6 @@ struct radeon_connector { struct radeon_i2c_chan *ddc_bus; /* some systems have an hdmi and vga port with a shared ddc line */ bool shared_ddc; - /* for some Radeon chip families we apply an additional EDID header - check as part of the DDC probe */ - bool requires_extended_probe; bool use_digital; /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ @@ -459,6 +456,8 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; +#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ + ((em) == ATOM_ENCODER_MODE_DP_MST)) extern enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev); @@ -468,8 +467,8 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); extern struct drm_connector * radeon_get_connector_for_encoder(struct drm_encoder *encoder); -extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); -extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); +extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); +extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); @@ -489,7 +488,7 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set); extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); -extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder); +extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, u8 write_byte, u8 *read_byte); @@ -519,8 +518,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, u8 val); extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, - bool requires_extended_probe); +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector 
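Aside: radeon_msi_ok() above replaces the old inline condition with an ordered policy: hard requirements first (no pre-RV380 parts, no AGP), then the radeon_msi module parameter as an explicit override, then per-board quirks, then the IGP/APU default. A compact standalone restatement of that precedence, with made-up chip and flag constants, useful mainly as a truth-table check:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values; the real CHIP_* enum lives in radeon_family.h. */
enum { CHIP_R300 = 1, CHIP_RV380 = 2, CHIP_RS690 = 3, CHIP_PALM = 4 };

struct dev {
    int family;
    bool agp, igp;
    bool quirk_needs_msi;   /* stands in for the HP/Dell RS690 PCI-ID checks */
};

/* radeon_msi: -1 = auto (default), 0 = force off, 1 = force on */
static bool msi_ok(const struct dev *d, int radeon_msi)
{
    if (d->family < CHIP_RV380)
        return false;               /* first ASIC with MSI support */
    if (d->agp)
        return false;               /* MSIs don't work on AGP */
    if (radeon_msi == 1)
        return true;                /* explicit override wins ... */
    if (radeon_msi == 0)
        return false;               /* ... in either direction */
    if (d->quirk_needs_msi)
        return true;                /* boards known to require MSI */
    if (d->igp)
        return d->family >= CHIP_PALM;  /* APUs fine, older IGPs not */
    return true;
}

int main(void)
{
    struct dev rs690_hp = { CHIP_RS690, false, true, true };
    struct dev rs690    = { CHIP_RS690, false, true, false };

    printf("quirked RS690, auto: %d\n", msi_ok(&rs690_hp, -1));  /* 1 */
    printf("plain RS690, auto:   %d\n", msi_ok(&rs690, -1));     /* 0 */
    return 0;
}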
*connector); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fabe89fa6a1..78a665bd9519 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev); #define ACPI_AC_CLASS "ac_adapter" +int radeon_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance) +{ + int i; + int found_instance = -1; + + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].type == ps_type) { + found_instance++; + if (found_instance == instance) + return i; + } + } + /* return default if no match */ + return rdev->pm.default_power_state_index; +} + #ifdef CONFIG_ACPI static int radeon_acpi_event(struct notifier_block *nb, unsigned long val, diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 89a6e1ecea8d..06b90c87f8f3 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev) { int r; - if (rdev->gart.table.ram.ptr) { + if (rdev->gart.ptr) { WARN(1, "RS400 GART already initialized\n"); return 0; } @@ -212,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev) int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { uint32_t entry; + u32 *gtt = rdev->gart.ptr; if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; @@ -221,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) ((upper_32_bits(addr) & 0xff) << 4) | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; entry = cpu_to_le32(entry); - rdev->gart.table.ram.ptr[i] = entry; + gtt[i] = entry; return 0; } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9320dd6404f6..b1053d640423 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
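Aside: radeon_pm_get_type_index() above scans the power-state table for the Nth state of a given type and falls back to the default index when no such state exists. A toy version over a plain array shows the intended calling pattern; the state names and table contents here are invented.

#include <stdio.h>

enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE, PS_DEFAULT };

static const enum ps_type power_state[] = {
    PS_DEFAULT, PS_BATTERY, PS_PERFORMANCE, PS_BALANCED, PS_PERFORMANCE
};
static const int num_power_states = 5;
static const int default_power_state_index = 0;

/* Return the table index of the 'instance'-th state of type 'type',
 * or the default index if that many states of the type don't exist. */
static int pm_get_type_index(enum ps_type type, int instance)
{
    int i, found = -1;

    for (i = 0; i < num_power_states; i++) {
        if (power_state[i] == type && ++found == instance)
            return i;
    }
    return default_power_state_index;
}

int main(void)
{
    printf("%d\n", pm_get_type_index(PS_PERFORMANCE, 0)); /* 2 */
    printf("%d\n", pm_get_type_index(PS_PERFORMANCE, 1)); /* 4 */
    printf("%d\n", pm_get_type_index(PS_PERFORMANCE, 2)); /* 0: fallback */
    return 0;
}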
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ @@ -287,6 +292,7 @@ void rs600_hpd_init(struct radeon_device *rdev) default: break; } + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } if (rdev->irq.installed) rs600_irq_set(rdev); @@ -413,7 +419,7 @@ int rs600_gart_init(struct radeon_device *rdev) { int r; - if (rdev->gart.table.vram.robj) { + if (rdev->gart.robj) { WARN(1, "RS600 GART already initialized\n"); return 0; } @@ -431,7 +437,7 @@ static int rs600_gart_enable(struct radeon_device *rdev) u32 tmp; int r, i; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -494,20 +500,12 @@ static int rs600_gart_enable(struct radeon_device *rdev) void rs600_gart_disable(struct radeon_device *rdev) { u32 tmp; - int r; /* FIXME: disable out of gart access */ WREG32_MC(R_000100_MC_PT0_CNTL, 0); tmp = RREG32_MC(R_000009_MC_CNTL1); WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (r == 0) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void rs600_gart_fini(struct radeon_device *rdev) @@ -525,7 +523,7 @@ void rs600_gart_fini(struct radeon_device *rdev) int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + void __iomem *ptr = (void *)rdev->gart.ptr; if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 87cc1feee3ac..23ae1c60ab3d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ @@ -124,7 +129,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) u32 tmp; int r, i; - if (rdev->gart.table.vram.robj == NULL) { + if (rdev->gart.robj == NULL) { dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -171,7 +176,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) void rv770_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i, r; + int i; /* Disable all tables */ for (i = 0; i < 7; i++) @@ -191,14 +196,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); - if (rdev->gart.table.vram.robj) { - r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->gart.table.vram.robj); - radeon_bo_unpin(rdev->gart.table.vram.robj); - radeon_bo_unreserve(rdev->gart.table.vram.robj); - } - } + radeon_gart_table_vram_unpin(rdev); } void rv770_pcie_gart_fini(struct radeon_device *rdev) @@ -282,7 +280,7 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); } - WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); @@ -959,54 +957,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) } -static int rv770_vram_scratch_init(struct radeon_device *rdev) -{ - int r; - u64 gpu_addr; - - if (rdev->vram_scratch.robj == NULL) { - r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, - PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - &rdev->vram_scratch.robj); - if (r) { - return r; - } - } - - r = radeon_bo_reserve(rdev->vram_scratch.robj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->vram_scratch.robj, - RADEON_GEM_DOMAIN_VRAM, &gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->vram_scratch.robj); - return r; - } - r = radeon_bo_kmap(rdev->vram_scratch.robj, - (void **)&rdev->vram_scratch.ptr); - if (r) - radeon_bo_unpin(rdev->vram_scratch.robj); - radeon_bo_unreserve(rdev->vram_scratch.robj); - - return r; -} - -static void rv770_vram_scratch_fini(struct radeon_device *rdev) -{ - int r; - - if (rdev->vram_scratch.robj == NULL) { - return; - } - r = radeon_bo_reserve(rdev->vram_scratch.robj, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rdev->vram_scratch.robj); - radeon_bo_unpin(rdev->vram_scratch.robj); - radeon_bo_unreserve(rdev->vram_scratch.robj); - } - radeon_bo_unref(&rdev->vram_scratch.robj); -} - void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) { u64 size_bf, size_af; @@ -1106,6 +1056,10 @@ static int rv770_startup(struct radeon_device *rdev) } } + r = r600_vram_scratch_init(rdev); + if (r) + return r; + rv770_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { rv770_agp_enable(rdev); @@ -1114,9 +1068,7 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; } - r = rv770_vram_scratch_init(rdev); - if (r) - return r; + rv770_gpu_init(rdev); r = r600_blit_init(rdev); if (r) { @@ -1316,7 +1268,7 @@ void rv770_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); - rv770_vram_scratch_fini(rdev); + r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); 
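Aside: both page-flip hunks above replace an unbounded while-spin on UPDATE_PENDING with a loop capped at rdev->usec_timeout iterations of udelay(1), so a stuck status bit can no longer hang the CPU. The same pattern in a generic userspace form; poll_flag, read_hw_flag and USEC_TIMEOUT are placeholders.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_TIMEOUT 100000  /* stand-in for rdev->usec_timeout */

/* Pretend hardware: reports "done" after a few polls. */
static bool read_hw_flag(void)
{
    static int calls;
    return ++calls > 3;
}

/* Poll a condition for at most USEC_TIMEOUT microseconds instead of
 * spinning forever; returns true if the flag went high in time. */
static bool poll_flag(void)
{
    int i;

    for (i = 0; i < USEC_TIMEOUT; i++) {
        if (read_hw_flag())
            return true;
        usleep(1);   /* the driver uses udelay(1) in this context */
    }
    return false;
}

int main(void)
{
    printf("flag %s\n", poll_flag() ? "went high" : "timed out");
    return 0;
}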
radeon_agp_fini(rdev); diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 6464490b240b..5468d1cd3296 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -23,6 +23,8 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/module.h> + #include "drmP.h" #include "savage_drm.h" #include "savage_drv.h" diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 46d5be6e97e5..a9c5716bea4e 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -25,6 +25,8 @@ * */ +#include <linux/module.h> + #include "drmP.h" #include "sis_drm.h" #include "sis_drv.h" diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 8bf98810a8d6..cda29911e332 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -30,6 +30,8 @@ * Gareth Hughes <gareth@valinux.com> */ +#include <linux/module.h> + #include "drmP.h" #include "tdfx_drv.h" diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 617b64678fc6..0bb0f5f713e6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -574,10 +574,16 @@ retry: return ret; spin_lock(&glob->lru_lock); + + if (unlikely(list_empty(&bo->ddestroy))) { + spin_unlock(&glob->lru_lock); + return 0; + } + ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0); - if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { + if (unlikely(ret != 0)) { spin_unlock(&glob->lru_lock); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 58c271ebc0f7..f9cc548d6d98 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -35,6 +35,7 @@ #include <linux/file.h> #include <linux/swap.h> #include <linux/slab.h> +#include <linux/export.h> #include "drm_cache.h" #include "drm_mem_util.h" #include "ttm/ttm_module.h" diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 920a55214bcf..a83e86d3956c 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -22,6 +22,8 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/module.h> + #include "drmP.h" #include "via_drm.h" #include "via_drv.h" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 13afddc1f034..dff8fc767152 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -24,6 +24,7 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
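Aside: the ttm_bo.c hunk above closes a race in the delayed-destroy path by bailing out right after retaking lru_lock if the BO has already left the ddestroy list, instead of discovering that only after the reserve. The shape of that fix, reduced to a pthread sketch with invented names (my_obj, on_list):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct my_obj {
    bool on_list;   /* stands in for !list_empty(&bo->ddestroy) */
};

/* Returns 0 when there is nothing left to do, mirroring the early-out
 * the patch adds before the (potentially blocking) reserve step. */
static int cleanup_refs(struct my_obj *obj)
{
    pthread_mutex_lock(&lru_lock);

    if (!obj->on_list) {
        /* someone else already tore it down while we were unlocked */
        pthread_mutex_unlock(&lru_lock);
        return 0;
    }

    /* ... reserve + actual teardown would happen here ... */
    obj->on_list = false;
    pthread_mutex_unlock(&lru_lock);
    return 0;
}

int main(void)
{
    struct my_obj o = { .on_list = true };

    cleanup_refs(&o);   /* does the work */
    cleanup_refs(&o);   /* second call is a no-op */
    printf("on_list = %d\n", o.on_list);
    return 0;
}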
* **************************************************************************/ +#include <linux/module.h> #include "drmP.h" #include "vmwgfx_drv.h" @@ -103,6 +104,9 @@ #define DRM_IOCTL_VMW_PRESENT_READBACK \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ struct drm_vmw_present_readback_arg) +#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ + struct drm_vmw_update_layout_arg) /** * The core DRM version of this macro doesn't account for @@ -165,6 +169,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, + vmw_kms_update_layout_ioctl, + DRM_MASTER | DRM_UNLOCKED), }; static struct pci_device_id vmw_pci_id_list[] = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 30589d0aecd9..8cca91a93bde 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -40,9 +40,9 @@ #include "ttm/ttm_module.h" #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20111008" +#define VMWGFX_DRIVER_DATE "20111025" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 2 +#define VMWGFX_DRIVER_MINOR 3 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) @@ -633,6 +633,8 @@ int vmw_kms_readback(struct vmw_private *dev_priv, struct drm_vmw_fence_rep __user *user_fence_rep, struct drm_vmw_rect *clips, uint32_t num_clips); +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /** * Overlay control - vmwgfx_overlay.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 070797b7b03a..34e51a1695b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -26,6 +26,8 @@ * **************************************************************************/ +#include <linux/export.h> + #include "drmP.h" #include "vmwgfx_drv.h" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8b14dfd513a1..37d40545ed77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -105,12 +105,17 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, struct vmw_dma_buffer *dmabuf = NULL; int ret; + /* A lot of the code assumes this */ + if (handle && (width != 64 || height != 64)) + return -EINVAL; + if (handle) { ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, &surface); if (!ret) { if (!surface->snooper.image) { DRM_ERROR("surface not suitable for cursor\n"); + vmw_surface_unreference(&surface); return -EINVAL; } } else { @@ -176,7 +181,9 @@ err_unreserve: return 0; } - vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); + vmw_cursor_update_position(dev_priv, true, + du->cursor_x + du->hotspot_x, + du->cursor_y + du->hotspot_y); return 0; } @@ -191,7 +198,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) du->cursor_y = y + crtc->y; vmw_cursor_update_position(dev_priv, shown, - du->cursor_x, du->cursor_y); + du->cursor_x + du->hotspot_x, + du->cursor_y + du->hotspot_y); return 0; } @@ -212,7 +220,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, SVGA3dCmdHeader header; SVGA3dCmdSurfaceDMA dma; } *cmd; - int ret; + int i, ret; cmd = container_of(header, struct vmw_dma_cmd, header); @@ -234,16 +242,19 @@ void vmw_kms_cursor_snoop(struct 
vmw_surface *srf, box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / sizeof(SVGA3dCopyBox); - if (cmd->dma.guest.pitch != (64 * 4) || - cmd->dma.guest.ptr.offset % PAGE_SIZE || + if (cmd->dma.guest.ptr.offset % PAGE_SIZE || box->x != 0 || box->y != 0 || box->z != 0 || box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->w != 64 || box->h != 64 || box->d != 1 || - box_count != 1) { + box->d != 1 || box_count != 1) { /* TODO handle none page aligned offsets */ - /* TODO handle partial uploads and pitch != 256 */ - /* TODO handle more then one copy (size != 64) */ - DRM_ERROR("lazy programmer, can't handle weird stuff\n"); + /* TODO handle more dst & src != 0 */ + /* TODO handle more then one copy */ + DRM_ERROR("Cant snoop dma request for cursor!\n"); + DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", + box->srcx, box->srcy, box->srcz, + box->x, box->y, box->z, + box->w, box->h, box->d, box_count, + cmd->dma.guest.ptr.offset); return; } @@ -262,7 +273,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, virtual = ttm_kmap_obj_virtual(&map, &dummy); - memcpy(srf->snooper.image, virtual, 64*64*4); + if (box->w == 64 && cmd->dma.guest.pitch == 64*4) { + memcpy(srf->snooper.image, virtual, 64*64*4); + } else { + /* Image is unsigned pointer. */ + for (i = 0; i < box->h; i++) + memcpy(srf->snooper.image + i * 64, + virtual + i * cmd->dma.guest.pitch, + box->w * 4); + } + srf->snooper.age++; /* we can't call this function from this function since execbuf has @@ -394,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, top = clips->y1; bottom = clips->y2; - clips_ptr = clips; - for (i = 1; i < num_clips; i++, clips_ptr += inc) { + /* skip the first clip rect */ + for (i = 1, clips_ptr = clips + inc; + i < num_clips; i++, clips_ptr += inc) { left = min_t(int, left, (int)clips_ptr->x1); right = max_t(int, right, (int)clips_ptr->x2); top = min_t(int, top, (int)clips_ptr->y1); @@ -994,7 +1015,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, required_size = mode_cmd->pitch * mode_cmd->height; if (unlikely(required_size > (u64) dev_priv->vram_size)) { DRM_ERROR("VRAM size is too small for requested mode.\n"); - return NULL; + return ERR_PTR(-ENOMEM); } /* @@ -1307,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv) * drm_encoder_cleanup which takes the lock we deadlock. */ drm_mode_config_cleanup(dev_priv->dev); - vmw_kms_close_legacy_display_system(dev_priv); + if (dev_priv->sou_priv) + vmw_kms_close_screen_object_display(dev_priv); + else + vmw_kms_close_legacy_display_system(dev_priv); return 0; } @@ -1517,6 +1541,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, du->pref_width = rects[du->unit].w; du->pref_height = rects[du->unit].h; du->pref_active = true; + du->gui_x = rects[du->unit].x; + du->gui_y = rects[du->unit].y; } else { du->pref_width = 800; du->pref_height = 600; @@ -1572,12 +1598,14 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) uint32_t num_displays; struct drm_device *dev = connector->dev; struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_display_unit *du = vmw_connector_to_du(connector); mutex_lock(&dev_priv->hw_mutex); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); mutex_unlock(&dev_priv->hw_mutex); - return ((vmw_connector_to_du(connector)->unit < num_displays) ? + return ((vmw_connector_to_du(connector)->unit < num_displays && + du->pref_active) ? 
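Aside: the cursor-snoop change above keeps the single memcpy fast path when the DMA is a full-width 64-pixel copy with a matching pitch, and otherwise copies the rectangle row by row at the guest's pitch. A standalone version of that row-wise copy; copy_rect and the buffer sizes are illustrative, the real destination being the 64x64 ARGB snooper image.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CURSOR_W 64
#define CURSOR_H 64
#define BPP      4          /* 32-bit ARGB */

/* Copy a w x h pixel rectangle from a source with its own pitch into a
 * tightly packed 64-wide destination, one row at a time.  The fast path
 * mirrors the patch: full-width copy with matching pitch grabs the whole
 * 64x64 image at once. */
static void copy_rect(uint8_t *dst, const uint8_t *src,
                      unsigned src_pitch, unsigned w, unsigned h)
{
    unsigned i;

    if (w == CURSOR_W && src_pitch == CURSOR_W * BPP) {
        memcpy(dst, src, CURSOR_W * CURSOR_H * BPP);
        return;
    }
    for (i = 0; i < h; i++)
        memcpy(dst + i * CURSOR_W * BPP,
               src + i * src_pitch,
               w * BPP);
}

int main(void)
{
    static uint8_t src[128 * CURSOR_H * BPP];   /* guest surface, pitch 512 */
    static uint8_t dst[CURSOR_W * CURSOR_H * BPP];

    memset(src, 0xab, sizeof(src));
    copy_rect(dst, src, 128 * BPP, 48, 48);     /* partial 48x48 upload */
    printf("dst[0] = 0x%02x\n", dst[0]);
    return 0;
}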
connector_status_connected : connector_status_disconnected); } @@ -1658,6 +1686,28 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, }; +/** + * vmw_guess_mode_timing - Provide fake timings for a + * 60Hz vrefresh mode. + * + * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay + * members filled in. + */ +static void vmw_guess_mode_timing(struct drm_display_mode *mode) +{ + mode->hsync_start = mode->hdisplay + 50; + mode->hsync_end = mode->hsync_start + 50; + mode->htotal = mode->hsync_end + 50; + + mode->vsync_start = mode->vdisplay + 50; + mode->vsync_end = mode->vsync_start + 50; + mode->vtotal = mode->vsync_end + 50; + + mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6; + mode->vrefresh = drm_mode_vrefresh(mode); +} + + int vmw_du_connector_fill_modes(struct drm_connector *connector, uint32_t max_width, uint32_t max_height) { @@ -1680,18 +1730,23 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, return 0; mode->hdisplay = du->pref_width; mode->vdisplay = du->pref_height; - mode->vrefresh = drm_mode_vrefresh(mode); + vmw_guess_mode_timing(mode); + if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, mode->vdisplay)) { drm_mode_probed_add(connector, mode); + } else { + drm_mode_destroy(dev, mode); + mode = NULL; + } - if (du->pref_mode) { - list_del_init(&du->pref_mode->head); - drm_mode_destroy(dev, du->pref_mode); - } - - du->pref_mode = mode; + if (du->pref_mode) { + list_del_init(&du->pref_mode->head); + drm_mode_destroy(dev, du->pref_mode); } + + /* mode might be null here, this is intended */ + du->pref_mode = mode; } for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { @@ -1712,6 +1767,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, drm_mode_probed_add(connector, mode); } + /* Move the prefered mode first, help apps pick the right mode. 
*/ + if (du->pref_mode) + list_move(&du->pref_mode->head, &connector->probed_modes); + drm_mode_connector_list_update(connector); return 1; @@ -1723,3 +1782,64 @@ int vmw_du_connector_set_property(struct drm_connector *connector, { return 0; } + + +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_vmw_update_layout_arg *arg = + (struct drm_vmw_update_layout_arg *)data; + struct vmw_master *vmaster = vmw_master(file_priv->master); + void __user *user_rects; + struct drm_vmw_rect *rects; + unsigned rects_size; + int ret; + int i; + struct drm_mode_config *mode_config = &dev->mode_config; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + if (!arg->num_outputs) { + struct drm_vmw_rect def_rect = {0, 0, 800, 600}; + vmw_du_update_layout(dev_priv, 1, &def_rect); + goto out_unlock; + } + + rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); + rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), + GFP_KERNEL); + if (unlikely(!rects)) { + ret = -ENOMEM; + goto out_unlock; + } + + user_rects = (void __user *)(unsigned long)arg->rects; + ret = copy_from_user(rects, user_rects, rects_size); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to get rects.\n"); + ret = -EFAULT; + goto out_free; + } + + for (i = 0; i < arg->num_outputs; ++i) { + if (rects[i].x < 0 || + rects[i].y < 0 || + rects[i].x + rects[i].w > mode_config->max_width || + rects[i].y + rects[i].h > mode_config->max_height) { + DRM_ERROR("Invalid GUI layout.\n"); + ret = -EINVAL; + goto out_free; + } + } + + vmw_du_update_layout(dev_priv, arg->num_outputs, rects); + +out_free: + kfree(rects); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index db0b901f8c3f..af8e6e5bd964 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -96,6 +96,13 @@ struct vmw_display_unit { unsigned pref_height; bool pref_active; struct drm_display_mode *pref_mode; + + /* + * Gui positioning + */ + int gui_x; + int gui_y; + bool is_implicit; }; #define vmw_crtc_to_du(x) \ @@ -126,8 +133,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, int vmw_du_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val); -int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, - struct drm_vmw_rect *rects); + /* * Legacy display unit functions - vmwgfx_ldu.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 92f56bc594eb..90c5e3928491 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -337,13 +337,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) ldu->base.pref_width = 800; ldu->base.pref_height = 600; ldu->base.pref_mode = NULL; + ldu->base.is_implicit = true; drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); + DRM_MODE_CONNECTOR_VIRTUAL); connector->status = vmw_du_connector_detect(connector, true); drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_VIRTUAL); drm_mode_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 
477b2a9eb3c2..4defdcf1c72e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -36,12 +36,9 @@ container_of(x, struct vmw_screen_object_unit, base.connector) struct vmw_screen_object_display { - struct list_head active; + unsigned num_implicit; - unsigned num_active; - unsigned last_num_active; - - struct vmw_framebuffer *fb; + struct vmw_framebuffer *implicit_fb; }; /** @@ -54,13 +51,11 @@ struct vmw_screen_object_unit { struct vmw_dma_buffer *buffer; /**< Backing store buffer */ bool defined; - - struct list_head active; + bool active_implicit; }; static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) { - list_del_init(&sou->active); vmw_display_unit_cleanup(&sou->base); kfree(sou); } @@ -75,58 +70,31 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) vmw_sou_destroy(vmw_crtc_to_sou(crtc)); } -static int vmw_sou_del_active(struct vmw_private *vmw_priv, +static void vmw_sou_del_active(struct vmw_private *vmw_priv, struct vmw_screen_object_unit *sou) { struct vmw_screen_object_display *ld = vmw_priv->sou_priv; - if (list_empty(&sou->active)) - return 0; - /* Must init otherwise list_empty(&sou->active) will not work. */ - list_del_init(&sou->active); - if (--(ld->num_active) == 0) { - BUG_ON(!ld->fb); - if (ld->fb->unpin) - ld->fb->unpin(ld->fb); - ld->fb = NULL; + if (sou->active_implicit) { + if (--(ld->num_implicit) == 0) + ld->implicit_fb = NULL; + sou->active_implicit = false; } - - return 0; } -static int vmw_sou_add_active(struct vmw_private *vmw_priv, +static void vmw_sou_add_active(struct vmw_private *vmw_priv, struct vmw_screen_object_unit *sou, struct vmw_framebuffer *vfb) { struct vmw_screen_object_display *ld = vmw_priv->sou_priv; - struct vmw_screen_object_unit *entry; - struct list_head *at; - - BUG_ON(!ld->num_active && ld->fb); - if (vfb != ld->fb) { - if (ld->fb && ld->fb->unpin) - ld->fb->unpin(ld->fb); - if (vfb->pin) - vfb->pin(vfb); - ld->fb = vfb; - } - - if (!list_empty(&sou->active)) - return 0; - at = &ld->active; - list_for_each_entry(entry, &ld->active, active) { - if (entry->base.unit > sou->base.unit) - break; + BUG_ON(!ld->num_implicit && ld->implicit_fb); - at = &entry->active; + if (!sou->active_implicit && sou->base.is_implicit) { + ld->implicit_fb = vfb; + sou->active_implicit = true; + ld->num_implicit++; } - - list_add(&sou->active, at); - - ld->num_active++; - - return 0; } /** @@ -164,8 +132,13 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, (sou->base.unit == 0 ? 
SVGA_SCREEN_IS_PRIMARY : 0); cmd->obj.size.width = mode->hdisplay; cmd->obj.size.height = mode->vdisplay; - cmd->obj.root.x = x; - cmd->obj.root.y = y; + if (sou->base.is_implicit) { + cmd->obj.root.x = x; + cmd->obj.root.y = y; + } else { + cmd->obj.root.x = sou->base.gui_x; + cmd->obj.root.y = sou->base.gui_y; + } /* Ok to assume that buffer is pinned in vram */ vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); @@ -312,10 +285,11 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) } /* sou only supports one fb active at the time */ - if (dev_priv->sou_priv->fb && vfb && - !(dev_priv->sou_priv->num_active == 1 && - !list_empty(&sou->active)) && - dev_priv->sou_priv->fb != vfb) { + if (sou->base.is_implicit && + dev_priv->sou_priv->implicit_fb && vfb && + !(dev_priv->sou_priv->num_implicit == 1 && + sou->active_implicit) && + dev_priv->sou_priv->implicit_fb != vfb) { DRM_ERROR("Multiple framebuffers not supported\n"); return -EINVAL; } @@ -471,19 +445,20 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) encoder = &sou->base.encoder; connector = &sou->base.connector; - INIT_LIST_HEAD(&sou->active); + sou->active_implicit = false; sou->base.pref_active = (unit == 0); sou->base.pref_width = 800; sou->base.pref_height = 600; sou->base.pref_mode = NULL; + sou->base.is_implicit = true; drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); + DRM_MODE_CONNECTOR_VIRTUAL); connector->status = vmw_du_connector_detect(connector, true); drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_VIRTUAL); drm_mode_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; @@ -520,10 +495,8 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) if (unlikely(!dev_priv->sou_priv)) goto err_no_mem; - INIT_LIST_HEAD(&dev_priv->sou_priv->active); - dev_priv->sou_priv->num_active = 0; - dev_priv->sou_priv->last_num_active = 0; - dev_priv->sou_priv->fb = NULL; + dev_priv->sou_priv->num_implicit = 0; + dev_priv->sou_priv->implicit_fb = NULL; ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); if (unlikely(ret != 0)) @@ -558,9 +531,6 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) drm_vblank_cleanup(dev); - if (!list_empty(&dev_priv->sou_priv->active)) - DRM_ERROR("Still have active outputs when unloading driver"); - kfree(dev_priv->sou_priv); return 0; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index c72f1c0b5e63..111d956d8e7d 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) while (new_bus) { new_bridge = new_bus->self; - if (new_bridge) { - /* go through list of devices already registered */ - list_for_each_entry(same_bridge_vgadev, &vga_list, list) { - bus = same_bridge_vgadev->pdev->bus; - bridge = bus->self; - - /* see if the share a bridge with this device */ - if (new_bridge == bridge) { - /* if their direct parent bridge is the same - as any bridge of this device then it can't be used - for that device */ - same_bridge_vgadev->bridge_has_one_vga = false; - } + /* go through list of devices already registered */ + list_for_each_entry(same_bridge_vgadev, &vga_list, list) { + bus = same_bridge_vgadev->pdev->bus; + bridge = bus->self; + + /* see if the share a bridge with this device */ + if (new_bridge == bridge) { + /* if their direct 
parent bridge is the same + as any bridge of this device then it can't be used + for that device */ + same_bridge_vgadev->bridge_has_one_vga = false; + } - /* now iterate the previous devices bridge hierarchy */ - /* if the new devices parent bridge is in the other devices - hierarchy then we can't use it to control this device */ - while (bus) { - bridge = bus->self; - if (bridge) { - if (bridge == vgadev->pdev->bus->self) - vgadev->bridge_has_one_vga = false; - } - bus = bus->parent; + /* now iterate the previous devices bridge hierarchy */ + /* if the new devices parent bridge is in the other devices + hierarchy then we can't use it to control this device */ + while (bus) { + bridge = bus->self; + if (bridge) { + if (bridge == vgadev->pdev->bus->self) + vgadev->bridge_has_one_vga = false; } + bus = bus->parent; } } new_bus = new_bus->parent; @@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, uc = &priv->cards[i]; } - if (!uc) - return -EINVAL; + if (!uc) { + ret_val = -EINVAL; + goto done; + } - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) - return -EINVAL; + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { + ret_val = -EINVAL; + goto done; + } - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) - return -EINVAL; + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { + ret_val = -EINVAL; + goto done; + } vga_put(pdev, io_state); |
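
The reworked vmw_kms_cursor_snoop() hunk above stops requiring a full 64x64 DMA with a 256-byte pitch and instead copies the snooped cursor row by row whenever the box is narrower or the guest pitch differs. Below is a minimal userspace sketch of that copy pattern, not the driver code itself: CURSOR_DIM, CURSOR_PITCH and copy_cursor_rows() are made-up names, and the buffers are plain byte arrays rather than a TTM kmap of guest memory.

#include <stdint.h>
#include <string.h>

#define CURSOR_DIM	64			/* cursor image is 64x64 ARGB */
#define CURSOR_PITCH	(CURSOR_DIM * 4)	/* bytes per destination row */

static void copy_cursor_rows(uint8_t *snoop_image,	/* 64*64*4-byte image */
			     const uint8_t *dma_virtual, /* mapped source */
			     unsigned box_w, unsigned box_h,
			     unsigned guest_pitch)
{
	unsigned i;

	if (box_w == CURSOR_DIM && guest_pitch == CURSOR_PITCH) {
		/* Fast path: source rows are contiguous and full width. */
		memcpy(snoop_image, dma_virtual,
		       (size_t)CURSOR_DIM * CURSOR_DIM * 4);
		return;
	}

	/*
	 * Partial upload: copy box_w pixels per row, honouring the guest
	 * pitch on the source side and the fixed 64-pixel-wide cursor
	 * image on the destination side.
	 */
	for (i = 0; i < box_h; i++)
		memcpy(snoop_image + (size_t)i * CURSOR_PITCH,
		       dma_virtual + (size_t)i * guest_pitch,
		       (size_t)box_w * 4);
}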
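
The do_surface_dirty_sou() hunk above seeds a bounding box from the first clip rect and then folds in the remaining rects, starting the loop at index 1 with a stride of inc elements. A standalone sketch of that accumulation follows; struct clip_rect and union_clips() are assumed names for this example, not the driver's types.

struct clip_rect {
	int x1, y1;	/* top-left, inclusive */
	int x2, y2;	/* bottom-right, exclusive */
};

static inline int min_int(int a, int b) { return a < b ? a : b; }
static inline int max_int(int a, int b) { return a > b ? a : b; }

/*
 * inc is the stride, in elements, between successive rects; the first
 * rect seeds the box, so the loop skips it and starts at index 1, just
 * like the patched driver loop.
 */
static struct clip_rect union_clips(const struct clip_rect *clips,
				    unsigned num_clips, unsigned inc)
{
	struct clip_rect box = clips[0];
	unsigned i;

	for (i = 1; i < num_clips; i++) {
		const struct clip_rect *c = &clips[i * inc];

		box.x1 = min_int(box.x1, c->x1);
		box.y1 = min_int(box.y1, c->y1);
		box.x2 = max_int(box.x2, c->x2);
		box.y2 = max_int(box.y2, c->y2);
	}
	return box;
}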
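
The new vmw_guess_mode_timing() helper above pads hdisplay/vdisplay with fixed 50-unit blanking intervals and derives a pixel clock that works out to roughly 60 Hz (clock in kHz = htotal * vtotal / 100 * 6). Here is a self-contained sketch that reproduces the arithmetic and prints the resulting refresh rate; struct fake_mode is a stand-in for struct drm_display_mode and the refresh computation is only an approximation of drm_mode_vrefresh().

#include <stdio.h>
#include <stdint.h>

struct fake_mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
	int clock;		/* kHz, as in DRM */
	int vrefresh;		/* Hz */
};

static void guess_mode_timing(struct fake_mode *m)
{
	m->hsync_start = m->hdisplay + 50;
	m->hsync_end = m->hsync_start + 50;
	m->htotal = m->hsync_end + 50;

	m->vsync_start = m->vdisplay + 50;
	m->vsync_end = m->vsync_start + 50;
	m->vtotal = m->vsync_end + 50;

	/* clock[kHz] = htotal * vtotal * 60 / 1000, written as /100*6 */
	m->clock = (int)((uint32_t)m->htotal * (uint32_t)m->vtotal / 100 * 6);
	/* refresh[Hz] ~= clock[kHz] * 1000 / (htotal * vtotal) */
	m->vrefresh = (int)(m->clock * 1000LL / (m->htotal * m->vtotal));
}

int main(void)
{
	struct fake_mode m = { .hdisplay = 1280, .vdisplay = 800 };

	guess_mode_timing(&m);
	printf("1280x800: clock %d kHz, refresh ~%d Hz\n", m.clock, m.vrefresh);
	return 0;
}

For 1280x800 this gives htotal = 1430, vtotal = 950 and a clock of 81510 kHz, which lands just above 60 Hz.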
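
The new vmw_kms_update_layout_ioctl() above copies an array of drm_vmw_rect from user space and rejects any layout whose rects fall outside the mode_config limits. The validation step is small enough to sketch in isolation; struct gui_rect and validate_gui_layout() are illustrative stand-ins, not the ioctl's real types.

#include <errno.h>
#include <stdint.h>

struct gui_rect {
	int32_t x, y;
	uint32_t w, h;
};

static int validate_gui_layout(const struct gui_rect *rects, unsigned num,
			       uint32_t max_width, uint32_t max_height)
{
	unsigned i;

	for (i = 0; i < num; ++i) {
		/* Widen before adding so large w/h cannot wrap around. */
		if (rects[i].x < 0 || rects[i].y < 0 ||
		    rects[i].x + (int64_t)rects[i].w > max_width ||
		    rects[i].y + (int64_t)rects[i].h > max_height)
			return -EINVAL;	/* reject the whole layout */
	}
	return 0;
}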
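
The vmwgfx_scrn.c changes above drop the per-unit active list in favour of a single implicit framebuffer pointer plus a counter of screen objects scanning it out. A reduced model of that bookkeeping, assuming sou_display and sou_unit as stand-ins for the driver structures (and leaving out the pin/unpin handling the patch removes):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct framebuffer;			/* opaque for this sketch */

struct sou_display {
	unsigned num_implicit;		/* units using the implicit fb */
	struct framebuffer *implicit_fb;
};

struct sou_unit {
	bool is_implicit;
	bool active_implicit;
};

static void sou_add_active(struct sou_display *ld, struct sou_unit *sou,
			   struct framebuffer *fb)
{
	/* A shared fb with no users would be a bookkeeping bug. */
	assert(ld->num_implicit || !ld->implicit_fb);

	if (!sou->active_implicit && sou->is_implicit) {
		ld->implicit_fb = fb;
		sou->active_implicit = true;
		ld->num_implicit++;
	}
}

static void sou_del_active(struct sou_display *ld, struct sou_unit *sou)
{
	if (sou->active_implicit) {
		if (--ld->num_implicit == 0)
			ld->implicit_fb = NULL;
		sou->active_implicit = false;
	}
}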
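
The vgaarb hunk above flattens the bridge-sharing walk: for a newly registered VGA device it climbs the chain of parent bridges and, against every device already on vga_list, clears the bridge_has_one_vga flag on whichever side turns out to share a bridge. Below is a simplified model under assumed types (fake_bus and fake_vga are not the arbiter's structures), kept close to the loop structure in the patch rather than to real PCI data.

#include <stdbool.h>
#include <stddef.h>

struct fake_bus {
	struct fake_bus *parent;
	void *self;			/* the bridge device, NULL at the root */
};

struct fake_vga {
	struct fake_bus *bus;
	bool bridge_has_one_vga;
	struct fake_vga *next;		/* stand-in for the vga_list entry */
};

static void check_bridge_sharing(struct fake_vga *list, struct fake_vga *newdev)
{
	struct fake_bus *new_bus;

	newdev->bridge_has_one_vga = true;

	for (new_bus = newdev->bus; new_bus; new_bus = new_bus->parent) {
		void *new_bridge = new_bus->self;
		struct fake_vga *other;

		for (other = list; other; other = other->next) {
			struct fake_bus *bus = other->bus;

			/* Direct parent bridge shared with this device? */
			if (new_bridge && new_bridge == bus->self)
				other->bridge_has_one_vga = false;

			/*
			 * Walk the other device's bridge hierarchy; if the
			 * new device's direct parent appears in it, the new
			 * device cannot be controlled through its own bridge.
			 */
			for (; bus; bus = bus->parent)
				if (bus->self && bus->self == newdev->bus->self)
					newdev->bridge_has_one_vga = false;
		}
	}
}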