author     Dave Airlie <airlied@redhat.com>  2010-10-06 03:10:48 +0200
committer  Dave Airlie <airlied@redhat.com>  2010-10-06 03:10:48 +0200
commit     fb7ba2114bcd8bb51640c20bc68f89164b29b9ed (patch)
tree       80b4a779130a477680a72109257fb8c19d66cf22 /drivers
parent     Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next (diff)
parent     vmwgfx: Fix fb VRAM pinning failure due to fragmentation (diff)
Merge remote branch 'korg/drm-fixes' into drm-vmware-next
necessary for some of the vmware fixes to be pushed in.
Conflicts:
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/intel_fb.c
include/drm/drmP.h
Diffstat (limited to 'drivers')
45 files changed, 391 insertions, 227 deletions
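Before the diff itself, a minimal sketch of the GEM reference-handling pattern that the merged drm fixes converge on and that underlies the drm_gem.c and drmP.h conflicts: the per-object handle count becomes a plain atomic_t, and a driver's create ioctl drops its allocation reference unconditionally right after drm_gem_handle_create(), since the handle now holds that reference. The function and struct names example_gem_create_ioctl, struct example_gem_create and driver_gem_object_alloc below are hypothetical placeholders; drm_gem_handle_create() and drm_gem_object_unreference_unlocked() are the real DRM helpers used in the i915, nouveau and radeon hunks of this merge.

#include "drmP.h"	/* DRM core helpers; kernel build context assumed */

/* Hypothetical ioctl argument block, standing in for the driver's own. */
struct example_gem_create {
	__u64 size;
	__u32 handle;
};

static int example_gem_create_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct example_gem_create *args = data;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	/* driver_gem_object_alloc() is a placeholder for the driver's
	 * object allocator (i915_gem_alloc_object(), nouveau/radeon BO
	 * creation, etc.); it returns an object with one reference held. */
	obj = driver_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

Dropping the allocation reference unconditionally means a failed drm_gem_handle_create() frees the object through the normal unreference path, and the separate drm_gem_object_handle_unreference_unlocked() step in the create path disappears; that is exactly the change applied to i915_gem_create_ioctl(), nouveau_gem_ioctl_new() and radeon_gem_create_ioctl() in the diff below.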
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 55d03ed05000..529a0dbe9fc6 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c @@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc); * user_data: A pointer the data that is copied to the buffer. * size: The Number of bytes to copy. */ -extern int drm_buffer_copy_from_user(struct drm_buffer *buf, - void __user *user_data, int size) +int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size) { int nr_pages = size / PAGE_SIZE + 1; int idx; @@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf, { int idx = drm_buffer_index(buf); int page = drm_buffer_page(buf); - void *obj = 0; + void *obj = NULL; if (idx + objsize <= PAGE_SIZE) { obj = &buf->data[page][idx]; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 3ea0692ce59a..ea1c4b019ebf 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -142,7 +142,7 @@ int drm_gem_object_init(struct drm_device *dev, return -ENOMEM; kref_init(&obj->refcount); - kref_init(&obj->handlecount); + atomic_set(&obj->handle_count, 0); obj->size = size; return 0; @@ -448,26 +448,6 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); -/** - * Called after the last reference to the object has been lost. - * Must be called without holding struct_mutex - * - * Frees the object - */ -void -drm_gem_object_free_unlocked(struct kref *kref) -{ - struct drm_gem_object *obj = (struct drm_gem_object *) kref; - struct drm_device *dev = obj->dev; - - if (dev->driver->gem_free_object != NULL) { - mutex_lock(&dev->struct_mutex); - dev->driver->gem_free_object(obj); - mutex_unlock(&dev->struct_mutex); - } -} -EXPORT_SYMBOL(drm_gem_object_free_unlocked); - static void drm_gem_object_ref_bug(struct kref *list_kref) { BUG(); @@ -480,12 +460,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) * called before drm_gem_object_free or we'll be touching * freed memory */ -void -drm_gem_object_handle_free(struct kref *kref) +void drm_gem_object_handle_free(struct drm_gem_object *obj) { - struct drm_gem_object *obj = container_of(kref, - struct drm_gem_object, - handlecount); struct drm_device *dev = obj->dev; /* Remove any name for this object */ @@ -512,6 +488,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_reference(obj); + + mutex_lock(&obj->dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_open); @@ -519,7 +499,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - drm_gem_object_unreference_unlocked(obj); + mutex_lock(&obj->dev->struct_mutex); + drm_vm_close_locked(vma); + drm_gem_object_unreference(obj); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 5aff08e236cf..3cdbaf379bb5 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) seq_printf(m, "%6d %8zd %7d %8d\n", obj->name, obj->size, - atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->handle_count), atomic_read(&obj->refcount.refcount)); return 0; } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index ee879d6bb522..2c3fcbdfd8ff 100644 --- a/drivers/gpu/drm/drm_vm.c +++ 
b/drivers/gpu/drm/drm_vm.c @@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) mutex_unlock(&dev->struct_mutex); } -/** - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. - */ -static void drm_vm_close(struct vm_area_struct *vma) +void drm_vm_close_locked(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; @@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) vma->vm_start, vma->vm_end - vma->vm_start); atomic_dec(&dev->vma_count); - mutex_lock(&dev->struct_mutex); list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); @@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) break; } } +} + +/** + * \c close method for all virtual memory types. + * + * \param vma virtual memory area. + * + * Search the \p vma private data entry in drm_device::vmalist, unlink it, and + * free it. + */ +static void drm_vm_close(struct vm_area_struct *vma) +{ + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->minor->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_close_locked(vma); mutex_unlock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 61b4caf220fa..fb07e73581e8 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) static const struct file_operations i810_buffer_fops = { .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i810_ioctl, .mmap = i810_mmap_buffers, .fasync = drm_fasync, }; diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 671aa18415ac..cc92c7e6236f 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) static const struct file_operations i830_buffer_fops = { .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i830_ioctl, .mmap = i830_mmap_buffers, .fasync = drm_fasync, }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 29e97c075421..100a7537980e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -244,14 +244,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); return ret; } - /* Sink the floating reference from kref_init(handlecount) */ - drm_gem_object_handle_unreference_unlocked(obj); - args->handle = handle; return 0; } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7dc50acd65d7..b937ccfa7bec 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -224,8 +224,10 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); drm_framebuffer_cleanup(&ifb->base); - if (ifb->obj) + if (ifb->obj) { + drm_gem_object_handle_unreference_unlocked(ifb->obj); drm_gem_object_unreference_unlocked(ifb->obj); + } } int 
intel_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 87186a4bbf03..fc737037f751 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) if (nv_encoder->dcb->type == OUTPUT_LVDS && (nv_encoder->dcb->lvdsconf.use_straps_for_mode || dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { - nv_connector->native_mode = drm_mode_create(dev); - nouveau_bios_fp_mode(dev, nv_connector->native_mode); + struct drm_display_mode mode; + + nouveau_bios_fp_mode(dev, &mode); + nv_connector->native_mode = drm_mode_duplicate(dev, &mode); } /* Find the native mode if this is a digital panel, if we didn't diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index dbd30b2e43fd..d2047713dc59 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) if (nouveau_fb->nvbo) { nouveau_bo_unmap(nouveau_fb->nvbo); + drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem); drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ead7b8fc53fc..19620a6709f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, goto out; ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(nvbo->gem); out: - drm_gem_object_handle_unreference_unlocked(nvbo->gem); - - if (ret) - drm_gem_object_unreference_unlocked(nvbo->gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50ce..3c9964a8fbad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) mutex_lock(&dev->struct_mutex); nouveau_bo_unpin(chan->notifier_bo); mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem); drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); drm_mm_takedown(&chan->notifier_heap); } diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 1bc72c3190a9..fe359a239df3 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS #define SW_I2C_CNTL_WRITE1BIT 6 //==============================VESA definition Portion=============================== -#define VESA_OEM_PRODUCT_REV '01.00' +#define VESA_OEM_PRODUCT_REV "01.00" #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support #define VESA_MODE_WIN_ATTRIBUTE 7 #define VESA_WIN_SIZE 64 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index afc18d87fdca..7a04959ba0ee 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev) if (i < rdev->usec_timeout) { DRM_INFO("ib test succeeded in %u usecs\n", i); } else { - 
DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", + DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } @@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL */ - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && + rdev->vram_scratch.ptr) { void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ebae14c4b768..68932ba7b8a4 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ + if ((dev->pdev->device == 0x796e) && + (dev->pdev->subsystem_vendor == 0x1462) && + (dev->pdev->subsystem_device == 0x7302)) { + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) + return false; + } + /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 127a395f70fb..b92d2f2fcbed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP5_SUPPORT) DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); + if (devices & ATOM_DEVICE_DFP6_SUPPORT) + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_TV1_SUPPORT) DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_CV_SUPPORT) @@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - if (radeon_fb->obj) + if (radeon_fb->obj) { drm_gem_object_unreference_unlocked(radeon_fb->obj); + } drm_framebuffer_cleanup(fb); kfree(radeon_fb); } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d941..9cdf6a35bc2c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) ret = radeon_bo_reserve(rbo, false); if (likely(ret == 0)) { radeon_bo_kunmap(rbo); + radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } + drm_gem_object_handle_unreference(gobj); drm_gem_object_unreference_unlocked(gobj); } @@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; - struct radeon_bo *rbo; - int r; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; @@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb } if (rfb->obj) { - rbo = rfb->obj->driver_private; - r = radeon_bo_reserve(rbo, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rbo); - radeon_bo_unpin(rbo); - radeon_bo_unreserve(rbo); - } - 
drm_gem_object_unreference_unlocked(rfb->obj); + radeonfb_destroy_pinned_object(rfb->obj); + rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_cleanup(&rfb->base); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24c..d1e595d91723 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return r; } r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); if (r) { - drm_gem_object_unreference_unlocked(gobj); return r; } - drm_gem_object_handle_unreference_unlocked(gobj); args->handle = handle; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5eee3c41d124..8fbbe1c6ebbd 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) */ int radeon_driver_firstopen_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + if (rdev->powered_down) + return -EINVAL; return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e04232..3451a82adba7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); fbo->vm_node = NULL; + atomic_set(&fbo->cpu_writers, 0); fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); kref_init(&fbo->list_kref); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ca904799f018..b1e02fffd3cc 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -69,7 +69,7 @@ struct ttm_page_pool { spinlock_t lock; bool fill_lock; struct list_head list; - int gfp_flags; + gfp_t gfp_flags; unsigned npages; char *name; unsigned long nfrees; @@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, * This function is reentrant if caller updates count depending on number of * pages returned in pages array. 
*/ -static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, +static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { struct page **caching_array; @@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct page *p = NULL; - int gfp_flags = GFP_USER; + gfp_t gfp_flags = GFP_USER; int r; /* set zero flag for page allocation if required */ @@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) return 0; } -void ttm_page_alloc_fini() +void ttm_page_alloc_fini(void) { int i; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e645f44e4302..5c845b6ec492 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { {0, 0, 0} }; -static char *vmw_devname = "vmwgfx"; +static int enable_fbdev; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); +MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); +module_param_named(enable_fbdev, enable_fbdev, int, 0600); + static void vmw_print_capabilities(uint32_t capabilities) { DRM_INFO("Capabilities:\n"); @@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) { int ret; - vmw_kms_save_vga(dev_priv); - ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); if (unlikely(ret != 0)) { DRM_ERROR("Unable to initialize FIFO.\n"); @@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) static void vmw_release_device(struct vmw_private *dev_priv) { vmw_fifo_release(dev_priv, &dev_priv->fifo); - vmw_kms_restore_vga(dev_priv); } +int vmw_3d_resource_inc(struct vmw_private *dev_priv) +{ + int ret = 0; + + mutex_lock(&dev_priv->release_mutex); + if (unlikely(dev_priv->num_3d_resources++ == 0)) { + ret = vmw_request_device(dev_priv); + if (unlikely(ret != 0)) + --dev_priv->num_3d_resources; + } + mutex_unlock(&dev_priv->release_mutex); + return ret; +} + + +void vmw_3d_resource_dec(struct vmw_private *dev_priv) +{ + int32_t n3d; + + mutex_lock(&dev_priv->release_mutex); + if (unlikely(--dev_priv->num_3d_resources == 0)) + vmw_release_device(dev_priv); + n3d = (int32_t) dev_priv->num_3d_resources; + mutex_unlock(&dev_priv->release_mutex); + + BUG_ON(n3d < 0); +} static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { @@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->last_read_sequence = (uint32_t) -100; mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); + mutex_init(&dev_priv->release_mutex); rwlock_init(&dev_priv->resource_lock); idr_init(&dev_priv->context_idr); idr_init(&dev_priv->surface_idr); @@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); + dev_priv->enable_fb = enable_fbdev; + mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); @@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev->dev_private = dev_priv; - if (!dev->devname) - dev->devname = vmw_devname; - - if (dev_priv->capabilities & 
SVGA_CAP_IRQMASK) { - ret = drm_irq_install(dev); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed installing irq: %d\n", ret); - goto out_no_irq; - } - } - ret = pci_request_regions(dev->pdev, "vmwgfx probe"); dev_priv->stealth = (ret != 0); if (dev_priv->stealth) { @@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_device; } } - ret = vmw_request_device(dev_priv); + ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) - goto out_no_device; - vmw_kms_init(dev_priv); + goto out_no_kms; vmw_overlay_init(dev_priv); - vmw_fb_init(dev_priv); + if (dev_priv->enable_fb) { + ret = vmw_3d_resource_inc(dev_priv); + if (unlikely(ret != 0)) + goto out_no_fifo; + vmw_kms_save_vga(dev_priv); + vmw_fb_init(dev_priv); + DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? + "Detected device 3D availability.\n" : + "Detected no device 3D availability.\n"); + } else { + DRM_INFO("Delayed 3D detection since we're not " + "running the device in SVGA mode yet.\n"); + } + + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { + ret = drm_irq_install(dev); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + goto out_no_irq; + } + } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); - DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); - return 0; -out_no_device: - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); - if (dev->devname == vmw_devname) - dev->devname = NULL; out_no_irq: + if (dev_priv->enable_fb) { + vmw_fb_close(dev_priv); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } +out_no_fifo: + vmw_overlay_close(dev_priv); + vmw_kms_close(dev_priv); +out_no_kms: + if (dev_priv->stealth) + pci_release_region(dev->pdev, 2); + else + pci_release_regions(dev->pdev); +out_no_device: ttm_object_device_release(&dev_priv->tdev); out_err4: iounmap(dev_priv->mmio_virt); @@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); - vmw_fb_close(dev_priv); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); + if (dev_priv->enable_fb) { + vmw_fb_close(dev_priv); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); - vmw_release_device(dev_priv); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); - if (dev->devname == vmw_devname) - dev->devname = NULL; ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, @@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; - if (unlikely(ioctl->cmd != cmd)) { + if (unlikely(ioctl->cmd_drv != cmd)) { DRM_ERROR("Invalid command format, ioctl %d\n", nr - DRM_COMMAND_BASE); return -EINVAL; @@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, struct vmw_master *vmaster = vmw_master(file_priv->master); int ret = 0; + if (!dev_priv->enable_fb) { + ret = vmw_3d_resource_inc(dev_priv); + if (unlikely(ret != 0)) + return ret; + vmw_kms_save_vga(dev_priv); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 0); + mutex_unlock(&dev_priv->hw_mutex); + } + if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = 
ttm_vt_lock(&active->lock, false, vmw_fp->tfile); @@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, return 0; out_no_active_lock: - vmw_release_device(dev_priv); + if (!dev_priv->enable_fb) { + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); + mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } return ret; } @@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); + if (!dev_priv->enable_fb) { + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); + if (unlikely(ret != 0)) + DRM_ERROR("Unable to clean VRAM on master drop.\n"); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); + mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } + dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); ttm_vt_unlock(&dev_priv->fbdev_master.lock); - vmw_fb_on(dev_priv); + if (dev_priv->enable_fb) + vmw_fb_on(dev_priv); } @@ -722,6 +796,7 @@ static struct drm_driver driver = { .irq_postinstall = vmw_irq_postinstall, .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, + .get_vblank_counter = vmw_get_vblank_counter, .reclaim_buffers_locked = NULL, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60bf..58de6393f611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -277,6 +277,7 @@ struct vmw_private { bool stealth; bool is_opened; + bool enable_fb; /** * Master management. @@ -285,6 +286,9 @@ struct vmw_private { struct vmw_master *active_master; struct vmw_master fbdev_master; struct notifier_block pm_nb; + + struct mutex release_mutex; + uint32_t num_3d_resources; }; static inline struct vmw_private *vmw_priv(struct drm_device *dev) @@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, return val; } +int vmw_3d_resource_inc(struct vmw_private *dev_priv); +void vmw_3d_resource_dec(struct vmw_private *dev_priv); + /** * GMR utilities - vmwgfx_gmr.c */ @@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned bbp, unsigned depth); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); /** * Overlay control - vmwgfx_overlay.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c15..409e172f4abf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, if (unlikely(ret != 0)) goto err_unlock; + if (bo->mem.mem_type == TTM_PL_VRAM && + bo->mem.mm_node->start < bo->num_pages) + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, + false, false); + ret = ttm_bo_validate(bo, &ne_placement, false, false, false); /* Could probably bug on */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea954..0fe31766e4cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mutex_lock(&dev_priv->hw_mutex); dev_priv->enable_state = vmw_read(dev_priv, 
SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); vmw_write(dev_priv, SVGA_REG_ENABLE, 1); min = 4; @@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) dev_priv->config_done_state); vmw_write(dev_priv, SVGA_REG_ENABLE, dev_priv->enable_state); + vmw_write(dev_priv, SVGA_REG_TRACES, + dev_priv->traces_state); mutex_unlock(&dev_priv->hw_mutex); vmw_fence_queue_takedown(&fifo->fence_queue); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df868..e882ba099f0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + if (i == 0 && vmw_priv->num_displays == 1 && + save->width == 0 && save->height == 0) { + + /* + * It should be fairly safe to assume that these + * values are uninitialized. + */ + + save->width = vmw_priv->vga_width - save->pos_x; + save->height = vmw_priv->vga_height - save->pos_y; + } } + return 0; } @@ -984,3 +996,8 @@ out_unlock: ttm_read_unlock(&vmaster->lock); return ret; } + +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) +{ + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7083b1a24df3..11cb39e3accb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -27,6 +27,8 @@ #include "vmwgfx_kms.h" +#define VMWGFX_LDU_NUM_DU 8 + #define vmw_crtc_to_ldu(x) \ container_of(x, struct vmw_legacy_display_unit, base.crtc) #define vmw_encoder_to_ldu(x) \ @@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + int i; + int ret; + if (dev_priv->ldu_priv) { DRM_INFO("ldu system already on\n"); return -EINVAL; @@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) drm_mode_create_dirty_info_property(dev_priv->dev); - vmw_ldu_init(dev_priv, 0); - /* for old hardware without multimon only enable one display */ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { - vmw_ldu_init(dev_priv, 1); - vmw_ldu_init(dev_priv, 2); - vmw_ldu_init(dev_priv, 3); - vmw_ldu_init(dev_priv, 4); - vmw_ldu_init(dev_priv, 5); - vmw_ldu_init(dev_priv, 6); - vmw_ldu_init(dev_priv, 7); + for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) + vmw_ldu_init(dev_priv, i); + ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); + } else { + /* for old hardware without multimon only enable one display */ + vmw_ldu_init(dev_priv, 0); + ret = drm_vblank_init(dev, 1); } - return 0; + return ret; } int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + + drm_vblank_cleanup(dev); if (!dev_priv->ldu_priv) return -ENOSYS; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5c..c8c40e9979db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) cmd->body.cid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + 
vmw_3d_resource_dec(dev_priv); } static int vmw_context_init(struct vmw_private *dev_priv, @@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, cmd->body.cid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv); vmw_resource_activate(res, vmw_hw_context_destroy); return 0; } @@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) cmd->body.sid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_3d_resource_dec(dev_priv); } void vmw_surface_res_free(struct vmw_resource *res) @@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, } vmw_fifo_commit(dev_priv, submit_size); + (void) vmw_3d_resource_inc(dev_priv); vmw_resource_activate(res, vmw_hw_surface_destroy); return 0; } diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index b87569e96b16..f366f968155a 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); } -void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) +static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) { struct vga_device *vgadev; unsigned long flags; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4d4d09bdec0a..97499d00615a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -409,7 +409,7 @@ config SENSORS_CORETEMP config SENSORS_PKGTEMP tristate "Intel processor package temperature sensor" - depends on X86 && PCI && EXPERIMENTAL + depends on X86 && EXPERIMENTAL help If you say yes here you get support for the package level temperature sensor inside your CPU. Check documentation/driver for details. diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index de8111114f46..baa842a80b4b 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -423,9 +423,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; -#ifdef CONFIG_SMP struct cpuinfo_x86 *c = &cpu_data(cpu); -#endif + + /* + * CPUID.06H.EAX[0] indicates whether the CPU has thermal + * sensors. We check this bit only, all the early CPUs + * without thermal sensors will be filtered out. + */ + if (!cpu_has(c, X86_FEATURE_DTS)) { + printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" + " has no thermal sensor.\n", c->x86_model); + return 0; + } mutex_lock(&pdev_list_mutex); @@ -482,14 +491,22 @@ exit: static void coretemp_device_remove(unsigned int cpu) { - struct pdev_entry *p, *n; + struct pdev_entry *p; + unsigned int i; + mutex_lock(&pdev_list_mutex); - list_for_each_entry_safe(p, n, &pdev_list, list) { - if (p->cpu == cpu) { - platform_device_unregister(p->pdev); - list_del(&p->list); - kfree(p); - } + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu != cpu) + continue; + + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + for_each_cpu(i, cpu_sibling_mask(cpu)) + if (i != cpu && !coretemp_device_add(i)) + break; + return; } mutex_unlock(&pdev_list_mutex); } @@ -527,30 +544,21 @@ static int __init coretemp_init(void) if (err) goto exit; - for_each_online_cpu(i) { - struct cpuinfo_x86 *c = &cpu_data(i); - /* - * CPUID.06H.EAX[0] indicates whether the CPU has thermal - * sensors. 
We check this bit only, all the early CPUs - * without thermal sensors will be filtered out. - */ - if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) - coretemp_device_add(i); - else { - printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" - " has no thermal sensor.\n", c->x86_model); - } - } + for_each_online_cpu(i) + coretemp_device_add(i); + +#ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } +#endif register_hotcpu_notifier(&coretemp_cpu_notifier); return 0; -exit_driver_unreg: #ifndef CONFIG_HOTPLUG_CPU +exit_driver_unreg: platform_driver_unregister(&coretemp_driver); #endif exit: diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 6138f036b159..fc591ae53107 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) wake_up_interruptible(&lis3_dev.misc_wait); kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); out: - if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && + if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && lis3_dev.idev->input->users) return IRQ_WAKE_THREAD; return IRQ_HANDLED; @@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) * io-apic is not configurable (and generates a warning) but I keep it * in case of support for other hardware. */ - if (dev->whoami == WAI_8B) + if (dev->pdata && dev->whoami == WAI_8B) thread_fn = lis302dl_interrupt_thread1_8b; else thread_fn = NULL; diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c index 74157fcda6ed..f11903936c8b 100644 --- a/drivers/hwmon/pkgtemp.c +++ b/drivers/hwmon/pkgtemp.c @@ -33,7 +33,6 @@ #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> -#include <linux/pci.h> #include <asm/msr.h> #include <asm/processor.h> @@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); if (err) - goto exit_free; + goto exit_dev; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { @@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) exit_class: sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); +exit_dev: + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); exit_free: kfree(data); exit: @@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev) hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); platform_set_drvdata(pdev, NULL); kfree(data); return 0; @@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu) int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; -#ifdef CONFIG_SMP struct cpuinfo_x86 *c = &cpu_data(cpu); -#endif + + if (!cpu_has(c, X86_FEATURE_PTS)) + return 0; mutex_lock(&pdev_list_mutex); @@ -339,17 +342,18 @@ exit: #ifdef CONFIG_HOTPLUG_CPU static void pkgtemp_device_remove(unsigned int cpu) { - struct pdev_entry *p, *n; + struct pdev_entry *p; unsigned int i; int err; mutex_lock(&pdev_list_mutex); - list_for_each_entry_safe(p, n, &pdev_list, list) { + list_for_each_entry(p, &pdev_list, list) { if (p->cpu != cpu) continue; platform_device_unregister(p->pdev); list_del(&p->list); + mutex_unlock(&pdev_list_mutex); kfree(p); for_each_cpu(i, cpu_core_mask(cpu)) { if (i != cpu) { @@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu) break; 
} } - break; + return; } mutex_unlock(&pdev_list_mutex); } @@ -399,11 +403,6 @@ static int __init pkgtemp_init(void) goto exit; for_each_online_cpu(i) { - struct cpuinfo_x86 *c = &cpu_data(i); - - if (!cpu_has(c, X86_FEATURE_PTS)) - continue; - err = pkgtemp_device_add(i); if (err) goto exit_devices_unreg; diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h index 9952579425b9..1b3060eb2921 100644 --- a/drivers/staging/ti-st/st.h +++ b/drivers/staging/ti-st/st.h @@ -80,5 +80,4 @@ struct st_proto_s { extern long st_register(struct st_proto_s *); extern long st_unregister(enum proto_type); -extern struct platform_device *st_get_plat_device(void); #endif /* ST_H */ diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c index 063c9b1db1ab..b85d8bfdf600 100644 --- a/drivers/staging/ti-st/st_core.c +++ b/drivers/staging/ti-st/st_core.c @@ -38,7 +38,6 @@ #include "st_ll.h" #include "st.h" -#define VERBOSE /* strings to be used for rfkill entries and by * ST Core to be used for sysfs debug entry */ @@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto) long err = 0; unsigned long flags = 0; - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); pr_info("%s(%d) ", __func__, new_proto->type); if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL || new_proto->reg_complete_cb == NULL) { @@ -713,7 +712,7 @@ long st_unregister(enum proto_type type) pr_debug("%s: %d ", __func__, type); - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); if (type < ST_BT || type >= ST_MAX) { pr_err(" protocol %d not supported", type); return -EPROTONOSUPPORT; @@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb) #endif long len; - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); if (unlikely(skb == NULL || st_gdata == NULL || st_gdata->tty == NULL)) { pr_err("data/tty unavailable to perform write"); @@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty) struct st_data_s *st_gdata; pr_info("%s ", __func__); - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); st_gdata->tty = tty; tty->disc_data = st_gdata; diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h index e0c32d149f5f..8601320a679e 100644 --- a/drivers/staging/ti-st/st_core.h +++ b/drivers/staging/ti-st/st_core.h @@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **); void st_core_exit(struct st_data_s *); /* ask for reference from KIM */ -void st_kim_ref(struct st_data_s **); +void st_kim_ref(struct st_data_s **, int); #define GPS_STUB_TEST #ifdef GPS_STUB_TEST diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c index b4a6c7fdc4e6..9e99463f76e8 100644 --- a/drivers/staging/ti-st/st_kim.c +++ b/drivers/staging/ti-st/st_kim.c @@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = { PROTO_ENTRY(ST_GPS, "GPS"), }; +#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ +struct platform_device *st_kim_devices[MAX_ST_DEVICES]; /**********************************************************************/ /* internal functions */ /** + * st_get_plat_device - + * function which returns the reference to the platform device + * requested by id. As of now only 1 such device exists (id=0) + * the context requesting for reference can get the id to be + * requested by a. The protocol driver which is registering or + * b. the tty device which is opened. 
+ */ +static struct platform_device *st_get_plat_device(int id) +{ + return st_kim_devices[id]; +} + +/** * validate_firmware_response - * function to return whether the firmware response was proper * in case of error don't complete so that waiting for proper @@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state) struct kim_data_s *kim_gdata; pr_info(" %s ", __func__); - kim_pdev = st_get_plat_device(); + kim_pdev = st_get_plat_device(0); kim_gdata = dev_get_drvdata(&kim_pdev->dev); if (kim_gdata->gpios[type] == -1) { @@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked) * This would enable multiple such platform devices to exist * on a given platform */ -void st_kim_ref(struct st_data_s **core_data) +void st_kim_ref(struct st_data_s **core_data, int id) { struct platform_device *pdev; struct kim_data_s *kim_gdata; /* get kim_gdata reference from platform device */ - pdev = st_get_plat_device(); + pdev = st_get_plat_device(id); kim_gdata = dev_get_drvdata(&pdev->dev); *core_data = kim_gdata->core_data; } @@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev) long *gpios = pdev->dev.platform_data; struct kim_data_s *kim_gdata; + st_kim_devices[pdev->id] = pdev; kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); if (!kim_gdata) { pr_err("no mem to allocate"); diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 7e594449600e..9eed5b52d9de 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS If you are unsure about this, say N here. config USB_SUSPEND - bool "USB runtime power management (suspend/resume and wakeup)" + bool "USB runtime power management (autosuspend) and wakeup" depends on USB && PM_RUNTIME help If you say Y here, you can use driver calls or the sysfs - "power/level" file to suspend or resume individual USB - peripherals and to enable or disable autosuspend (see + "power/control" file to enable or disable autosuspend for + individual USB peripherals (see Documentation/usb/power-management.txt for more details). 
Also, USB "remote wakeup" signaling is supported, whereby some diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index f06f5dbc8cdc..1e6ccef2cf0c 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -159,9 +159,9 @@ void usb_major_cleanup(void) int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { - int retval = -EINVAL; + int retval; int minor_base = class_driver->minor_base; - int minor = 0; + int minor; char name[20]; char *temp; @@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf, */ minor_base = 0; #endif - intf->minor = -1; - - dbg ("looking for a minor, starting at %d", minor_base); if (class_driver->fops == NULL) - goto exit; + return -EINVAL; + if (intf->minor >= 0) + return -EADDRINUSE; + + retval = init_usb_class(); + if (retval) + return retval; + + dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); down_write(&minor_rwsem); for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { @@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf, continue; usb_minors[minor] = class_driver->fops; - - retval = 0; + intf->minor = minor; break; } up_write(&minor_rwsem); - - if (retval) - goto exit; - - retval = init_usb_class(); - if (retval) - goto exit; - - intf->minor = minor; + if (intf->minor < 0) + return -EXFULL; /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf, "%s", temp); if (IS_ERR(intf->usb_dev)) { down_write(&minor_rwsem); - usb_minors[intf->minor] = NULL; + usb_minors[minor] = NULL; + intf->minor = -1; up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } -exit: return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 844683e50383..9f0ce7de0e36 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1802,6 +1802,7 @@ free_interfaces: intf->dev.groups = usb_interface_groups; intf->dev.dma_mask = dev->dev.dma_mask; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); + intf->minor = -1; device_initialize(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 59dc3d351b60..5ab5bb89bae3 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c, index, transmit ? 'T' : 'R', cppi_ch); cppi_ch->hw_ep = ep; cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; + cppi_ch->channel.max_len = 0x7fffffff; DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 
'T' : 'R'); return &cppi_ch->channel; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6fca870e957e..d065e23f123e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req) #ifndef CONFIG_MUSB_PIO_ONLY if (is_dma_capable() && musb_ep->dma) { struct dma_controller *c = musb->dma_controller; + size_t request_size; + + /* setup DMA, then program endpoint CSR */ + request_size = min_t(size_t, request->length - request->actual, + musb_ep->dma->max_len); use_dma = (request->dma != DMA_ADDR_INVALID); @@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req) #ifdef CONFIG_USB_INVENTRA_DMA { - size_t request_size; - - /* setup DMA, then program endpoint CSR */ - request_size = min_t(size_t, request->length, - musb_ep->dma->max_len); if (request_size < musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; else @@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, 0, - request->dma, - request->length); + request->dma + request->actual, + request_size); if (!use_dma) { c->channel_release(musb_ep->dma); musb_ep->dma = NULL; @@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, request->zero, - request->dma, - request->length); + request->dma + request->actual, + request_size); #endif } #endif @@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum) request->zero = 0; } - /* ... or if not, then complete it. */ - musb_g_giveback(musb_ep, request, 0); - - /* - * Kickstart next transfer if appropriate; - * the packet that just completed might not - * be transmitted for hours or days. - * REVISIT for double buffering... - * FIXME revisit for stalls too... - */ - musb_ep_select(mbase, epnum); - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) - return; - - request = musb_ep->desc ? next_request(musb_ep) : NULL; - if (!request) { - DBG(4, "%s idle now\n", - musb_ep->end_point.name); - return; + if (request->actual == request->length) { + musb_g_giveback(musb_ep, request, 0); + request = musb_ep->desc ? next_request(musb_ep) : NULL; + if (!request) { + DBG(4, "%s idle now\n", + musb_ep->end_point.name); + return; + } } } @@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req) { const u8 epnum = req->epnum; struct usb_request *request = &req->request; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; unsigned fifo_count = 0; - u16 len = musb_ep->packet_sz; + u16 len; u16 csr = musb_readw(epio, MUSB_RXCSR); + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; + + len = musb_ep->packet_sz; /* We shouldn't get here while DMA is active, but we do... 
*/ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { @@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) */ csr |= MUSB_RXCSR_DMAENAB; -#ifdef USE_MODE1 csr |= MUSB_RXCSR_AUTOCLEAR; +#ifdef USE_MODE1 /* csr |= MUSB_RXCSR_DMAMODE; */ /* this special sequence (enabling and then @@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req) if (request->actual < request->length) { int transfer_size = 0; #ifdef USE_MODE1 - transfer_size = min(request->length, + transfer_size = min(request->length - request->actual, channel->max_len); #else - transfer_size = len; + transfer_size = min(request->length - request->actual, + (unsigned)len); #endif if (transfer_size <= musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; @@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) u16 csr; struct usb_request *request; void __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; musb_ep_select(mbase, epnum); @@ -1081,7 +1084,7 @@ struct free_record { /* * Context: controller locked, IRQs blocked. */ -static void musb_ep_restart(struct musb *musb, struct musb_request *req) +void musb_ep_restart(struct musb *musb, struct musb_request *req) { DBG(3, "<== %s request %p len %u on hw_ep%d\n", req->tx ? "TX/IN" : "RX/OUT", diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index c8b140325d82..572b1da7f2dc 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h @@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *); extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); +extern void musb_ep_restart(struct musb *, struct musb_request *); + #endif /* __MUSB_GADGET_H */ diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 59bef8f3a358..6dd03f4c5f49 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -261,6 +261,7 @@ __acquires(musb->lock) ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct musb_hw_ep *ep; + struct musb_request *request; void __iomem *regs; int is_in; u16 csr; @@ -302,6 +303,14 @@ __acquires(musb->lock) musb_writew(regs, MUSB_RXCSR, csr); } + /* Maybe start the first request in the queue */ + request = to_musb_request( + next_request(musb_ep)); + if (!musb_ep->busy && request) { + DBG(3, "restarting the request\n"); + musb_ep_restart(musb, request); + } + /* select ep0 again */ musb_ep_select(mbase, 0); } break; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 877d20b1dff9..9e65c47cc98b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma, qh->segsize = length; + /* + * Ensure the data reaches to main memory before starting + * DMA transfer + */ + wmb(); + if (!dma->channel_program(channel, pkt_size, mode, urb->transfer_dma + offset, length)) { dma->channel_release(channel); |