Diffstat (limited to 'drivers/gpu/drm/udl')
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c      |  19
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h      |   5
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c     | 128
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c  |  49
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c |  46
5 files changed, 97 insertions(+), 150 deletions(-)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5703277c6f52..91effdcefb6d 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -21,8 +21,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ int ret;
- return drm_mode_config_helper_suspend(dev);
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
+
+ udl_sync_pending_urbs(dev);
+ return 0;
}
static int udl_usb_resume(struct usb_interface *interface)
@@ -32,6 +38,16 @@ static int udl_usb_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
+static int udl_usb_reset_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
+
+ udl_select_std_channel(udl);
+
+ return drm_mode_config_helper_resume(dev);
+}
+
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
@@ -140,6 +156,7 @@ static struct usb_driver udl_driver = {
.disconnect = udl_usb_disconnect,
.suspend = udl_usb_suspend,
.resume = udl_usb_resume,
+ .reset_resume = udl_usb_reset_resume,
.id_table = id_table,
};
module_usb_driver(udl_driver);
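
For readability, here are the two PM callbacks as they read once the hunks above are applied (reassembled from the diff, not new code): suspend quiesces the display pipe first and then waits for every in-flight URB, so nothing is still addressing the device when the USB core powers it down, while reset_resume re-runs the standard-channel selection because a USB reset discards that device-side state before the mode is restored.

static int udl_usb_suspend(struct usb_interface *interface,
			   pm_message_t message)
{
	struct drm_device *dev = usb_get_intfdata(interface);
	int ret;

	ret = drm_mode_config_helper_suspend(dev);
	if (ret)
		return ret;

	/* wait for all in-flight URBs before the device is powered down */
	udl_sync_pending_urbs(dev);
	return 0;
}

static int udl_usb_reset_resume(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);
	struct udl_device *udl = to_udl(dev);

	/* a USB reset clears the channel selection; redo it before restoring the mode */
	udl_select_std_channel(udl);

	return drm_mode_config_helper_resume(dev);
}
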
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc16a13316e4..b4cc7cc568c7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -34,14 +34,13 @@ struct udl_device;
struct urb_node {
struct list_head entry;
struct udl_device *dev;
- struct delayed_work release_urb_work;
struct urb *urb;
};
struct urb_list {
struct list_head list;
spinlock_t lock;
- struct semaphore limit_sem;
+ wait_queue_head_t sleep;
int available;
int count;
size_t size;
@@ -78,6 +77,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct drm_device *dev);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
@@ -87,6 +87,7 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
int udl_drop_usb(struct drm_device *dev);
+int udl_select_std_channel(struct udl_device *udl);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
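
The header change is the core of the rework: the counting semaphore (and the delayed work that existed only to release it safely) is replaced by a wait queue guarded by the same spinlock that already protects the free list. A minimal sketch of that idiom, with made-up names (pool, node, pool_get, pool_put) that are not part of the driver:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct pool {
	struct list_head free;		/* free nodes, protected by @lock */
	spinlock_t lock;
	wait_queue_head_t sleep;	/* woken whenever @free gains an entry */
};

struct node {
	struct list_head entry;
};

/* Consumer: sleep (with @lock dropped) until a node is free or @timeout expires. */
static struct node *pool_get(struct pool *p, long timeout)
{
	struct node *n = NULL;

	spin_lock_irq(&p->lock);
	if (wait_event_lock_irq_timeout(p->sleep, !list_empty(&p->free),
					p->lock, timeout)) {
		n = list_first_entry(&p->free, struct node, entry);
		list_del_init(&n->entry);
	}
	spin_unlock_irq(&p->lock);

	return n;
}

/* Producer, e.g. a URB completion handler: put a node back and wake one waiter. */
static void pool_put(struct pool *p, struct node *n)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	list_add_tail(&n->entry, &p->free);
	spin_unlock_irqrestore(&p->lock, flags);

	wake_up(&p->sleep);
}

Waking a wait queue from the completion handler removes the need for the #if 0'd fb_defio workaround and the per-URB delayed work, which is why both are dropped in udl_main.c below.
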
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 853f147036f6..061cb88c08a2 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -20,11 +20,10 @@
#define NR_USB_REQUEST_CHANNEL 0x12
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
-#define WRITES_IN_FLIGHT (4)
+#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
-#define GET_URB_TIMEOUT HZ
-#define FREE_URB_TIMEOUT (HZ*2)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
@@ -95,7 +94,7 @@ success:
/*
* Need to ensure a channel is selected before submitting URBs
*/
-static int udl_select_std_channel(struct udl_device *udl)
+int udl_select_std_channel(struct udl_device *udl)
{
static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
@@ -119,14 +118,6 @@ static int udl_select_std_channel(struct udl_device *udl)
return ret < 0 ? ret : 0;
}
-static void udl_release_urb_work(struct work_struct *work)
-{
- struct urb_node *unode = container_of(work, struct urb_node,
- release_urb_work.work);
-
- up(&unode->dev->urbs.limit_sem);
-}
-
void udl_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
@@ -137,6 +128,7 @@ void udl_urb_completion(struct urb *urb)
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
+ urb->status == -EPROTO ||
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
@@ -150,49 +142,34 @@ void udl_urb_completion(struct urb *urb)
udl->urbs.available++;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
-#if 0
- /*
- * When using fb_defio, we deadlock if up() is called
- * while another is waiting. So queue to another process.
- */
- if (fb_defio)
- schedule_delayed_work(&unode->release_urb_work, 0);
- else
-#endif
- up(&udl->urbs.limit_sem);
+ wake_up(&udl->urbs.sleep);
}
static void udl_free_urb_list(struct drm_device *dev)
{
struct udl_device *udl = to_udl(dev);
- int count = udl->urbs.count;
- struct list_head *node;
struct urb_node *unode;
struct urb *urb;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
- while (count--) {
- down(&udl->urbs.limit_sem);
-
+ while (udl->urbs.count) {
spin_lock_irq(&udl->urbs.lock);
-
- node = udl->urbs.list.next; /* have reserved one with sem */
- list_del_init(node);
-
+ urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
+ udl->urbs.count--;
spin_unlock_irq(&udl->urbs.lock);
-
- unode = list_entry(node, struct urb_node, entry);
- urb = unode->urb;
-
+ if (WARN_ON(!urb))
+ break;
+ unode = urb->context;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, udl->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
- kfree(node);
+ kfree(unode);
}
- udl->urbs.count = 0;
+
+ wake_up_all(&udl->urbs.sleep);
}
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
@@ -205,24 +182,20 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
struct usb_device *udev = udl_to_usb_device(udl);
spin_lock_init(&udl->urbs.lock);
-
-retry:
- udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
-
- sema_init(&udl->urbs.limit_sem, 0);
+ init_waitqueue_head(&udl->urbs.sleep);
udl->urbs.count = 0;
udl->urbs.available = 0;
+retry:
+ udl->urbs.size = size;
+
while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = udl;
- INIT_DELAYED_WORK(&unode->release_urb_work,
- udl_release_urb_work);
-
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
@@ -250,7 +223,6 @@ retry:
list_add_tail(&unode->entry, &udl->urbs.list);
- up(&udl->urbs.limit_sem);
udl->urbs.count++;
udl->urbs.available++;
}
@@ -260,35 +232,41 @@ retry:
return udl->urbs.count;
}
-struct urb *udl_get_urb(struct drm_device *dev)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
- struct list_head *entry;
struct urb_node *unode;
- struct urb *urb = NULL;
+
+ assert_spin_locked(&udl->urbs.lock);
/* Wait for an in-flight buffer to complete and get re-queued */
- ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
- if (ret) {
- DRM_INFO("wait for urb interrupted: %x available: %d\n",
- ret, udl->urbs.available);
- goto error;
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ !udl->urbs.count ||
+ !list_empty(&udl->urbs.list),
+ udl->urbs.lock, timeout)) {
+ DRM_INFO("wait for urb interrupted: available: %d\n",
+ udl->urbs.available);
+ return NULL;
}
- spin_lock_irq(&udl->urbs.lock);
+ if (!udl->urbs.count)
+ return NULL;
- BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
- entry = udl->urbs.list.next;
- list_del_init(entry);
+ unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
+ list_del_init(&unode->entry);
udl->urbs.available--;
- spin_unlock_irq(&udl->urbs.lock);
+ return unode ? unode->urb : NULL;
+}
- unode = list_entry(entry, struct urb_node, entry);
- urb = unode->urb;
+#define GET_URB_TIMEOUT HZ
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+ struct urb *urb;
-error:
+ spin_lock_irq(&udl->urbs.lock);
+ urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
+ spin_unlock_irq(&udl->urbs.lock);
return urb;
}
@@ -297,10 +275,13 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
struct udl_device *udl = to_udl(dev);
int ret;
- BUG_ON(len > udl->urbs.size);
-
+ if (WARN_ON(len > udl->urbs.size)) {
+ ret = -EINVAL;
+ goto error;
+ }
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_ATOMIC);
+ error:
if (ret) {
udl_urb_completion(urb); /* because no one else will */
DRM_ERROR("usb_submit_urb error %x\n", ret);
@@ -308,6 +289,21 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
+/* wait until all pending URBs have been processed */
+void udl_sync_pending_urbs(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+
+ spin_lock_irq(&udl->urbs.lock);
+ /* 2 seconds as a sane timeout */
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ udl->urbs.available == udl->urbs.count,
+ udl->urbs.lock,
+ msecs_to_jiffies(2000)))
+ drm_err(dev, "Timeout for syncing pending URBs\n");
+ spin_unlock_irq(&udl->urbs.lock);
+}
+
int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
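
The rule that makes the udl_main.c rework hold together is the contract of wait_event_lock_irq_timeout(): it must be entered with urbs.lock held and interrupts disabled, it drops the lock while sleeping, retakes it before every evaluation of the condition and before returning, and it returns 0 only on timeout. Reassembled from the hunks above (not new code), the locked getter reads:

static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
	struct urb_node *unode;

	assert_spin_locked(&udl->urbs.lock);

	/* Wait for an in-flight buffer to complete and get re-queued */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 !udl->urbs.count ||
					 !list_empty(&udl->urbs.list),
					 udl->urbs.lock, timeout)) {
		DRM_INFO("wait for urb interrupted: available: %d\n",
			 udl->urbs.available);
		return NULL;	/* timed out */
	}

	if (!udl->urbs.count)
		return NULL;	/* URB list already torn down */

	unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
	list_del_init(&unode->entry);
	udl->urbs.available--;

	return unode ? unode->urb : NULL;
}

udl_get_urb() calls it with a one-second timeout, udl_free_urb_list() with MAX_SCHEDULE_TIMEOUT, and the '!udl->urbs.count' leg together with the wake_up_all() at the end of udl_free_urb_list() lets any remaining waiters bail out once the list is gone. udl_sync_pending_urbs() reuses the same wait queue to block until available == count, i.e. until every submitted URB has completed.
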
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index e67c40a48fb4..ec6876f449f3 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -242,38 +242,15 @@ static long udl_log_cpp(unsigned int cpp)
return __ffs(cpp);
}
-static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
- int width, int height)
-{
- int x1, x2;
-
- if (WARN_ON_ONCE(x < 0) ||
- WARN_ON_ONCE(y < 0) ||
- WARN_ON_ONCE(width < 0) ||
- WARN_ON_ONCE(height < 0))
- return -EINVAL;
-
- x1 = ALIGN_DOWN(x, sizeof(unsigned long));
- x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
-
- clip->x1 = x1;
- clip->y1 = y;
- clip->x2 = x2;
- clip->y2 = y + height;
-
- return 0;
-}
-
static int udl_handle_damage(struct drm_framebuffer *fb,
const struct iosys_map *map,
- int x, int y, int width, int height)
+ const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
struct urb *urb;
- struct drm_rect clip;
int log_bpp;
ret = udl_log_cpp(fb->format->cpp[0]);
@@ -281,12 +258,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- ret = udl_aligned_damage_clip(&clip, x, y, width, height);
- if (ret)
- return ret;
- else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
- return -EINVAL;
-
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -298,11 +269,11 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
}
cmd = urb->transfer_buffer;
- for (i = clip.y1; i < clip.y2; i++) {
+ for (i = clip->y1; i < clip->y2; i++) {
const int line_offset = fb->pitches[0] * i;
- const int byte_offset = line_offset + (clip.x1 << log_bpp);
- const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
- const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+ const int byte_offset = line_offset + (clip->x1 << log_bpp);
+ const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
+ const int byte_width = drm_rect_width(clip) << log_bpp;
ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
@@ -355,6 +326,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
char *buf;
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
@@ -380,10 +352,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
udl->mode_buf_len = wrptr - buf;
- udl_handle_damage(fb, &shadow_plane_state->data[0], 0, 0, fb->width, fb->height);
-
- if (!crtc_state->mode_changed)
- return;
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &clip);
/* enable display */
udl_crtc_write_mode_to_hw(crtc);
@@ -423,8 +392,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
- udl_handle_damage(fb, &shadow_plane_state->data[0], rect.x1, rect.y1,
- rect.x2 - rect.x1, rect.y2 - rect.y1);
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
}
static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
@@ -479,6 +447,7 @@ int udl_modeset_init(struct drm_device *dev)
format_count, NULL, connector);
if (ret)
return ret;
+ drm_plane_enable_fb_damage_clips(&udl->display_pipe.plane);
drm_mode_config_reset(dev);
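
With damage expressed as a struct drm_rect, the unsigned-long alignment clipping is no longer needed and the transfer loop works straight off the clip. Reassembled from the hunks above (with the real error path replaced by a plain break here), it computes per damaged scanline the byte offset into the shadow buffer, the byte offset in the device's linear framebuffer, and the number of bytes to send:

	for (i = clip->y1; i < clip->y2; i++) {
		const int line_offset = fb->pitches[0] * i;
		const int byte_offset = line_offset + (clip->x1 << log_bpp);
		const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
		const int byte_width = drm_rect_width(clip) << log_bpp;

		/*
		 * Purely illustrative numbers: for RGB565 (log_bpp == 1), a
		 * 1024-pixel-wide framebuffer with pitches[0] == 2048 and a
		 * clip of x1 = 16, x2 = 272 on line i = 10, this gives
		 * byte_offset == dev_byte_offset == 20512 and byte_width == 512.
		 */
		ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
				       &cmd, byte_offset, dev_byte_offset,
				       byte_width);
		if (ret)
			break;	/* the real function unwinds CPU access and URB state here */
	}

The enable path passes a full-frame clip built with DRM_RECT_INIT(0, 0, fb->width, fb->height), the update path passes the rectangle merged by drm_atomic_helper_damage_merged(), and drm_plane_enable_fb_damage_clips() exposes the FB_DAMAGE_CLIPS property so userspace can hand the driver those rectangles in the first place.
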
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 971927669d6b..b57844632dbd 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -25,46 +25,6 @@
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-/*
- * Trims identical data from front and back of line
- * Sets new front buffer address and width
- * And returns byte count of identical pixels
- * Assumes CPU natural alignment (unsigned long)
- * for back and front buffer ptrs and width
- */
-#if 0
-static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
-{
- int j, k;
- const unsigned long *back = (const unsigned long *) bback;
- const unsigned long *front = (const unsigned long *) *bfront;
- const int width = *width_bytes / sizeof(unsigned long);
- int identical = width;
- int start = width;
- int end = width;
-
- for (j = 0; j < width; j++) {
- if (back[j] != front[j]) {
- start = j;
- break;
- }
- }
-
- for (k = width - 1; k > j; k--) {
- if (back[k] != front[k]) {
- end = k+1;
- break;
- }
- }
-
- identical = start + (width - end);
- *bfront = (u8 *) &front[start];
- *width_bytes = (end - start) * sizeof(unsigned long);
-
- return identical * sizeof(unsigned long);
-}
-#endif
-
static inline u16 pixel32_to_be16(const uint32_t pixel)
{
return (((pixel >> 3) & 0x001f) |
@@ -220,7 +180,11 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(log_bpp == 1 || log_bpp == 2));
+ if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) {
+ /* need to finish URB at error from this function */
+ udl_urb_completion(urb);
+ return -EINVAL;
+ }
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
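
In udl_render_hline() the BUG_ON() becomes a WARN_ON(), and the error path hands the URB back through udl_urb_completion() so it returns to the free list and the available/count bookkeeping stays balanced instead of leaking an in-flight slot. The pixel32_to_be16() helper kept as context above packs an XRGB8888 pixel into RGB565 for the 16 bpp path; only its first term is visible in this diff, so the following is a sketch consistent with that term rather than the driver's exact code:

#include <stdint.h>

/* Sketch of XRGB8888 -> RGB565 packing (illustrative, not copied from the driver). */
static inline uint16_t xrgb8888_to_rgb565(uint32_t pixel)
{
	return (uint16_t)(((pixel >> 3) & 0x001f) |	/* blue  -> bits 4..0  */
			  ((pixel >> 5) & 0x07e0) |	/* green -> bits 10..5 */
			  ((pixel >> 8) & 0xf800));	/* red   -> bits 15..11 */
}

/* Example: 0x00ff8040 (R = 0xff, G = 0x80, B = 0x40) packs to 0xfc08. */
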