Diffstat (limited to 'drivers/char/tty_io.c')
-rw-r--r--    drivers/char/tty_io.c    87
1 file changed, 59 insertions(+), 28 deletions(-)
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 9d657127f313..e5953f3433f3 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -469,21 +469,19 @@ static void tty_ldisc_enable(struct tty_struct *tty)
static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
- int retval = 0;
- struct tty_ldisc o_ldisc;
+ int retval = 0;
+ struct tty_ldisc o_ldisc;
char buf[64];
int work;
unsigned long flags;
struct tty_ldisc *ld;
+ struct tty_struct *o_tty;
if ((ldisc < N_TTY) || (ldisc >= NR_LDISCS))
return -EINVAL;
restart:
- if (tty->ldisc.num == ldisc)
- return 0; /* We are already in the desired discipline */
-
ld = tty_ldisc_get(ldisc);
/* Eduardo Blanco <ejbs@cs.cs.com.uy> */
/* Cyrus Durgin <cider@speakeasy.org> */
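
The first hunk drops the early "already in the desired discipline" shortcut; as the next hunk shows, that check is now redone only after tty_ldisc_get() has succeeded and tty_wait_until_sent() has drained output, so the shortcut path must give back the reference it just took. A condensed, non-compilable restatement of the reordered fast path, using only calls that appear in this diff:

	ld = tty_ldisc_get(ldisc);          /* take a reference on the new discipline */
	if (ld == NULL)
		return -EINVAL;             /* discipline is not registered */

	tty_wait_until_sent(tty, 0);        /* let pending output drain first */

	if (tty->ldisc.num == ldisc) {      /* already running the requested ldisc... */
		tty_ldisc_put(ldisc);       /* ...so balance the reference and return */
		return 0;
	}
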
@@ -494,45 +492,74 @@ restart:
if (ld == NULL)
return -EINVAL;
- o_ldisc = tty->ldisc;
-
tty_wait_until_sent(tty, 0);
+ if (tty->ldisc.num == ldisc) {
+ tty_ldisc_put(ldisc);
+ return 0;
+ }
+
+ o_ldisc = tty->ldisc;
+ o_tty = tty->link;
+
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
* prevents anyone taking a reference once it is clear.
* We need the lock to avoid racing reference takers.
*/
-
+
spin_lock_irqsave(&tty_ldisc_lock, flags);
- if(tty->ldisc.refcount)
- {
- /* Free the new ldisc we grabbed. Must drop the lock
- first. */
+ if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
+ if (tty->ldisc.refcount) {
+ /* Free the new ldisc we grabbed. Must drop the lock
+ first. */
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ tty_ldisc_put(ldisc);
+ /*
+ * There are several reasons we may be busy, including
+ * random momentary I/O traffic. We must therefore
+ * retry. We could distinguish between blocking ops
+ * and retries if we made tty_ldisc_wait() smarter. That
+ * is up for discussion.
+ */
+ if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+ if (o_tty && o_tty->ldisc.refcount) {
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ tty_ldisc_put(ldisc);
+ if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
+ return -ERESTARTSYS;
+ goto restart;
+ }
+ }
+
+ /* If the TTY_LDISC bit has been cleared, another ldisc change is already in progress; wait for it to finish and retry */
+
+ if (!test_bit(TTY_LDISC, &tty->flags)) {
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
tty_ldisc_put(ldisc);
- /*
- * There are several reasons we may be busy, including
- * random momentary I/O traffic. We must therefore
- * retry. We could distinguish between blocking ops
- * and retries if we made tty_ldisc_wait() smarter. That
- * is up for discussion.
- */
- if(wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
- return -ERESTARTSYS;
+ ld = tty_ldisc_ref_wait(tty);
+ tty_ldisc_deref(ld);
goto restart;
}
- clear_bit(TTY_LDISC, &tty->flags);
+
+ clear_bit(TTY_LDISC, &tty->flags);
clear_bit(TTY_DONT_FLIP, &tty->flags);
+ if (o_tty) {
+ clear_bit(TTY_LDISC, &o_tty->flags);
+ clear_bit(TTY_DONT_FLIP, &o_tty->flags);
+ }
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
+
/*
* From this point on we know nobody has an ldisc
* usage reference, nor can they obtain one until
* we say so later on.
*/
-
+
work = cancel_delayed_work(&tty->flip.work);
/*
* Wait for ->hangup_work and ->flip.work handlers to terminate
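
Taken together, the branches added above implement a small claim protocol: a changer may proceed only when neither side of a pty pair still holds a line-discipline reference and when it still owns the TTY_LDISC bit; otherwise it drops the new ldisc reference it took, sleeps, and retries. A condensed restatement of that protocol, assembled only from the added lines in this hunk (the second wait branch for o_tty is folded into a comment):

	spin_lock_irqsave(&tty_ldisc_lock, flags);

	if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
		/* One side is still referenced: give back the new ldisc, sleep
		 * until the count drops, and start over.  (The patch has a
		 * matching branch that waits on o_tty's refcount instead.) */
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
		tty_ldisc_put(ldisc);
		if (wait_event_interruptible(tty_ldisc_wait,
					     tty->ldisc.refcount == 0) < 0)
			return -ERESTARTSYS;
		goto restart;
	}

	if (!test_bit(TTY_LDISC, &tty->flags)) {
		/* Lost the race with another changer: wait for it to finish
		 * (ref_wait blocks until TTY_LDISC is set again), then retry. */
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
		tty_ldisc_put(ldisc);
		ld = tty_ldisc_ref_wait(tty);
		tty_ldisc_deref(ld);
		goto restart;
	}

	/* Claim both ends: with TTY_LDISC clear, no new references can be taken. */
	clear_bit(TTY_LDISC, &tty->flags);
	clear_bit(TTY_DONT_FLIP, &tty->flags);
	if (o_tty) {
		clear_bit(TTY_LDISC, &o_tty->flags);
		clear_bit(TTY_DONT_FLIP, &o_tty->flags);
	}
	spin_unlock_irqrestore(&tty_ldisc_lock, flags);
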
@@ -583,10 +610,12 @@ restart:
*/
tty_ldisc_enable(tty);
+ if (o_tty)
+ tty_ldisc_enable(o_tty);
/* Restart it in case no characters kick it off. Safe if
already running */
- if(work)
+ if (work)
schedule_delayed_work(&tty->flip.work, 1);
return retval;
}
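
Once the new discipline is installed, the patch re-enables reference taking on both sides of the pair, which is what lets the wait/retry paths above make progress. The helper's body is not shown in this diff; the sketch below is an assumption based on the 2.6-era definition that sits just above the first hunk's context:

	/* Assumed shape of the helper (not part of this diff): republish the
	 * TTY_LDISC bit and wake anyone sleeping on tty_ldisc_wait. */
	static void tty_ldisc_enable(struct tty_struct *tty)
	{
		set_bit(TTY_LDISC, &tty->flags);
		wake_up(&tty_ldisc_wait);
	}
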
@@ -2425,6 +2454,7 @@ static void __do_SAK(void *arg)
int i;
struct file *filp;
struct tty_ldisc *disc;
+ struct fdtable *fdt;
if (!tty)
return;
@@ -2450,8 +2480,9 @@ static void __do_SAK(void *arg)
}
task_lock(p);
if (p->files) {
- spin_lock(&p->files->file_lock);
- for (i=0; i < p->files->max_fds; i++) {
+ rcu_read_lock();
+ fdt = files_fdtable(p->files);
+ for (i=0; i < fdt->max_fds; i++) {
filp = fcheck_files(p->files, i);
if (!filp)
continue;
@@ -2464,7 +2495,7 @@ static void __do_SAK(void *arg)
break;
}
}
- spin_unlock(&p->files->file_lock);
+ rcu_read_unlock();
}
task_unlock(p);
} while_each_task_pid(session, PIDTYPE_SID, p);
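
The __do_SAK() change swaps the per-process file_lock for RCU: with the fd table now published through an RCU-managed struct fdtable, a reader only needs rcu_read_lock() around files_fdtable() and fcheck_files(). A condensed view of the loop after the patch, keeping only calls visible in the hunks above (the tty-matching and signalling logic is unchanged and elided):

	task_lock(p);
	if (p->files) {
		rcu_read_lock();                        /* readers no longer take file_lock */
		fdt = files_fdtable(p->files);          /* RCU-protected view of the fd table */
		for (i = 0; i < fdt->max_fds; i++) {
			filp = fcheck_files(p->files, i);  /* valid only under rcu_read_lock() */
			if (!filp)
				continue;
			/* ... unchanged: if filp refers to this tty, signal the task ... */
		}
		rcu_read_unlock();
	}
	task_unlock(p);
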