Diffstat (limited to 'arch/um/kernel/irq.c')
-rw-r--r--  arch/um/kernel/irq.c | 534
1 file changed, 278 insertions, 256 deletions
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 3577118bb4a5..3741d2380060 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -19,27 +19,40 @@
 #include <kern_util.h>
 #include <os.h>
 #include <irq_user.h>
+#include <irq_kern.h>
+#include <as-layout.h>
 
 extern void free_irqs(void);
 
 /* When epoll triggers we do not know why it did so
  * we can also have different IRQs for read and write.
- * This is why we keep a small irq_fd array for each fd -
+ * This is why we keep a small irq_reg array for each fd -
  * one entry per IRQ type
  */
+struct irq_reg {
+        void *id;
+        int irq;
+        /* it's cheaper to store this than to query it */
+        int events;
+        bool active;
+        bool pending;
+        bool wakeup;
+};
+
 struct irq_entry {
-        struct irq_entry *next;
+        struct list_head list;
         int fd;
-        struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
+        struct irq_reg reg[NUM_IRQ_TYPES];
+        bool suspended;
+        bool sigio_workaround;
 };
 
-static struct irq_entry *active_fds;
-
 static DEFINE_SPINLOCK(irq_lock);
+static LIST_HEAD(active_fds);
+static DECLARE_BITMAP(irqs_allocated, NR_IRQS);
 
-static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
+static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
 {
 /*
  * irq->active guards against reentry
@@ -49,23 +62,27 @@ static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
  */
         if (irq->active) {
                 irq->active = false;
+
                 do {
                         irq->pending = false;
                         do_IRQ(irq->irq, regs);
-                } while (irq->pending && (!irq->purge));
-                if (!irq->purge)
-                        irq->active = true;
+                } while (irq->pending);
+
+                irq->active = true;
         } else {
                 irq->pending = true;
         }
 }
 
+void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+{
+        /* nothing */
+}
+
 void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 {
         struct irq_entry *irq_entry;
-        struct irq_fd *irq;
-
-        int n, i, j;
+        int n, i;
 
         while (1) {
                 /* This is now lockless - epoll keeps back-referencesto the irqs
@@ -84,21 +101,18 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
                 }
 
                 for (i = 0; i < n ; i++) {
-                        /* Epoll back reference is the entry with 3 irq_fd
-                         * leaves - one for each irq type.
-                         */
-                        irq_entry = (struct irq_entry *)
-                                os_epoll_get_data_pointer(i);
-                        for (j = 0; j < MAX_IRQ_TYPE ; j++) {
-                                irq = irq_entry->irq_array[j];
-                                if (irq == NULL)
+                        enum um_irq_type t;
+
+                        irq_entry = os_epoll_get_data_pointer(i);
+
+                        for (t = 0; t < NUM_IRQ_TYPES; t++) {
+                                int events = irq_entry->reg[t].events;
+
+                                if (!events)
                                         continue;
-                                if (os_epoll_triggered(i, irq->events) > 0)
-                                        irq_io_loop(irq, regs);
-                                if (irq->purge) {
-                                        irq_entry->irq_array[j] = NULL;
-                                        kfree(irq);
-                                }
+
+                                if (os_epoll_triggered(i, events) > 0)
+                                        irq_io_loop(&irq_entry->reg[t], regs);
                         }
                 }
         }
@@ -106,32 +120,59 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 
         free_irqs();
 }
 
-static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
+static struct irq_entry *get_irq_entry_by_fd(int fd)
 {
-        int i;
-        int events = 0;
-        struct irq_fd *irq;
+        struct irq_entry *walk;
 
-        for (i = 0; i < MAX_IRQ_TYPE ; i++) {
-                irq = irq_entry->irq_array[i];
-                if (irq != NULL)
-                        events = irq->events | events;
-        }
-        if (events > 0) {
-                /* os_add_epoll will call os_mod_epoll if this already exists */
-                return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
+        lockdep_assert_held(&irq_lock);
+
+        list_for_each_entry(walk, &active_fds, list) {
+                if (walk->fd == fd)
+                        return walk;
         }
-        /* No events - delete */
-        return os_del_epoll_fd(irq_entry->fd);
+
+        return NULL;
 }
 
+static void free_irq_entry(struct irq_entry *to_free, bool remove)
+{
+        if (!to_free)
+                return;
+
+        if (remove)
+                os_del_epoll_fd(to_free->fd);
+        list_del(&to_free->list);
+        kfree(to_free);
+}
+
+static bool update_irq_entry(struct irq_entry *entry)
+{
+        enum um_irq_type i;
+        int events = 0;
+
+        for (i = 0; i < NUM_IRQ_TYPES; i++)
+                events |= entry->reg[i].events;
+
+        if (events) {
+                /* will modify (instead of add) if needed */
+                os_add_epoll_fd(events, entry->fd, entry);
+                return true;
+        }
+
+        os_del_epoll_fd(entry->fd);
+        return false;
+}
+
+static void update_or_free_irq_entry(struct irq_entry *entry)
+{
+        if (!update_irq_entry(entry))
+                free_irq_entry(entry, false);
+}
 
-static int activate_fd(int irq, int fd, int type, void *dev_id)
+static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
 {
-        struct irq_fd *new_fd;
         struct irq_entry *irq_entry;
-        int i, err, events;
+        int err, events = os_event_mask(type);
         unsigned long flags;
 
         err = os_set_fd_async(fd);
@@ -139,73 +180,34 @@ static int activate_fd(int irq, int fd, int type, void *dev_id)
                 goto out;
 
         spin_lock_irqsave(&irq_lock, flags);
-
-        /* Check if we have an entry for this fd */
-
-        err = -EBUSY;
-        for (irq_entry = active_fds;
-             irq_entry != NULL; irq_entry = irq_entry->next) {
-                if (irq_entry->fd == fd)
-                        break;
-        }
-
-        if (irq_entry == NULL) {
-                /* This needs to be atomic as it may be called from an
-                 * IRQ context.
-                 */
-                irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
-                if (irq_entry == NULL) {
-                        printk(KERN_ERR
-                                "Failed to allocate new IRQ entry\n");
+        irq_entry = get_irq_entry_by_fd(fd);
+        if (irq_entry) {
+                /* cannot register the same FD twice with the same type */
+                if (WARN_ON(irq_entry->reg[type].events)) {
+                        err = -EALREADY;
                         goto out_unlock;
                 }
-                irq_entry->fd = fd;
-                for (i = 0; i < MAX_IRQ_TYPE; i++)
-                        irq_entry->irq_array[i] = NULL;
-                irq_entry->next = active_fds;
-                active_fds = irq_entry;
-        }
-
-        /* Check if we are trying to re-register an interrupt for a
-         * particular fd
-         */
-        if (irq_entry->irq_array[type] != NULL) {
-                printk(KERN_ERR
-                        "Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
-                        irq, fd, type, dev_id
-                );
-                goto out_unlock;
+                /* temporarily disable to avoid IRQ-side locking */
+                os_del_epoll_fd(fd);
         } else {
-                /* New entry for this fd */
-
-                err = -ENOMEM;
-                new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
-                if (new_fd == NULL)
+                irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
+                if (!irq_entry) {
+                        err = -ENOMEM;
                         goto out_unlock;
-
-                events = os_event_mask(type);
-
-                *new_fd = ((struct irq_fd) {
-                        .id             = dev_id,
-                        .irq            = irq,
-                        .type           = type,
-                        .events         = events,
-                        .active         = true,
-                        .pending        = false,
-                        .purge          = false
-                });
-                /* Turn off any IO on this fd - allows us to
-                 * avoid locking the IRQ loop
-                 */
-                os_del_epoll_fd(irq_entry->fd);
-                irq_entry->irq_array[type] = new_fd;
+                }
+                irq_entry->fd = fd;
+                list_add_tail(&irq_entry->list, &active_fds);
+                maybe_sigio_broken(fd);
         }
 
-        /* Turn back IO on with the correct (new) IO event mask */
-        assign_epoll_events_to_irq(irq_entry);
+        irq_entry->reg[type].id = dev_id;
+        irq_entry->reg[type].irq = irq;
+        irq_entry->reg[type].active = true;
+        irq_entry->reg[type].events = events;
+
+        WARN_ON(!update_irq_entry(irq_entry));
         spin_unlock_irqrestore(&irq_lock, flags);
-        maybe_sigio_broken(fd, (type != IRQ_NONE));
 
         return 0;
 out_unlock:
@@ -215,104 +217,10 @@ out:
 }
 
 /*
- * Walk the IRQ list and dispose of any unused entries.
- * Should be done under irq_lock.
+ * Remove the entry or entries for a specific FD, if you
+ * don't want to remove all the possible entries then use
+ * um_free_irq() or deactivate_fd() instead.
  */
-
-static void garbage_collect_irq_entries(void)
-{
-        int i;
-        bool reap;
-        struct irq_entry *walk;
-        struct irq_entry *previous = NULL;
-        struct irq_entry *to_free;
-
-        if (active_fds == NULL)
-                return;
-        walk = active_fds;
-        while (walk != NULL) {
-                reap = true;
-                for (i = 0; i < MAX_IRQ_TYPE ; i++) {
-                        if (walk->irq_array[i] != NULL) {
-                                reap = false;
-                                break;
-                        }
-                }
-                if (reap) {
-                        if (previous == NULL)
-                                active_fds = walk->next;
-                        else
-                                previous->next = walk->next;
-                        to_free = walk;
-                } else {
-                        to_free = NULL;
-                }
-                walk = walk->next;
-                kfree(to_free);
-        }
-}
-
-/*
- * Walk the IRQ list and get the descriptor for our FD
- */
-
-static struct irq_entry *get_irq_entry_by_fd(int fd)
-{
-        struct irq_entry *walk = active_fds;
-
-        while (walk != NULL) {
-                if (walk->fd == fd)
-                        return walk;
-                walk = walk->next;
-        }
-        return NULL;
-}
-
-
-/*
- * Walk the IRQ list and dispose of an entry for a specific
- * device, fd and number. Note - if sharing an IRQ for read
- * and writefor the same FD it will be disposed in either case.
- * If this behaviour is undesirable use different IRQ ids.
- */
-
-#define IGNORE_IRQ 1
-#define IGNORE_DEV (1<<1)
-
-static void do_free_by_irq_and_dev(
-        struct irq_entry *irq_entry,
-        unsigned int irq,
-        void *dev,
-        int flags
-)
-{
-        int i;
-        struct irq_fd *to_free;
-
-        for (i = 0; i < MAX_IRQ_TYPE ; i++) {
-                if (irq_entry->irq_array[i] != NULL) {
-                        if (
-                        ((flags & IGNORE_IRQ) ||
-                                (irq_entry->irq_array[i]->irq == irq)) &&
-                        ((flags & IGNORE_DEV) ||
-                                (irq_entry->irq_array[i]->id == dev))
-                        ) {
-                                /* Turn off any IO on this fd - allows us to
-                                 * avoid locking the IRQ loop
-                                 */
-                                os_del_epoll_fd(irq_entry->fd);
-                                to_free = irq_entry->irq_array[i];
-                                irq_entry->irq_array[i] = NULL;
-                                assign_epoll_events_to_irq(irq_entry);
-                                if (to_free->active)
-                                        to_free->purge = true;
-                                else
-                                        kfree(to_free);
-                        }
-                }
-        }
-}
-
 void free_irq_by_fd(int fd)
 {
         struct irq_entry *to_free;
@@ -320,58 +228,64 @@ void free_irq_by_fd(int fd)
 
         spin_lock_irqsave(&irq_lock, flags);
         to_free = get_irq_entry_by_fd(fd);
-        if (to_free != NULL) {
-                do_free_by_irq_and_dev(
-                        to_free,
-                        -1,
-                        NULL,
-                        IGNORE_IRQ | IGNORE_DEV
-                );
-        }
-        garbage_collect_irq_entries();
+        free_irq_entry(to_free, true);
         spin_unlock_irqrestore(&irq_lock, flags);
 }
 EXPORT_SYMBOL(free_irq_by_fd);
 
 static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
 {
-        struct irq_entry *to_free;
+        struct irq_entry *entry;
         unsigned long flags;
 
         spin_lock_irqsave(&irq_lock, flags);
-        to_free = active_fds;
-        while (to_free != NULL) {
-                do_free_by_irq_and_dev(
-                        to_free,
-                        irq,
-                        dev,
-                        0
-                );
-                to_free = to_free->next;
+        list_for_each_entry(entry, &active_fds, list) {
+                enum um_irq_type i;
+
+                for (i = 0; i < NUM_IRQ_TYPES; i++) {
+                        struct irq_reg *reg = &entry->reg[i];
+
+                        if (!reg->events)
+                                continue;
+                        if (reg->irq != irq)
+                                continue;
+                        if (reg->id != dev)
+                                continue;
+
+                        os_del_epoll_fd(entry->fd);
+                        reg->events = 0;
+                        update_or_free_irq_entry(entry);
+                        goto out;
+                }
         }
-        garbage_collect_irq_entries();
+out:
         spin_unlock_irqrestore(&irq_lock, flags);
 }
 
-
 void deactivate_fd(int fd, int irqnum)
 {
-        struct irq_entry *to_free;
+        struct irq_entry *entry;
         unsigned long flags;
+        enum um_irq_type i;
 
         os_del_epoll_fd(fd);
+
         spin_lock_irqsave(&irq_lock, flags);
-        to_free = get_irq_entry_by_fd(fd);
-        if (to_free != NULL) {
-                do_free_by_irq_and_dev(
-                        to_free,
-                        irqnum,
-                        NULL,
-                        IGNORE_DEV
-                );
+        entry = get_irq_entry_by_fd(fd);
+        if (!entry)
+                goto out;
+
+        for (i = 0; i < NUM_IRQ_TYPES; i++) {
+                if (!entry->reg[i].events)
+                        continue;
+                if (entry->reg[i].irq == irqnum)
+                        entry->reg[i].events = 0;
         }
-        garbage_collect_irq_entries();
+
+        update_or_free_irq_entry(entry);
+out:
         spin_unlock_irqrestore(&irq_lock, flags);
+
+        ignore_sigio_fd(fd);
 }
 EXPORT_SYMBOL(deactivate_fd);
@@ -384,24 +298,17 @@ EXPORT_SYMBOL(deactivate_fd);
  */
 int deactivate_all_fds(void)
 {
-        struct irq_entry *to_free;
+        struct irq_entry *entry;
 
         /* Stop IO. The IRQ loop has no lock so this is our
          * only way of making sure we are safe to dispose
         * of all IRQ handlers
         */
        os_set_ioignore();
-        to_free = active_fds;
-        while (to_free != NULL) {
-                do_free_by_irq_and_dev(
-                        to_free,
-                        -1,
-                        NULL,
-                        IGNORE_IRQ | IGNORE_DEV
-                );
-                to_free = to_free->next;
-        }
-        /* don't garbage collect - we can no longer call kfree() here */
+
+        /* we can no longer call kfree() here so just deactivate */
+        list_for_each_entry(entry, &active_fds, list)
+                os_del_epoll_fd(entry->fd);
        os_close_epoll_fd();
        return 0;
 }
@@ -421,31 +328,146 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
         return 1;
 }
 
-void um_free_irq(unsigned int irq, void *dev)
+void um_free_irq(int irq, void *dev)
 {
+        if (WARN(irq < 0 || irq > NR_IRQS, "freeing invalid irq %d", irq))
+                return;
+
         free_irq_by_irq_and_dev(irq, dev);
         free_irq(irq, dev);
+        clear_bit(irq, irqs_allocated);
 }
 EXPORT_SYMBOL(um_free_irq);
 
-int um_request_irq(unsigned int irq, int fd, int type,
-                   irq_handler_t handler,
-                   unsigned long irqflags, const char * devname,
-                   void *dev_id)
+int um_request_irq(int irq, int fd, enum um_irq_type type,
+                   irq_handler_t handler, unsigned long irqflags,
+                   const char *devname, void *dev_id)
 {
         int err;
 
+        if (irq == UM_IRQ_ALLOC) {
+                int i;
+
+                for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
+                        if (!test_and_set_bit(i, irqs_allocated)) {
+                                irq = i;
+                                break;
+                        }
+                }
+        }
+
+        if (irq < 0)
+                return -ENOSPC;
+
         if (fd != -1) {
                 err = activate_fd(irq, fd, type, dev_id);
                 if (err)
-                        return err;
+                        goto error;
         }
 
-        return request_irq(irq, handler, irqflags, devname, dev_id);
-}
+        err = request_irq(irq, handler, irqflags, devname, dev_id);
+        if (err < 0)
+                goto error;
 
+        return irq;
+
+error:
+        clear_bit(irq, irqs_allocated);
+        return err;
+}
 EXPORT_SYMBOL(um_request_irq);
 
+#ifdef CONFIG_PM_SLEEP
+void um_irqs_suspend(void)
+{
+        struct irq_entry *entry;
+        unsigned long flags;
+
+        sig_info[SIGIO] = sigio_handler_suspend;
+
+        spin_lock_irqsave(&irq_lock, flags);
+        list_for_each_entry(entry, &active_fds, list) {
+                enum um_irq_type t;
+                bool wake = false;
+
+                for (t = 0; t < NUM_IRQ_TYPES; t++) {
+                        if (!entry->reg[t].events)
+                                continue;
+
+                        /*
+                         * For the SIGIO_WRITE_IRQ, which is used to handle the
+                         * SIGIO workaround thread, we need special handling:
+                         * enable wake for it itself, but below we tell it about
+                         * any FDs that should be suspended.
+                         */
+                        if (entry->reg[t].wakeup ||
+                            entry->reg[t].irq == SIGIO_WRITE_IRQ) {
+                                wake = true;
+                                break;
+                        }
+                }
+
+                if (!wake) {
+                        entry->suspended = true;
+                        os_clear_fd_async(entry->fd);
+                        entry->sigio_workaround =
+                                !__ignore_sigio_fd(entry->fd);
+                }
+        }
+        spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+void um_irqs_resume(void)
+{
+        struct irq_entry *entry;
+        unsigned long flags;
+
+        spin_lock_irqsave(&irq_lock, flags);
+        list_for_each_entry(entry, &active_fds, list) {
+                if (entry->suspended) {
+                        int err = os_set_fd_async(entry->fd);
+
+                        WARN(err < 0, "os_set_fd_async returned %d\n", err);
+                        entry->suspended = false;
+
+                        if (entry->sigio_workaround) {
+                                err = __add_sigio_fd(entry->fd);
+                                WARN(err < 0, "add_sigio_returned %d\n", err);
+                        }
+                }
+        }
+        spin_unlock_irqrestore(&irq_lock, flags);
+
+        sig_info[SIGIO] = sigio_handler;
+        send_sigio_to_self();
+}
+
+static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+        struct irq_entry *entry;
+        unsigned long flags;
+
+        spin_lock_irqsave(&irq_lock, flags);
+        list_for_each_entry(entry, &active_fds, list) {
+                enum um_irq_type t;
+
+                for (t = 0; t < NUM_IRQ_TYPES; t++) {
+                        if (!entry->reg[t].events)
+                                continue;
+
+                        if (entry->reg[t].irq != d->irq)
+                                continue;
+                        entry->reg[t].wakeup = on;
+                        goto unlock;
+                }
+        }
+unlock:
+        spin_unlock_irqrestore(&irq_lock, flags);
+        return 0;
+}
+#else
+#define normal_irq_set_wake NULL
+#endif
+
 /*
  * irq_chip must define at least enable/disable and ack when
  * the edge handler is used.
@@ -454,7 +476,7 @@ static void dummy(struct irq_data *d)
 {
 }
 
-/* This is used for everything else than the timer. */
+/* This is used for everything other than the timer. */
 static struct irq_chip normal_irq_type = {
         .name = "SIGIO",
         .irq_disable = dummy,
@@ -462,10 +484,11 @@ static struct irq_chip normal_irq_type = {
         .irq_ack = dummy,
         .irq_mask = dummy,
         .irq_unmask = dummy,
+        .irq_set_wake = normal_irq_set_wake,
 };
 
-static struct irq_chip SIGVTALRM_irq_type = {
-        .name = "SIGVTALRM",
+static struct irq_chip alarm_irq_type = {
+        .name = "SIGALRM",
         .irq_disable = dummy,
         .irq_enable = dummy,
         .irq_ack = dummy,
@@ -477,10 +500,9 @@ void __init init_IRQ(void)
 {
         int i;
 
-        irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
-
+        irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
 
-        for (i = 1; i <= LAST_IRQ; i++)
+        for (i = 1; i < NR_IRQS; i++)
                 irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
         /* Initialize EPOLL Loop */
         os_setup_epoll();
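Two API changes in this diff matter to callers: um_request_irq() now returns the IRQ number on success (which is how a dynamically allocated number is handed back when UM_IRQ_ALLOC is passed) rather than the raw request_irq() status, and um_free_irq() additionally releases the bit in irqs_allocated. A minimal caller sketch follows; this is hypothetical driver code, not part of the patch — the demo_* names are invented, and IRQ_READ is assumed to be one of the enum um_irq_type values from the UML headers.

/* Hypothetical usage sketch - not part of this patch. */
static irqreturn_t demo_intr(int irq, void *dev_id)
{
        /* drain the fd whose epoll event raised this IRQ, then: */
        return IRQ_HANDLED;
}

static int demo_attach(int fd, void *dev_id)
{
        int irq;

        /* UM_IRQ_ALLOC asks for a free IRQ from the dynamic range;
         * on success the allocated number (>= 0) is returned. */
        irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, demo_intr,
                             0, "demo", dev_id);
        if (irq < 0)
                return irq;

        /* ... later: drops the fd from epoll, frees the Linux IRQ
         * and clears the irqs_allocated bit. */
        um_free_irq(irq, dev_id);
        return 0;
}

A driver that needs its fd to stay armed across suspend would additionally call the generic irq_set_irq_wake(irq, 1); with this patch that lands in normal_irq_set_wake(), which sets reg->wakeup so that um_irqs_suspend() skips the entry when disabling FDs.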