author    | Steve French <sfrench@us.ibm.com> | 2006-01-23 21:51:00 +0100
committer | Steve French <sfrench@us.ibm.com> | 2006-01-23 21:51:00 +0100
commit    | 4c8af5254e741983e141e10002e01abba87f8419 (patch)
tree      | a932954630715a0e9b4f20fdfe74255b441096c5
parent    | [CIFS] Do not zero non-existent iovec in SendReceive response processing. (diff)
parent    | [SPARC64]: Use compat_sys_futimesat in 32-bit syscall table. (diff)
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Signed-off-by: Steve French <sfrench@us.ibm.com>
45 files changed, 540 insertions, 656 deletions
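A recurring pattern in the ia64 and ia64/sn changes pulled in by this merge is the replacement of semaphores used as mutexes or event waits with the dedicated primitives: DECLARE_MUTEX becomes DEFINE_MUTEX with down()/up() turning into mutex_lock()/mutex_unlock(), and DECLARE_MUTEX_LOCKED event semaphores become completions waited on with wait_for_completion() and signalled with complete(). The sketch below is illustrative only, not taken from the diff that follows, and its identifiers are hypothetical.

```c
/*
 * Illustrative sketch of the semaphore -> mutex/completion conversion
 * pattern applied throughout this merge (arch/ia64, arch/ia64/sn).
 * Not part of the merged diff; the identifiers are hypothetical.
 */
#include <linux/mutex.h>
#include <linux/completion.h>

/* was: static DECLARE_MUTEX(example_sem);            (semaphore used as a mutex) */
static DEFINE_MUTEX(example_mutex);

/* was: static DECLARE_MUTEX_LOCKED(example_exited);  (semaphore used as an event) */
static DECLARE_COMPLETION(example_exited);

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... serialized work ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
}

static void example_thread_exit(void)
{
	complete(&example_exited);	/* was: up(&example_exited); */
}

static void example_wait_for_exit(void)
{
	wait_for_completion(&example_exited);	/* was: down(&example_exited); */
}
```

The interruptible call sites in the diff follow the same shape, using mutex_lock_interruptible() and wait_for_completion_interruptible() in place of down_interruptible().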
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 3945d378bd7e..70dba1f0e2ee 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -52,9 +52,9 @@ #include <linux/compat.h> #include <linux/vfs.h> #include <linux/mman.h> +#include <linux/mutex.h> #include <asm/intrinsics.h> -#include <asm/semaphore.h> #include <asm/types.h> #include <asm/uaccess.h> #include <asm/unistd.h> @@ -86,7 +86,7 @@ * while doing so. */ /* XXX make per-mm: */ -static DECLARE_MUTEX(ia32_mmap_sem); +static DEFINE_MUTEX(ia32_mmap_mutex); asmlinkage long sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, @@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot prot = get_prot32(prot); #if PAGE_SHIFT > IA32_PAGE_SHIFT - down(&ia32_mmap_sem); + mutex_lock(&ia32_mmap_mutex); { addr = emulate_mmap(file, addr, len, prot, flags, offset); } - up(&ia32_mmap_sem); + mutex_unlock(&ia32_mmap_mutex); #else down_write(¤t->mm->mmap_sem); { @@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len) if (start >= end) return 0; - down(&ia32_mmap_sem); - { - ret = sys_munmap(start, end - start); - } - up(&ia32_mmap_sem); + mutex_lock(&ia32_mmap_mutex); + ret = sys_munmap(start, end - start); + mutex_unlock(&ia32_mmap_mutex); #endif return ret; } @@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot) if (retval < 0) return retval; - down(&ia32_mmap_sem); + mutex_lock(&ia32_mmap_mutex); { if (offset_in_page(start)) { /* start address is 4KB aligned but not page aligned. */ @@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot) retval = sys_mprotect(start, end - start, prot); } out: - up(&ia32_mmap_sem); + mutex_unlock(&ia32_mmap_mutex); return retval; #endif } @@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, old_len = PAGE_ALIGN(old_end) - addr; new_len = PAGE_ALIGN(new_end) - addr; - down(&ia32_mmap_sem); - { - ret = sys_mremap(addr, old_len, new_len, flags, new_addr); - } - up(&ia32_mmap_sem); + mutex_lock(&ia32_mmap_mutex); + ret = sys_mremap(addr, old_len, new_len, flags, new_addr); + mutex_unlock(&ia32_mmap_mutex); if ((ret >= 0) && (old_len < new_len)) { /* mremap expanded successfully */ diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 2ea4b39efffa..9c5194b385da 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -40,6 +40,7 @@ #include <linux/bitops.h> #include <linux/capability.h> #include <linux/rcupdate.h> +#include <linux/completion.h> #include <asm/errno.h> #include <asm/intrinsics.h> @@ -286,7 +287,7 @@ typedef struct pfm_context { unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */ - struct semaphore ctx_restart_sem; /* use for blocking notification mode */ + struct completion ctx_restart_done; /* use for blocking notification mode */ unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */ @@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp) /* * force task to wake up from MASKED state */ - up(&ctx->ctx_restart_sem); + complete(&ctx->ctx_restart_done); DPRINT(("waking up ctx_state=%d\n", state)); @@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg /* * init restart semaphore to locked */ - sema_init(&ctx->ctx_restart_sem, 0); + 
init_completion(&ctx->ctx_restart_done); /* * activation is used in SMP only @@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) */ if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { DPRINT(("unblocking [%d] \n", task->pid)); - up(&ctx->ctx_restart_sem); + complete(&ctx->ctx_restart_done); } else { DPRINT(("[%d] armed exit trap\n", task->pid)); @@ -5089,7 +5090,7 @@ pfm_handle_work(void) * may go through without blocking on SMP systems * if restart has been received already by the time we call down() */ - ret = down_interruptible(&ctx->ctx_restart_sem); + ret = wait_for_completion_interruptible(&ctx->ctx_restart_done); DPRINT(("after block sleeping ret=%d\n", ret)); diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index b631cf86ed44..fcd2bad0286f 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg) dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); + touch_softlockup_watchdog(); memset((char *)start, 0, length); node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET); diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h index 7c88e9a58516..8182583c762c 100644 --- a/arch/ia64/sn/include/xtalk/hubdev.h +++ b/arch/ia64/sn/include/xtalk/hubdev.h @@ -51,6 +51,15 @@ struct sn_flush_device_kernel { struct sn_flush_device_common *common; }; +/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included + * for older official PROMs to function on the new kernel base. This struct + * will be removed when the next official PROM release occurs. */ + +struct sn_flush_device_war { + struct sn_flush_device_common common; + u32 filler; /* older PROMs expect the default size of a spinlock_t */ +}; + /* * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel. */ diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 233d55115d33..00700f7e6837 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -165,8 +165,45 @@ sn_pcidev_info_get(struct pci_dev *dev) return NULL; } +/* Older PROM flush WAR + * + * 01/16/06 -- This war will be in place until a new official PROM is released. + * Additionally note that the struct sn_flush_device_war also has to be + * removed from arch/ia64/sn/include/xtalk/hubdev.h + */ +static u8 war_implemented = 0; + +static void sn_device_fixup_war(u64 nasid, u64 widget, int device, + struct sn_flush_device_common *common) +{ + struct sn_flush_device_war *war_list; + struct sn_flush_device_war *dev_entry; + struct ia64_sal_retval isrv = {0,0,0,0}; + + if (!war_implemented) { + printk(KERN_WARNING "PROM version < 4.50 -- implementing old " + "PROM flush WAR\n"); + war_implemented = 1; + } + + war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); + if (!war_list) + BUG(); + + SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, + nasid, widget, __pa(war_list), 0, 0, 0 ,0); + if (isrv.status) + panic("sn_device_fixup_war failed: %s\n", + ia64_sal_strerror(isrv.status)); + + dev_entry = war_list + device; + memcpy(common,dev_entry, sizeof(*common)); + + kfree(war_list); +} + /* - * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for + * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for * each node in the system. 
*/ static void sn_fixup_ionodes(void) @@ -246,8 +283,19 @@ static void sn_fixup_ionodes(void) widget, device, (u64)(dev_entry->common)); - if (status) - BUG(); + if (status) { + if (sn_sal_rev() < 0x0450) { + /* shortlived WAR for older + * PROM images + */ + sn_device_fixup_war(nasid, + widget, + device, + dev_entry->common); + } + else + BUG(); + } spin_lock_init(&dev_entry->sfdl_flush_lock); } diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 6546db6abdba..9ab684d1bb55 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/vmalloc.h> +#include <linux/mutex.h> #include <asm/mca.h> #include <asm/sal.h> #include <asm/sn/sn_sal.h> @@ -27,7 +28,7 @@ void sn_init_cpei_timer(void); /* Printing oemdata from mca uses data that is not passed through SAL, it is * global. Only one user at a time. */ -static DECLARE_MUTEX(sn_oemdata_mutex); +static DEFINE_MUTEX(sn_oemdata_mutex); static u8 **sn_oemdata; static u64 *sn_oemdata_size, sn_oemdata_bufsize; @@ -89,7 +90,7 @@ static int sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, u64 * oemdata_size) { - down(&sn_oemdata_mutex); + mutex_lock(&sn_oemdata_mutex); sn_oemdata = oemdata; sn_oemdata_size = oemdata_size; sn_oemdata_bufsize = 0; @@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, *sn_oemdata_size = 0; ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); } - up(&sn_oemdata_mutex); + mutex_unlock(&sn_oemdata_mutex); return 0; } diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c index 3be52a34c80f..b7ea46645e12 100644 --- a/arch/ia64/sn/kernel/xp_main.c +++ b/arch/ia64/sn/kernel/xp_main.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/mutex.h> #include <asm/sn/intr.h> #include <asm/sn/sn_sal.h> #include <asm/sn/xp.h> @@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, registration = &xpc_registrations[ch_number]; - if (down_interruptible(®istration->sema) != 0) { + if (mutex_lock_interruptible(®istration->mutex) != 0) { return xpcInterrupted; } /* if XPC_CHANNEL_REGISTERED(ch_number) */ if (registration->func != NULL) { - up(®istration->sema); + mutex_unlock(®istration->mutex); return xpcAlreadyRegistered; } @@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, registration->key = key; registration->func = func; - up(®istration->sema); + mutex_unlock(®istration->mutex); xpc_interface.connect(ch_number); @@ -190,11 +191,11 @@ xpc_disconnect(int ch_number) * figured XPC's users will just turn around and call xpc_disconnect() * again anyways, so we might as well wait, if need be. 
*/ - down(®istration->sema); + mutex_lock(®istration->mutex); /* if !XPC_CHANNEL_REGISTERED(ch_number) */ if (registration->func == NULL) { - up(®istration->sema); + mutex_unlock(®istration->mutex); return; } @@ -208,7 +209,7 @@ xpc_disconnect(int ch_number) xpc_interface.disconnect(ch_number); - up(®istration->sema); + mutex_unlock(®istration->mutex); return; } @@ -250,9 +251,9 @@ xp_init(void) xp_nofault_PIOR_target = SH1_IPI_ACCESS; } - /* initialize the connection registration semaphores */ + /* initialize the connection registration mutex */ for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { - sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ + mutex_init(&xpc_registrations[ch_number].mutex); } return 0; diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c index 0c0a68902409..8d950c778bb6 100644 --- a/arch/ia64/sn/kernel/xpc_channel.c +++ b/arch/ia64/sn/kernel/xpc_channel.c @@ -22,6 +22,8 @@ #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/completion.h> #include <asm/sn/bte.h> #include <asm/sn/sn_sal.h> #include <asm/sn/xpc.h> @@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid) atomic_set(&ch->n_to_notify, 0); spin_lock_init(&ch->lock); - sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ - sema_init(&ch->wdisconnect_sema, 0); /* event wait */ + mutex_init(&ch->msg_to_pull_mutex); + init_completion(&ch->wdisconnect_wait); atomic_set(&ch->n_on_msg_allocate_wq, 0); init_waitqueue_head(&ch->msg_allocate_wq); @@ -534,7 +536,6 @@ static enum xpc_retval xpc_allocate_msgqueues(struct xpc_channel *ch) { unsigned long irq_flags; - int i; enum xpc_retval ret; @@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) return ret; } - for (i = 0; i < ch->local_nentries; i++) { - /* use a semaphore as an event wait queue */ - sema_init(&ch->notify_queue[i].sema, 0); - } - spin_lock_irqsave(&ch->lock, irq_flags); ch->flags |= XPC_C_SETUP; spin_unlock_irqrestore(&ch->lock, irq_flags); @@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) } if (ch->flags & XPC_C_WDISCONNECT) { - spin_unlock_irqrestore(&ch->lock, *irq_flags); - up(&ch->wdisconnect_sema); - spin_lock_irqsave(&ch->lock, *irq_flags); - + /* we won't lose the CPU since we're holding ch->lock */ + complete(&ch->wdisconnect_wait); } else if (ch->delayed_IPI_flags) { if (part->act_state != XPC_P_DEACTIVATING) { /* time to take action on any delayed IPI flags */ @@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch) struct xpc_registration *registration = &xpc_registrations[ch->number]; - if (down_trylock(®istration->sema) != 0) { + if (mutex_trylock(®istration->mutex) == 0) { return xpcRetry; } if (!XPC_CHANNEL_REGISTERED(ch->number)) { - up(®istration->sema); + mutex_unlock(®istration->mutex); return xpcUnregistered; } @@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch) if (ch->flags & XPC_C_DISCONNECTING) { spin_unlock_irqrestore(&ch->lock, irq_flags); - up(®istration->sema); + mutex_unlock(®istration->mutex); return ch->reason; } @@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch) * channel lock be locked and will unlock and relock * the channel lock as needed. 
*/ - up(®istration->sema); + mutex_unlock(®istration->mutex); XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, &irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags); @@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch) atomic_inc(&xpc_partitions[ch->partid].nchannels_active); } - up(®istration->sema); + mutex_unlock(®istration->mutex); /* initiate the connection */ @@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) enum xpc_retval ret; - if (down_interruptible(&ch->msg_to_pull_sema) != 0) { + if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { /* we were interrupted by a signal */ return NULL; } @@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) XPC_DEACTIVATE_PARTITION(part, ret); - up(&ch->msg_to_pull_sema); + mutex_unlock(&ch->msg_to_pull_mutex); return NULL; } @@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) ch->next_msg_to_pull += nmsgs; } - up(&ch->msg_to_pull_sema); + mutex_unlock(&ch->msg_to_pull_mutex); /* return the message we were looking for */ msg_offset = (get % ch->remote_nentries) * ch->msg_size; diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 8930586e0eb4..c75f8aeefc2b 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -55,6 +55,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/reboot.h> +#include <linux/completion.h> #include <asm/sn/intr.h> #include <asm/sn/sn_sal.h> #include <asm/kdebug.h> @@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); static unsigned long xpc_hb_check_timeout; /* notification that the xpc_hb_checker thread has exited */ -static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); +static DECLARE_COMPLETION(xpc_hb_checker_exited); /* notification that the xpc_discovery thread has exited */ -static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); +static DECLARE_COMPLETION(xpc_discovery_exited); static struct timer_list xpc_hb_timer; @@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore) /* mark this thread as having exited */ - up(&xpc_hb_checker_exited); + complete(&xpc_hb_checker_exited); return 0; } @@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore) dev_dbg(xpc_part, "discovery thread is exiting\n"); /* mark this thread as having exited */ - up(&xpc_discovery_exited); + complete(&xpc_discovery_exited); return 0; } @@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number) continue; } - (void) down(&ch->wdisconnect_sema); + wait_for_completion(&ch->wdisconnect_wait); spin_lock_irqsave(&ch->lock, irq_flags); DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); @@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason) free_irq(SGI_XPC_ACTIVATE, NULL); /* wait for the discovery thread to exit */ - down(&xpc_discovery_exited); + wait_for_completion(&xpc_discovery_exited); /* wait for the heartbeat checker thread to exit */ - down(&xpc_hb_checker_exited); + wait_for_completion(&xpc_hb_checker_exited); /* sleep for a 1/3 of a second or so */ @@ -1367,7 +1368,7 @@ xpc_init(void) dev_err(xpc_part, "failed while forking discovery thread\n"); /* mark this new thread as a non-starter */ - up(&xpc_discovery_exited); + complete(&xpc_discovery_exited); xpc_do_exit(xpcUnloading); return -EBUSY; diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 77a1262751d3..2fac27049bf6 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, 
int device, void *resp) { struct ia64_sal_retval ret_stuff; u64 busnum; + u64 segment; ret_stuff.status = 0; ret_stuff.v0 = 0; + segment = soft->pbi_buscommon.bs_persist_segment; busnum = soft->pbi_buscommon.bs_persist_busnum; - SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, - (u64) device, (u64) resp, 0, 0, 0, 0); + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, + busnum, (u64) device, (u64) resp, 0, 0, 0); return (int)ret_stuff.v0; } @@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, { struct ia64_sal_retval ret_stuff; u64 busnum; + u64 segment; ret_stuff.status = 0; ret_stuff.v0 = 0; + segment = soft->pbi_buscommon.bs_persist_segment; busnum = soft->pbi_buscommon.bs_persist_busnum; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, - (u64) busnum, (u64) device, (u64) action, - (u64) resp, 0, 0, 0); + segment, busnum, (u64) device, (u64) action, + (u64) resp, 0, 0); return (int)ret_stuff.v0; } diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 03ecb4e4614e..c51d08d218ef 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1277,62 +1277,6 @@ sys_sigstack: mov %l5, %o7 .align 4 - .globl sys_sigpause -sys_sigpause: - /* Note: %o0 already has correct value... */ - call do_sigpause - add %sp, STACKFRAME_SZ, %o1 - - ld [%curptr + TI_FLAGS], %l5 - andcc %l5, _TIF_SYSCALL_TRACE, %g0 - be 1f - nop - - call syscall_trace - nop - -1: - /* We are returning to a signal handler. */ - RESTORE_ALL - - .align 4 - .globl sys_sigsuspend -sys_sigsuspend: - call do_sigsuspend - add %sp, STACKFRAME_SZ, %o0 - - ld [%curptr + TI_FLAGS], %l5 - andcc %l5, _TIF_SYSCALL_TRACE, %g0 - be 1f - nop - - call syscall_trace - nop - -1: - /* We are returning to a signal handler. */ - RESTORE_ALL - - .align 4 - .globl sys_rt_sigsuspend -sys_rt_sigsuspend: - /* Note: %o0, %o1 already have correct value... */ - call do_rt_sigsuspend - add %sp, STACKFRAME_SZ, %o2 - - ld [%curptr + TI_FLAGS], %l5 - andcc %l5, _TIF_SYSCALL_TRACE, %g0 - be 1f - nop - - call syscall_trace - nop - -1: - /* We are returning to a signal handler. */ - RESTORE_ALL - - .align 4 .globl sys_sigreturn sys_sigreturn: call do_sigreturn diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S index f7460d897e79..77ca6fd81253 100644 --- a/arch/sparc/kernel/rtrap.S +++ b/arch/sparc/kernel/rtrap.S @@ -68,15 +68,14 @@ ret_trap_lockless_ipi: ld [%curptr + TI_FLAGS], %g2 signal_p: - andcc %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0 + andcc %g2, (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %g0 bz,a ret_trap_continue ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr - clr %o0 - mov %l5, %o2 - mov %l6, %o3 + mov %l5, %o1 + mov %l6, %o2 call do_signal - add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr + add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr /* Fall through. */ ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c index 5f34d7dc2b89..0748d8147bbf 100644 --- a/arch/sparc/kernel/signal.c +++ b/arch/sparc/kernel/signal.c @@ -35,9 +35,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, void *fpqueue, unsigned long *fpqdepth); extern void fpload(unsigned long *fpregs, unsigned long *fsr); -asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, - unsigned long orig_o0, int restart_syscall); - /* Signal frames: the original one (compatible with SunOS): * * Set up a signal frame... 
Make the stack look the way SunOS @@ -95,98 +92,30 @@ struct rt_signal_frame { #define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) -/* - * atomically swap in the new signal mask, and wait for a signal. - * This is really tricky on the Sparc, watch out... - */ -asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs) +static int _sigpause_common(old_sigset_t set) { - sigset_t saveset; - set &= _BLOCKABLE; spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; siginitset(¤t->blocked, set); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - regs->pc = regs->npc; - regs->npc += 4; - - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. - */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->psr |= PSR_C; - regs->u_regs[UREG_I0] = EINTR; - if (do_signal(&saveset, regs, 0, 0)) - return; - } -} + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); -asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs) -{ - _sigpause_common(set, regs); + return -ERESTARTNOHAND; } -asmlinkage void do_sigsuspend (struct pt_regs *regs) +asmlinkage int sys_sigpause(unsigned int set) { - _sigpause_common(regs->u_regs[UREG_I0], regs); + return _sigpause_common(set); } -asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, - struct pt_regs *regs) +asmlinkage int sys_sigsuspend(old_sigset_t set) { - sigset_t oldset, set; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(sigset_t)) { - regs->psr |= PSR_C; - regs->u_regs[UREG_I0] = EINVAL; - return; - } - - if (copy_from_user(&set, uset, sizeof(set))) { - regs->psr |= PSR_C; - regs->u_regs[UREG_I0] = EFAULT; - return; - } - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - oldset = current->blocked; - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - regs->pc = regs->npc; - regs->npc += 4; - - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. - */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->psr |= PSR_C; - regs->u_regs[UREG_I0] = EINTR; - if (do_signal(&oldset, regs, 0, 0)) - return; - } + return _sigpause_common(set); } static inline int @@ -1067,13 +996,13 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, - unsigned long orig_i0, int restart_syscall) +asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall) { siginfo_t info; struct sparc_deliver_cookie cookie; struct k_sigaction ka; int signr; + sigset_t *oldset; /* * XXX Disable svr4 signal handling until solaris emulation works. 
@@ -1089,7 +1018,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, cookie.restart_syscall = restart_syscall; cookie.orig_i0 = orig_i0; - if (!oldset) + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else oldset = ¤t->blocked; signr = get_signal_to_deliver(&info, &ka, regs, &cookie); @@ -1098,7 +1029,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, syscall_restart(cookie.orig_i0, regs, &ka.sa); handle_signal(signr, &ka, &info, oldset, regs, svr4_signal); - return 1; + /* a signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + return; } if (cookie.restart_syscall && (regs->u_regs[UREG_I0] == ERESTARTNOHAND || @@ -1115,7 +1053,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, regs->pc -= 4; regs->npc -= 4; } - return 0; + + /* if there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } } asmlinkage int diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 0b0d492c953b..19b25399d7e4 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -66,7 +66,6 @@ struct poll { extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *); extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *); -void _sigpause_common (unsigned int set, struct pt_regs *); extern void (*__copy_1page)(void *, const void *); extern void __memmove(void *, const void *, __kernel_size_t); extern void (*bzero_1page)(void *); @@ -227,7 +226,6 @@ EXPORT_SYMBOL(kunmap_atomic); /* Solaris/SunOS binary compatibility */ EXPORT_SYMBOL(svr4_setcontext); EXPORT_SYMBOL(svr4_getcontext); -EXPORT_SYMBOL(_sigpause_common); EXPORT_SYMBOL(dump_thread); diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S index e457a40838fc..6877ae4cd1d9 100644 --- a/arch/sparc/kernel/systbls.S +++ b/arch/sparc/kernel/systbls.S @@ -75,7 +75,10 @@ sys_call_table: /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy /*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink /*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid -/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl +/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat +/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat +/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat +/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll #ifdef CONFIG_SUNOS_EMUL /* Now the SunOS syscall table. 
*/ @@ -181,6 +184,11 @@ sunos_sys_table: .long sunos_nosys, sunos_nosys, sunos_nosys .long sunos_nosys /*280*/ .long sunos_nosys, sunos_nosys, sunos_nosys + .long sunos_nosys, sunos_nosys, sunos_nosys + .long sunos_nosys, sunos_nosys, sunos_nosys .long sunos_nosys +/*290*/ .long sunos_nosys, sunos_nosys, sunos_nosys + .long sunos_nosys, sunos_nosys, sunos_nosys + .long sunos_nosys, sunos_nosys, sunos_nosys #endif diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 710002991888..e50e56e4ab61 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1416,7 +1416,6 @@ execve_merge: add %sp, PTREGS_OFF, %o0 .globl sys_pipe, sys_sigpause, sys_nis_syscall - .globl sys_sigsuspend, sys_rt_sigsuspend .globl sys_rt_sigreturn .globl sys_ptrace .globl sys_sigaltstack @@ -1440,28 +1439,6 @@ sys32_sigaltstack: mov %i6, %o2 #endif .align 32 -sys_sigsuspend: add %sp, PTREGS_OFF, %o0 - call do_sigsuspend - add %o7, 1f-.-4, %o7 - nop -sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */ - add %sp, PTREGS_OFF, %o2 - call do_rt_sigsuspend - add %o7, 1f-.-4, %o7 - nop -#ifdef CONFIG_COMPAT - .globl sys32_rt_sigsuspend -sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */ - srl %o0, 0, %o0 - add %sp, PTREGS_OFF, %o2 - call do_rt_sigsuspend32 - add %o7, 1f-.-4, %o7 -#endif - /* NOTE: %o0 has a correct value already */ -sys_sigpause: add %sp, PTREGS_OFF, %o1 - call do_sigpause - add %o7, 1f-.-4, %o7 - nop #ifdef CONFIG_COMPAT .globl sys32_sigreturn sys32_sigreturn: diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 090dcca00d2a..b80eba0081ca 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -53,14 +53,13 @@ __handle_user_windows: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate ldx [%g6 + TI_FLAGS], %l0 -1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 +1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0 be,pt %xcc, __handle_user_windows_continue nop - clr %o0 - mov %l5, %o2 - mov %l6, %o3 - add %sp, PTREGS_OFF, %o1 - mov %l0, %o4 + mov %l5, %o1 + mov %l6, %o2 + add %sp, PTREGS_OFF, %o0 + mov %l0, %o3 call do_notify_resume wrpr %g0, RTRAP_PSTATE, %pstate @@ -96,15 +95,14 @@ __handle_perfctrs: wrpr %g0, RTRAP_PSTATE, %pstate wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate ldx [%g6 + TI_FLAGS], %l0 -1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 +1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0 be,pt %xcc, __handle_perfctrs_continue sethi %hi(TSTATE_PEF), %o0 - clr %o0 - mov %l5, %o2 - mov %l6, %o3 - add %sp, PTREGS_OFF, %o1 - mov %l0, %o4 + mov %l5, %o1 + mov %l6, %o2 + add %sp, PTREGS_OFF, %o0 + mov %l0, %o3 call do_notify_resume wrpr %g0, RTRAP_PSTATE, %pstate @@ -129,11 +127,10 @@ __handle_userfpu: ba,a,pt %xcc, __handle_userfpu_continue __handle_signal: - clr %o0 - mov %l5, %o2 - mov %l6, %o3 - add %sp, PTREGS_OFF, %o1 - mov %l0, %o4 + mov %l5, %o1 + mov %l6, %o2 + add %sp, PTREGS_OFF, %o0 + mov %l0, %o3 call do_notify_resume wrpr %g0, RTRAP_PSTATE, %pstate wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate @@ -200,7 +197,7 @@ __handle_preemption_continue: andcc %l1, %o0, %g0 andcc %l0, _TIF_NEED_RESCHED, %g0 bne,pn %xcc, __handle_preemption - andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0 + andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0 bne,pn %xcc, __handle_signal __handle_signal_continue: ldub [%g6 + TI_WSAVED], %o2 diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c index 60f5dfabb1e1..ca11a4c457d4 100644 --- 
a/arch/sparc64/kernel/signal.c +++ b/arch/sparc64/kernel/signal.c @@ -36,9 +36,6 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -static int do_signal(sigset_t *oldset, struct pt_regs * regs, - unsigned long orig_o0, int ret_from_syscall); - /* {set, get}context() needed for 64-bit SparcLinux userland. */ asmlinkage void sparc64_set_context(struct pt_regs *regs) { @@ -242,114 +239,29 @@ struct rt_signal_frame { /* Align macros */ #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) -/* - * atomically swap in the new signal mask, and wait for a signal. - * This is really tricky on the Sparc, watch out... - */ -asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs) +static long _sigpause_common(old_sigset_t set) { - sigset_t saveset; - -#ifdef CONFIG_SPARC32_COMPAT - if (test_thread_flag(TIF_32BIT)) { - extern asmlinkage void _sigpause32_common(compat_old_sigset_t, - struct pt_regs *); - _sigpause32_common(set, regs); - return; - } -#endif set &= _BLOCKABLE; spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; siginitset(¤t->blocked, set); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - - if (test_thread_flag(TIF_32BIT)) { - regs->tpc = (regs->tnpc & 0xffffffff); - regs->tnpc = (regs->tnpc + 4) & 0xffffffff; - } else { - regs->tpc = regs->tnpc; - regs->tnpc += 4; - } - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. - */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); - regs->u_regs[UREG_I0] = EINTR; - if (do_signal(&saveset, regs, 0, 0)) - return; - } + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; } -asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs) +asmlinkage long sys_sigpause(unsigned int set) { - _sigpause_common(set, regs); + return _sigpause_common(set); } -asmlinkage void do_sigsuspend(struct pt_regs *regs) +asmlinkage long sys_sigsuspend(old_sigset_t set) { - _sigpause_common(regs->u_regs[UREG_I0], regs); -} - -asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs) -{ - sigset_t oldset, set; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(sigset_t)) { - regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); - regs->u_regs[UREG_I0] = EINVAL; - return; - } - if (copy_from_user(&set, uset, sizeof(set))) { - regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); - regs->u_regs[UREG_I0] = EFAULT; - return; - } - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - oldset = current->blocked; - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - if (test_thread_flag(TIF_32BIT)) { - regs->tpc = (regs->tnpc & 0xffffffff); - regs->tnpc = (regs->tnpc + 4) & 0xffffffff; - } else { - regs->tpc = regs->tnpc; - regs->tnpc += 4; - } - - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. 
- */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY); - regs->u_regs[UREG_I0] = EINTR; - if (do_signal(&oldset, regs, 0, 0)) - return; - } + return _sigpause_common(set); } static inline int @@ -607,26 +519,29 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -static int do_signal(sigset_t *oldset, struct pt_regs * regs, - unsigned long orig_i0, int restart_syscall) +static void do_signal(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall) { siginfo_t info; struct signal_deliver_cookie cookie; struct k_sigaction ka; int signr; + sigset_t *oldset; cookie.restart_syscall = restart_syscall; cookie.orig_i0 = orig_i0; - if (!oldset) + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else oldset = ¤t->blocked; #ifdef CONFIG_SPARC32_COMPAT if (test_thread_flag(TIF_32BIT)) { - extern int do_signal32(sigset_t *, struct pt_regs *, - unsigned long, int); - return do_signal32(oldset, regs, orig_i0, - cookie.restart_syscall); + extern void do_signal32(sigset_t *, struct pt_regs *, + unsigned long, int); + do_signal32(oldset, regs, orig_i0, + cookie.restart_syscall); + return; } #endif @@ -635,7 +550,15 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs, if (cookie.restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); handle_signal(signr, &ka, &info, oldset, regs); - return 1; + + /* a signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + return; } if (cookie.restart_syscall && (regs->u_regs[UREG_I0] == ERESTARTNOHAND || @@ -652,15 +575,21 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs, regs->tpc -= 4; regs->tnpc -= 4; } - return 0; + + /* if there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } } -void do_notify_resume(sigset_t *oldset, struct pt_regs *regs, - unsigned long orig_i0, int restart_syscall, +void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall, unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_SIGPENDING) - do_signal(oldset, regs, orig_i0, restart_syscall); + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + do_signal(regs, orig_i0, restart_syscall); } void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c index 009a86e5ded4..708ba9b42cda 100644 --- a/arch/sparc64/kernel/signal32.c +++ b/arch/sparc64/kernel/signal32.c @@ -32,9 +32,6 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -int do_signal32(sigset_t *oldset, struct pt_regs *regs, - unsigned long orig_o0, int ret_from_syscall); - /* Signal frames: the original one (compatible with SunOS): * * Set up a signal frame... 
Make the stack look the way SunOS @@ -226,102 +223,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } -/* - * atomically swap in the new signal mask, and wait for a signal. - * This is really tricky on the Sparc, watch out... - */ -asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs) -{ - sigset_t saveset; - - set &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; - siginitset(¤t->blocked, set); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - regs->tpc = regs->tnpc; - regs->tnpc += 4; - if (test_thread_flag(TIF_32BIT)) { - regs->tpc &= 0xffffffff; - regs->tnpc &= 0xffffffff; - } - - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. - */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->tstate |= TSTATE_ICARRY; - regs->u_regs[UREG_I0] = EINTR; - if (do_signal32(&saveset, regs, 0, 0)) - return; - } -} - -asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs) -{ - sigset_t oldset, set; - compat_sigset_t set32; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) { - regs->tstate |= TSTATE_ICARRY; - regs->u_regs[UREG_I0] = EINVAL; - return; - } - if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) { - regs->tstate |= TSTATE_ICARRY; - regs->u_regs[UREG_I0] = EFAULT; - return; - } - switch (_NSIG_WORDS) { - case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32); - case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32); - case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32); - case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32); - } - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - oldset = current->blocked; - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - regs->tpc = regs->tnpc; - regs->tnpc += 4; - if (test_thread_flag(TIF_32BIT)) { - regs->tpc &= 0xffffffff; - regs->tnpc &= 0xffffffff; - } - - /* Condition codes and return value where set here for sigpause, - * and so got used by setup_frame, which again causes sigreturn() - * to return -EINTR. - */ - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - /* - * Return -EINTR and set condition code here, - * so the interrupted system call actually returns - * these. - */ - regs->tstate |= TSTATE_ICARRY; - regs->u_regs[UREG_I0] = EINTR; - if (do_signal32(&oldset, regs, 0, 0)) - return; - } -} - static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { unsigned long *fpregs = current_thread_info()->fpregs; @@ -1362,8 +1263,8 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. 
*/ -int do_signal32(sigset_t *oldset, struct pt_regs * regs, - unsigned long orig_i0, int restart_syscall) +void do_signal32(sigset_t *oldset, struct pt_regs * regs, + unsigned long orig_i0, int restart_syscall) { siginfo_t info; struct signal_deliver_cookie cookie; @@ -1380,7 +1281,15 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs, syscall_restart32(orig_i0, regs, &ka.sa); handle_signal32(signr, &ka, &info, oldset, regs, svr4_signal); - return 1; + + /* a signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + return; } if (cookie.restart_syscall && (regs->u_regs[UREG_I0] == ERESTARTNOHAND || @@ -1397,7 +1306,14 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs, regs->tpc -= 4; regs->tnpc -= 4; } - return 0; + + /* if there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } } struct sigstack32 { diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index d177d7e5c9d3..3c06bfb92a8c 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -69,7 +69,6 @@ struct poll { extern void die_if_kernel(char *str, struct pt_regs *regs); extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); -void _sigpause_common (unsigned int set, struct pt_regs *); extern void *__bzero(void *, size_t); extern void *__memscan_zero(void *, size_t); extern void *__memscan_generic(void *, int, size_t); @@ -236,9 +235,10 @@ EXPORT_SYMBOL(pci_dma_supported); /* I/O device mmaping on Sparc64. 
*/ EXPORT_SYMBOL(io_remap_pfn_range); +#ifdef CONFIG_COMPAT /* Solaris/SunOS binary compatibility */ -EXPORT_SYMBOL(_sigpause_common); EXPORT_SYMBOL(verify_compat_iovec); +#endif EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(pte_alloc_one_kernel); diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index 98d24bc00044..bf0fc5bfbfbe 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S @@ -41,7 +41,7 @@ sys_call_table32: /*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall /*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending - .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid + .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd /*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod @@ -76,7 +76,10 @@ sys_call_table32: .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid -/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl +/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat + .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_newfstatat +/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat + .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll #endif /* CONFIG_COMPAT */ @@ -142,7 +145,10 @@ sys_call_table: .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid -/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl +/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat + .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, compat_sys_newfstatat +/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat + .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ defined(CONFIG_SOLARIS_EMUL_MODULE) @@ -239,13 +245,20 @@ sunos_sys_table: /*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys + .word sunos_nosys +/*260*/ .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys + .word sunos_nosys +/*270*/ .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys + .word sunos_nosys +/*280*/ .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys + .word sunos_nosys +/*290*/ .word 
sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys .word sunos_nosys, sunos_nosys, sunos_nosys - .word sunos_nosys #endif diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S index 4b6ae583c0a3..eb314ed23cdb 100644 --- a/arch/sparc64/solaris/entry64.S +++ b/arch/sparc64/solaris/entry64.S @@ -180,6 +180,8 @@ solaris_sigsuspend: nop call sys_sigsuspend stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] + b,pt %xcc, ret_from_solaris + nop .globl solaris_getpid solaris_getpid: diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c index 5468e5a767e2..43e67d6c29d4 100644 --- a/drivers/serial/sn_console.c +++ b/drivers/serial/sn_console.c @@ -6,7 +6,7 @@ * driver for that. * * - * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License @@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void) misc.name = DEVICE_NAME_DYNAMIC; retval = misc_register(&misc); if (retval != 0) { - printk - ("Failed to register console device using misc_register.\n"); + printk(KERN_WARNING "Failed to register console " + "device using misc_register.\n"); return -ENODEV; } sal_console_uart.major = MISC_MAJOR; @@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count) { unsigned long flags = 0; struct sn_cons_port *port = &sal_console_port; -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) static int stole_lock = 0; -#endif BUG_ON(!port->sc_is_asynch); /* We can't look at the xmit buffer if we're not registered with serial core * yet. So only do the fancy recovery after registering */ - if (port->sc_port.info) { - - /* somebody really wants this output, might be an - * oops, kdb, panic, etc. make sure they get it. */ -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) - if (spin_is_locked(&port->sc_port.lock)) { - int lhead = port->sc_port.info->xmit.head; - int ltail = port->sc_port.info->xmit.tail; - int counter, got_lock = 0; + if (!port->sc_port.info) { + /* Not yet registered with serial core - simple case */ + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); + return; + } - /* - * We attempt to determine if someone has died with the - * lock. We wait ~20 secs after the head and tail ptrs - * stop moving and assume the lock holder is not functional - * and plow ahead. If the lock is freed within the time out - * period we re-get the lock and go ahead normally. We also - * remember if we have plowed ahead so that we don't have - * to wait out the time out period again - the asumption - * is that we will time out again. - */ + /* somebody really wants this output, might be an + * oops, kdb, panic, etc. make sure they get it. */ + if (spin_is_locked(&port->sc_port.lock)) { + int lhead = port->sc_port.info->xmit.head; + int ltail = port->sc_port.info->xmit.tail; + int counter, got_lock = 0; + + /* + * We attempt to determine if someone has died with the + * lock. We wait ~20 secs after the head and tail ptrs + * stop moving and assume the lock holder is not functional + * and plow ahead. If the lock is freed within the time out + * period we re-get the lock and go ahead normally. We also + * remember if we have plowed ahead so that we don't have + * to wait out the time out period again - the asumption + * is that we will time out again. 
+ */ - for (counter = 0; counter < 150; mdelay(125), counter++) { - if (!spin_is_locked(&port->sc_port.lock) - || stole_lock) { - if (!stole_lock) { - spin_lock_irqsave(&port-> - sc_port.lock, - flags); - got_lock = 1; - } - break; - } else { - /* still locked */ - if ((lhead != - port->sc_port.info->xmit.head) - || (ltail != - port->sc_port.info->xmit. - tail)) { - lhead = - port->sc_port.info->xmit. - head; - ltail = - port->sc_port.info->xmit. - tail; - counter = 0; - } + for (counter = 0; counter < 150; mdelay(125), counter++) { + if (!spin_is_locked(&port->sc_port.lock) + || stole_lock) { + if (!stole_lock) { + spin_lock_irqsave(&port->sc_port.lock, + flags); + got_lock = 1; } - } - /* flush anything in the serial core xmit buffer, raw */ - sn_transmit_chars(port, 1); - if (got_lock) { - spin_unlock_irqrestore(&port->sc_port.lock, - flags); - stole_lock = 0; + break; } else { - /* fell thru */ - stole_lock = 1; + /* still locked */ + if ((lhead != port->sc_port.info->xmit.head) + || (ltail != + port->sc_port.info->xmit.tail)) { + lhead = + port->sc_port.info->xmit.head; + ltail = + port->sc_port.info->xmit.tail; + counter = 0; + } } - puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); - } else { - stole_lock = 0; -#endif - spin_lock_irqsave(&port->sc_port.lock, flags); - sn_transmit_chars(port, 1); + } + /* flush anything in the serial core xmit buffer, raw */ + sn_transmit_chars(port, 1); + if (got_lock) { spin_unlock_irqrestore(&port->sc_port.lock, flags); - - puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) + stole_lock = 0; + } else { + /* fell thru */ + stole_lock = 1; } -#endif - } - else { - /* Not yet registered with serial core - simple case */ + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); + } else { + stole_lock = 0; + spin_lock_irqsave(&port->sc_port.lock, flags); + sn_transmit_chars(port, 1); + spin_unlock_irqrestore(&port->sc_port.lock, flags); + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); } } diff --git a/fs/compat.c b/fs/compat.c index 18b21b4c9e3a..ff0bafcff720 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1743,7 +1743,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS) timeout = -1; /* infinite */ else { - timeout = ROUND_UP(tv.tv_sec, 1000000/HZ); + timeout = ROUND_UP(tv.tv_usec, 1000000/HZ); timeout += tv.tv_sec * HZ; } } @@ -1884,7 +1884,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, /* We assume that ts.tv_sec is always lower than the number of seconds that can be expressed in an s64. 
Otherwise the compiler bitches at us */ - timeout = ROUND_UP(ts.tv_sec, 1000000000/HZ); + timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ); timeout += ts.tv_sec * HZ; } diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h index bb8906285fab..f483eeb95dd1 100644 --- a/include/asm-ia64/semaphore.h +++ b/include/asm-ia64/semaphore.h @@ -61,7 +61,7 @@ static inline void down (struct semaphore *sem) { might_sleep(); - if (atomic_dec_return(&sem->count) < 0) + if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) __down(sem); } @@ -75,7 +75,7 @@ down_interruptible (struct semaphore * sem) int ret = 0; might_sleep(); - if (atomic_dec_return(&sem->count) < 0) + if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) ret = __down_interruptible(sem); return ret; } @@ -85,7 +85,7 @@ down_trylock (struct semaphore *sem) { int ret = 0; - if (atomic_dec_return(&sem->count) < 0) + if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) ret = __down_trylock(sem); return ret; } @@ -93,7 +93,7 @@ down_trylock (struct semaphore *sem) static inline void up (struct semaphore * sem) { - if (atomic_inc_return(&sem->count) <= 0) + if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1) __up(sem); } diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h index 203945ae034e..9bd2f9bf329b 100644 --- a/include/asm-ia64/sn/xp.h +++ b/include/asm-ia64/sn/xp.h @@ -18,6 +18,7 @@ #include <linux/cache.h> #include <linux/hardirq.h> +#include <linux/mutex.h> #include <asm/sn/types.h> #include <asm/sn/bte.h> @@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid, * the channel. */ struct xpc_registration { - struct semaphore sema; + struct mutex mutex; xpc_channel_func func; /* function to call */ void *key; /* pointer to user's key */ u16 nentries; /* #of msg entries in local msg queue */ diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index 87e9cd588510..0c36928ffd8b 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h @@ -19,6 +19,8 @@ #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/device.h> +#include <linux/mutex.h> +#include <linux/completion.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sn/bte.h> @@ -335,8 +337,7 @@ struct xpc_openclose_args { * and consumed by the intended recipient. 
*/ struct xpc_notify { - struct semaphore sema; /* notify semaphore */ - volatile u8 type; /* type of notification */ + volatile u8 type; /* type of notification */ /* the following two fields are only used if type == XPC_N_CALL */ xpc_notify_func func; /* user's notify function */ @@ -465,8 +466,8 @@ struct xpc_channel { xpc_channel_func func; /* user's channel function */ void *key; /* pointer to user's key */ - struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ - struct semaphore wdisconnect_sema; /* wait for channel disconnect */ + struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ + struct completion wdisconnect_wait; /* wait for channel disconnect */ struct xpc_openclose_args *local_openclose_args; /* args passed on */ /* opening or closing of channel */ diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index d8aae4da3978..412ef8e493a8 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h @@ -18,6 +18,10 @@ #include <asm/smp.h> #ifdef CONFIG_NUMA + +/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */ +#define PENALTY_FOR_NODE_WITH_CPUS 255 + /* * Returns the number of the node containing CPU 'cpu' */ diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h index 95944556d8b6..d0d76b30eb4c 100644 --- a/include/asm-sparc/oplib.h +++ b/include/asm-sparc/oplib.h @@ -164,6 +164,7 @@ enum prom_input_device { PROMDEV_IKBD, /* input from keyboard */ PROMDEV_ITTYA, /* input from ttya */ PROMDEV_ITTYB, /* input from ttyb */ + PROMDEV_IRSC, /* input from rsc */ PROMDEV_I_UNK, }; @@ -175,6 +176,7 @@ enum prom_output_device { PROMDEV_OSCREEN, /* to screen */ PROMDEV_OTTYA, /* to ttya */ PROMDEV_OTTYB, /* to ttyb */ + PROMDEV_ORSC, /* to rsc */ PROMDEV_O_UNK, }; diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h index 65f060b040ab..91b9f5888c85 100644 --- a/include/asm-sparc/thread_info.h +++ b/include/asm-sparc/thread_info.h @@ -128,9 +128,10 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *) * thread information flag bit numbers */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +/* flag bit 1 is available */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 8 /* FPU was used by this task * this quantum (SMP) */ #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling @@ -139,9 +140,9 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *) /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_USEDFPU (1<<TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index 58dba518239e..2ac64e65e336 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h @@ -300,11 +300,26 @@ #define __NR_add_key 281 #define __NR_request_key 282 #define __NR_keyctl 283 +#define __NR_openat 284 +#define __NR_mkdirat 285 +#define __NR_mknodat 286 +#define __NR_fchownat 287 +#define __NR_futimesat 288 +#define __NR_newfstatat 289 +#define __NR_unlinkat 290 +#define __NR_renameat 291 +#define __NR_linkat 
292 +#define __NR_symlinkat 293 +#define __NR_readlinkat 294 +#define __NR_fchmodat 295 +#define __NR_faccessat 296 +#define __NR_pselect6 297 +#define __NR_ppoll 298 -/* WARNING: You MAY NOT add syscall numbers larger than 283, since +/* WARNING: You MAY NOT add syscall numbers larger than 298, since * all of the syscall tables in the Sparc kernel are - * sized to have 283 entries (starting at zero). Therefore - * find a free slot in the 0-282 range. + * sized to have 298 entries (starting at zero). Therefore + * find a free slot in the 0-298 range. */ #define _syscall0(type,name) \ @@ -458,6 +473,7 @@ return -1; \ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGSUSPEND #endif #ifdef __KERNEL_SYSCALLS__ diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index c94d8b3991bd..ac9d068aab4f 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); * nop */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_RESTORE_SIGMASK 1 /* restore signal mask in do_signal() */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_PERFCTR 4 /* performance counters active */ @@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_POLLING_NRFLAG 14 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_PERFCTR (1<<TIF_PERFCTR) @@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_32BIT (1<<TIF_32BIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) +#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ + (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \ _TIF_NEED_RESCHED | _TIF_PERFCTR)) #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index 51ec2879b881..84ac2bdb0902 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -302,11 +302,26 @@ #define __NR_add_key 281 #define __NR_request_key 282 #define __NR_keyctl 283 +#define __NR_openat 284 +#define __NR_mkdirat 285 +#define __NR_mknodat 286 +#define __NR_fchownat 287 +#define __NR_futimesat 288 +#define __NR_newfstatat 289 +#define __NR_unlinkat 290 +#define __NR_renameat 291 +#define __NR_linkat 292 +#define __NR_symlinkat 293 +#define __NR_readlinkat 294 +#define __NR_fchmodat 295 +#define __NR_faccessat 296 +#define __NR_pselect6 297 +#define __NR_ppoll 298 -/* WARNING: You MAY NOT add syscall numbers larger than 283, since +/* WARNING: You MAY NOT add syscall numbers larger than 298, since * all of the syscall tables in the Sparc kernel are - * sized to have 283 entries (starting at zero). Therefore - * find a free slot in the 0-282 range. + * sized to have 298 entries (starting at zero). Therefore + * find a free slot in the 0-298 range. 
*/ #define _syscall0(type,name) \ @@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig, #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGSUSPEND +#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND #endif /* diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 472f04834809..59ff6c430cf6 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -19,7 +19,7 @@ struct xt_get_revision /* For standard target */ #define XT_RETURN (-NF_REPEAT - 1) -#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1)) +#define XT_ALIGN(s) (((s) + (__alignof__(u_int64_t)-1)) & ~(__alignof__(u_int64_t)-1)) /* Standard return verdict, or do jump. */ #define XT_STANDARD_TARGET "" diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index a553f39f6aee..e673b2c984e9 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t); +void sctp_backlog_migrate(struct sctp_association *assoc, + struct sock *oldsk, struct sock *newsk); /* * Section: Macros, externs, and inlines diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f5c22d77feab..8c522ae031bb 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -127,9 +127,9 @@ extern struct sctp_globals { * RTO.Alpha - 1/8 (3 when converted to right shifts.) * RTO.Beta - 1/4 (2 when converted to right shifts.) */ - __u32 rto_initial; - __u32 rto_min; - __u32 rto_max; + unsigned long rto_initial; + unsigned long rto_min; + unsigned long rto_max; /* Note: rto_alpha and rto_beta are really defined as inverse * powers of two to facilitate integer operations. @@ -140,12 +140,18 @@ extern struct sctp_globals { /* Max.Burst - 4 */ int max_burst; - /* Valid.Cookie.Life - 60 seconds */ - int valid_cookie_life; - /* Whether Cookie Preservative is enabled(1) or not(0) */ int cookie_preserve_enable; + /* Valid.Cookie.Life - 60 seconds */ + unsigned long valid_cookie_life; + + /* Delayed SACK timeout 200ms default*/ + unsigned long sack_timeout; + + /* HB.interval - 30 seconds */ + unsigned long hb_interval; + /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) * Max.Init.Retransmits - 8 attempts @@ -168,12 +174,6 @@ extern struct sctp_globals { */ int rcvbuf_policy; - /* Delayed SACK timeout 200ms default*/ - int sack_timeout; - - /* HB.interval - 30 seconds */ - int hb_interval; - /* The following variables are implementation specific. */ /* Default initialization values to be applied to new associations. */ @@ -405,8 +405,9 @@ struct sctp_cookie { /* The format of our cookie that we send to our peer. */ struct sctp_signed_cookie { __u8 signature[SCTP_SECRET_SIZE]; + __u32 __pad; /* force sctp_cookie alignment to 64 bits */ struct sctp_cookie c; -}; +} __attribute__((packed)); /* This is another convenience type to allocate memory for address * params for the maximum size and pass such structures around @@ -827,7 +828,7 @@ struct sctp_transport { __u32 rtt; /* This is the most recent RTT. */ /* RTO : The current retransmission timeout value. */ - __u32 rto; + unsigned long rto; /* RTTVAR : The current RTT variation. 
*/ __u32 rttvar; @@ -877,22 +878,10 @@ struct sctp_transport { /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to * the destination address every heartbeat interval. */ - __u32 hbinterval; - - /* This is the max_retrans value for the transport and will - * be initialized from the assocs value. This can be changed - * using SCTP_SET_PEER_ADDR_PARAMS socket option. - */ - __u16 pathmaxrxt; - - /* PMTU : The current known path MTU. */ - __u32 pathmtu; + unsigned long hbinterval; /* SACK delay timeout */ - __u32 sackdelay; - - /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */ - __u32 param_flags; + unsigned long sackdelay; /* When was the last time (in jiffies) that we heard from this * transport? We use this to pick new active and retran paths. @@ -904,6 +893,18 @@ struct sctp_transport { */ unsigned long last_time_ecne_reduced; + /* This is the max_retrans value for the transport and will + * be initialized from the assocs value. This can be changed + * using SCTP_SET_PEER_ADDR_PARAMS socket option. + */ + __u16 pathmaxrxt; + + /* PMTU : The current known path MTU. */ + __u32 pathmtu; + + /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */ + __u32 param_flags; + /* The number of times INIT has been sent on this transport. */ int init_sent_count; @@ -1249,6 +1250,14 @@ struct sctp_endpoint { int last_key; int key_changed_at; + /* digest: This is a digest of the sctp cookie. This field is + * only used on the receive path when we try to validate + * that the cookie has not been tampered with. We put + * this here so we pre-allocate this once and can re-use + * on every receive. + */ + __u8 digest[SCTP_SIGNATURE_SIZE]; + /* sendbuf acct. policy. */ __u32 sndbuf_policy; @@ -1499,9 +1508,9 @@ struct sctp_association { * These values will be initialized by system defaults, but can * be modified via the SCTP_RTOINFO socket option. */ - __u32 rto_initial; - __u32 rto_max; - __u32 rto_min; + unsigned long rto_initial; + unsigned long rto_max; + unsigned long rto_min; /* Maximum number of new data packets that can be sent in a burst. */ int max_burst; @@ -1519,13 +1528,13 @@ struct sctp_association { __u16 init_retries; /* The largest timeout or RTO value to use in attempting an INIT */ - __u16 max_init_timeo; + unsigned long max_init_timeo; /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to * the destination address every heartbeat interval. This value * will be inherited by all new transports. */ - __u32 hbinterval; + unsigned long hbinterval; /* This is the max_retrans value for new transports in the * association. @@ -1537,13 +1546,14 @@ struct sctp_association { */ __u32 pathmtu; - /* SACK delay timeout */ - __u32 sackdelay; - /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */ __u32 param_flags; - int timeouts[SCTP_NUM_TIMEOUT_TYPES]; + /* SACK delay timeout */ + unsigned long sackdelay; + + + unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES]; struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES]; /* Transport to which SHUTDOWN chunk was last sent. */ @@ -1648,7 +1658,10 @@ struct sctp_association { /* How many duplicated TSNs have we seen? */ int numduptsns; - /* Number of seconds of idle time before an association is closed. */ + /* Number of seconds of idle time before an association is closed. 
+ * In the association context, this is really used as a boolean + * since the real timeout is stored in the timeouts array + */ __u32 autoclose; /* These are to support diff --git a/net/sctp/input.c b/net/sctp/input.c index 4aa6fc60357c..cb78b50868ee 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb) */ sctp_bh_lock_sock(sk); + /* It is possible that the association could have moved to a different + * socket if it is peeled off. If so, update the sk. + */ + if (sk != rcvr->sk) { + sctp_bh_lock_sock(rcvr->sk); + sctp_bh_unlock_sock(sk); + sk = rcvr->sk; + } + if (sock_owned_by_user(sk)) sk_add_backlog(sk, skb); else sctp_backlog_rcv(sk, skb); - /* Release the sock and any reference counts we took in the - * lookup calls. + /* Release the sock and the sock ref we took in the lookup calls. + * The asoc/ep ref will be released in sctp_backlog_rcv. */ sctp_bh_unlock_sock(sk); - if (asoc) - sctp_association_put(asoc); - else - sctp_endpoint_put(ep); sock_put(sk); + return ret; discard_it: @@ -296,12 +302,50 @@ discard_release: int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; - struct sctp_inq *inqueue = &chunk->rcvr->inqueue; - - sctp_inq_push(inqueue, chunk); + struct sctp_inq *inqueue = NULL; + struct sctp_ep_common *rcvr = NULL; + + rcvr = chunk->rcvr; + + BUG_TRAP(rcvr->sk == sk); + + if (rcvr->dead) { + sctp_chunk_free(chunk); + } else { + inqueue = &chunk->rcvr->inqueue; + sctp_inq_push(inqueue, chunk); + } + + /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ + if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) + sctp_association_put(sctp_assoc(rcvr)); + else + sctp_endpoint_put(sctp_ep(rcvr)); + return 0; } +void sctp_backlog_migrate(struct sctp_association *assoc, + struct sock *oldsk, struct sock *newsk) +{ + struct sk_buff *skb; + struct sctp_chunk *chunk; + + skb = oldsk->sk_backlog.head; + oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL; + while (skb != NULL) { + struct sk_buff *next = skb->next; + + chunk = SCTP_INPUT_CB(skb)->chunk; + skb->next = NULL; + if (&assoc->base == chunk->rcvr) + sk_add_backlog(newsk, skb); + else + sk_add_backlog(oldsk, skb); + skb = next; + } +} + /* Handle icmp frag needed error. */ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t, __u32 pmtu) @@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb) sctp_errhdr_t *err; ch = (sctp_chunkhdr_t *) skb->data; - ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); /* Scan through all the chunks in the packet. */ - while (ch_end > (__u8 *)ch && ch_end < skb->tail) { + do { + /* Break out if chunk length is less then minimal. */ + if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) + break; + + ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); + if (ch_end > skb->tail) + break; /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the * receiver MUST silently discard the OOTB packet and take no @@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb) } ch = (sctp_chunkhdr_t *) ch_end; - ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); - } + } while (ch_end < skb->tail); return 0; diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 2d33922c044b..297b8951463e 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue) /* If there is a packet which is currently being worked on, * free it as well. 
*/ - if (queue->in_progress) + if (queue->in_progress) { sctp_chunk_free(queue->in_progress); + queue->in_progress = NULL; + } if (queue->malloced) { /* Dump the master memory segment. */ diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 6e4dc28874d7..d47a52c303a8 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) { - if (*pos > sctp_ep_hashsize) + if (*pos >= sctp_ep_hashsize) return NULL; if (*pos < 0) @@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); - ++*pos; - return (void *)pos; } @@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v) static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - if (*pos > sctp_ep_hashsize) + if (++*pos >= sctp_ep_hashsize) return NULL; - ++*pos; - return pos; } @@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; - int hash = *(int *)v; + int hash = *(loff_t *)v; - if (hash > sctp_ep_hashsize) + if (hash >= sctp_ep_hashsize) return -ENOMEM; - head = &sctp_ep_hashtable[hash-1]; + head = &sctp_ep_hashtable[hash]; sctp_local_bh_disable(); read_lock(&head->lock); for (epb = head->chain; epb; epb = epb->next) { ep = sctp_ep(epb); sk = epb->sk; seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, - sctp_sk(sk)->type, sk->sk_state, hash-1, + sctp_sk(sk)->type, sk->sk_state, hash, epb->bind_addr.port, sock_i_uid(sk), sock_i_ino(sk)); @@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void) static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) { - if (*pos > sctp_assoc_hashsize) + if (*pos >= sctp_assoc_hashsize) return NULL; if (*pos < 0) @@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " "RPORT LADDRS <-> RADDRS\n"); - ++*pos; - return (void *)pos; } @@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - if (*pos > sctp_assoc_hashsize) + if (++*pos >= sctp_assoc_hashsize) return NULL; - ++*pos; - return pos; } @@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) struct sctp_ep_common *epb; struct sctp_association *assoc; struct sock *sk; - int hash = *(int *)v; + int hash = *(loff_t *)v; - if (hash > sctp_assoc_hashsize) + if (hash >= sctp_assoc_hashsize) return -ENOMEM; - head = &sctp_assoc_hashtable[hash-1]; + head = &sctp_assoc_hashtable[hash]; sctp_local_bh_disable(); read_lock(&head->lock); for (epb = head->chain; epb; epb = epb->next) { @@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, - assoc->state, hash-1, assoc->assoc_id, + assoc->state, hash, assoc->assoc_id, (sk->sk_rcvbuf - assoc->rwnd), assoc->sndbuf_used, sock_i_uid(sk), sock_i_ino(sk), diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 556c495c6922..5e0de3c0eead 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1275,7 +1275,12 @@ static sctp_cookie_param_t 
*sctp_pack_cookie(const struct sctp_endpoint *ep, unsigned int keylen; char *key; - headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE; + /* Header size is static data prior to the actual cookie, including + * any padding. + */ + headersize = sizeof(sctp_paramhdr_t) + + (sizeof(struct sctp_signed_cookie) - + sizeof(struct sctp_cookie)); bodysize = sizeof(struct sctp_cookie) + ntohs(init_chunk->chunk_hdr->length) + addrs_len; @@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie( struct sctp_signed_cookie *cookie; struct sctp_cookie *bear_cookie; int headersize, bodysize, fixed_size; - __u8 digest[SCTP_SIGNATURE_SIZE]; + __u8 *digest = ep->digest; struct scatterlist sg; unsigned int keylen, len; char *key; @@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie( struct sk_buff *skb = chunk->skb; struct timeval tv; - headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE; + /* Header size is static data prior to the actual cookie, including + * any padding. + */ + headersize = sizeof(sctp_chunkhdr_t) + + (sizeof(struct sctp_signed_cookie) - + sizeof(struct sctp_cookie)); bodysize = ntohs(chunk->chunk_hdr->length) - headersize; fixed_size = headersize + sizeof(struct sctp_cookie); diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b8b38aba92b3..8d1dc24bab4c 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, "T1 INIT Timeout adjustment" " init_err_counter: %d" " cycle: %d" - " timeout: %d\n", + " timeout: %ld\n", asoc->init_err_counter, asoc->init_cycle, asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]); @@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, SCTP_DEBUG_PRINTK( "T1 COOKIE Timeout adjustment" " init_err_counter: %d" - " timeout: %d\n", + " timeout: %ld\n", asoc->init_err_counter, asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 477d7f80dba6..71c9a961c321 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); + if (ch_end > skb->tail) + break; if (SCTP_CID_SHUTDOWN_ACK == ch->type) ootb_shut_ack = 1; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c98ee375ba5e..fb1821d9f338 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->hbinterval = jiffies_to_msecs(sctp_hb_interval); sp->pathmaxrxt = sctp_max_retrans_path; sp->pathmtu = 0; // allow default discovery - sp->sackdelay = sctp_sack_timeout; + sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout); sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; @@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, */ newsp->type = type; + spin_lock_bh(&oldsk->sk_lock.slock); + /* Migrate the backlog from oldsk to newsk. */ + sctp_backlog_migrate(assoc, oldsk, newsk); /* Migrate the association to the new socket. */ sctp_assoc_migrate(assoc, newsk); + spin_unlock_bh(&oldsk->sk_lock.slock); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. 
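The sctp_pack_cookie()/sctp_unpack_cookie() hunks above stop hard-coding SCTP_SECRET_SIZE as the header length and instead compute it as the size of struct sctp_signed_cookie minus the size of the embedded struct sctp_cookie, so the new __pad field (added so the cookie starts on a 64-bit boundary) is counted automatically. A minimal user-space sketch of that sizing trick follows; the demo_* structs and field sizes are invented for illustration and are not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins only; these are not the kernel's definitions. */
struct demo_cookie {
	uint64_t expires;		/* forces 8-byte alignment of the cookie */
	uint32_t assoc_id;
};

struct demo_signed_cookie {
	uint8_t  signature[20];		/* HMAC over the embedded cookie */
	uint32_t pad;			/* keeps 'c' on a 64-bit boundary */
	struct demo_cookie c;
} __attribute__((packed));

int main(void)
{
	/* Everything that precedes the embedded cookie, padding included. */
	size_t header = sizeof(struct demo_signed_cookie)
			- sizeof(struct demo_cookie);

	printf("header before cookie: %zu bytes\n", header);
	printf("cookie offset:        %zu bytes\n",
	       offsetof(struct demo_signed_cookie, c));
	return 0;
}

Computing the header as outer-minus-inner keeps the value correct even if the signature size or the padding changes later.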
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index fcd7096c953d..dc6f3ff32358 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -159,12 +159,9 @@ static ctl_table sctp_table[] = { .ctl_name = NET_SCTP_PRESERVE_ENABLE, .procname = "cookie_preserve_enable", .data = &sctp_cookie_preserve_enable, - .maxlen = sizeof(long), + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_doulongvec_ms_jiffies_minmax, - .strategy = &sctp_sysctl_jiffies_ms, - .extra1 = &rto_timer_min, - .extra2 = &rto_timer_max + .proc_handler = &proc_dointvec }, { .ctl_name = NET_SCTP_RTO_ALPHA, diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 68d73e2dd155..160f62ad1cc5 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) tp->rto_pending = 0; SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " - "rttvar: %d, rto: %d\n", __FUNCTION__, + "rttvar: %d, rto: %ld\n", __FUNCTION__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); }
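The asm-ia64/semaphore.h hunks switch the fast path from atomic_dec_return()/atomic_inc_return() to ia64_fetchadd(), which returns the value the counter held before the add; that is why the tests change from new < 0 to old < 1 and from new <= 0 to old <= -1, and why an acquire form is used on down and a release form on up. Below is a rough portable sketch of the same idea using C11 atomics rather than the ia64 intrinsic; demo_sem, __down_slow() and __up_slow() are invented stand-ins for the kernel's __down()/__up() contention paths.

#include <stdatomic.h>

struct demo_sem {
	atomic_int count;	/* > 0: free slots, <= 0: contended */
};

void __down_slow(struct demo_sem *sem);	/* assumed to exist elsewhere */
void __up_slow(struct demo_sem *sem);	/* assumed to exist elsewhere */

static inline void demo_down(struct demo_sem *sem)
{
	/*
	 * fetch_add returns the value *before* the add, like ia64_fetchadd,
	 * so "was it already taken?" is old < 1 rather than new < 0.
	 * Acquire ordering keeps the critical section from moving above
	 * the acquisition.
	 */
	if (atomic_fetch_add_explicit(&sem->count, -1, memory_order_acquire) < 1)
		__down_slow(sem);
}

static inline void demo_up(struct demo_sem *sem)
{
	/* Old value <= -1 means somebody is (or is about to be) sleeping. */
	if (atomic_fetch_add_explicit(&sem->count, 1, memory_order_release) <= -1)
		__up_slow(sem);
}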
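Much of the locking churn in this merge (ia32_mmap_sem, the xp/xpc registration and channel locks, the perfmon restart semaphore) is the 2.6.16-era conversion of semaphores used as plain locks to struct mutex, and of semaphores initialised to zero and used as one-shot event waits to struct completion. A condensed sketch of both conversion patterns; the foo/bar names are placeholders, not code from this patch.

#include <linux/mutex.h>
#include <linux/completion.h>

/* Pattern 1: a semaphore used purely for mutual exclusion becomes a mutex. */
static DEFINE_MUTEX(foo_mutex);			/* was: static DECLARE_MUTEX(foo_sem); */

static void foo_touch_shared_state(void)
{
	mutex_lock(&foo_mutex);			/* was: down(&foo_sem); */
	/* ... shared state ... */
	mutex_unlock(&foo_mutex);		/* was: up(&foo_sem);   */
}

/*
 * Pattern 2: a semaphore initialised to 0 and used to wait for a one-shot
 * event becomes a completion.
 */
struct bar_ctx {
	struct completion done;			/* was: struct semaphore done_sem; */
};

static void bar_init(struct bar_ctx *ctx)
{
	init_completion(&ctx->done);		/* was: sema_init(&ctx->done_sem, 0); */
}

static void bar_wait(struct bar_ctx *ctx)
{
	wait_for_completion(&ctx->done);	/* was: down(&ctx->done_sem); */
}

static void bar_signal(struct bar_ctx *ctx)
{
	complete(&ctx->done);			/* was: up(&ctx->done_sem); */
}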
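The x_tables.h hunk widens XT_ALIGN() from pointer alignment to u_int64_t alignment, so both sides of a 32-bit/64-bit compat boundary compute the same padded sizes for match and target blobs. The round-up idiom itself is generic; a small stand-alone check is sketched below (DEMO_ALIGN is a placeholder name, and the printed values depend on the ABI the sketch is compiled for).

#include <stdint.h>
#include <stdio.h>

/* Round s up to the alignment of type t; the alignment must be a power of two. */
#define DEMO_ALIGN(s, t) \
	(((s) + (__alignof__(t) - 1)) & ~(size_t)(__alignof__(t) - 1))

int main(void)
{
	/*
	 * On a typical 32-bit GCC target __alignof__(void *) is 4 while
	 * __alignof__(uint64_t) is 8, so the two roundings disagree for 20;
	 * on most 64-bit targets both print 24.
	 */
	printf("align 20 to void *:   %zu\n", (size_t)DEMO_ALIGN(20, void *));
	printf("align 20 to uint64_t: %zu\n", (size_t)DEMO_ALIGN(20, uint64_t));
	return 0;
}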
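The sctp structs.h and socket.c hunks change every field that actually holds jiffies (rto, hbinterval, sackdelay, valid_cookie_life, the timeouts[] array) to unsigned long, the natural type for jiffies values, and convert to and from milliseconds only at the socket-option boundary with msecs_to_jiffies()/jiffies_to_msecs(). A tiny sketch of that convention; struct demo_transport and the helpers are illustrative only.

#include <linux/jiffies.h>
#include <linux/types.h>

/* Internal state keeps jiffies in unsigned long, the native tick type. */
struct demo_transport {
	unsigned long rto;		/* jiffies */
	unsigned long hbinterval;	/* jiffies */
	unsigned long sackdelay;	/* jiffies */
};

/* The user-visible API speaks milliseconds; convert only at the boundary. */
static void demo_set_hb_interval_ms(struct demo_transport *t, __u32 ms)
{
	t->hbinterval = msecs_to_jiffies(ms);
}

static __u32 demo_get_hb_interval_ms(const struct demo_transport *t)
{
	return jiffies_to_msecs(t->hbinterval);
}

Keeping the stored value in one unit avoids the kind of mix-up the socket.c hunk above corrects, where sctp_sack_timeout now goes through jiffies_to_msecs() before landing in the per-socket millisecond field.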
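sctp_rcv_ootb() and sctp_sf_ootb() now validate each chunk length before using it: a length shorter than the chunk header, or one that would carry the cursor past skb->tail, stops the walk instead of being trusted to compute the next chunk pointer. The same defensive pattern for a generic type-length record stream is sketched below; demo_hdr, DEMO_ROUND and demo_walk are invented names and the sketch assumes host byte order.

#include <stddef.h>
#include <stdint.h>

struct demo_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* header + payload length, host byte order here */
};

/* Records are padded to a 4-byte boundary, as in WORD_ROUND(). */
#define DEMO_ROUND(len)	((((size_t)(len)) + 3) & ~(size_t)3)

/*
 * Walk records in buf[0..buflen); return how many well-formed records were
 * seen, stopping at the first malformed one instead of trusting its length.
 */
static size_t demo_walk(const uint8_t *buf, size_t buflen)
{
	const uint8_t *end = buf + buflen;
	const struct demo_hdr *ch = (const struct demo_hdr *)buf;
	size_t seen = 0;

	do {
		size_t len;

		if ((const uint8_t *)ch + sizeof(*ch) > end)
			break;			/* no room left for a header */
		len = ch->length;
		if (len < sizeof(*ch))
			break;			/* shorter than the header: bogus */
		if ((const uint8_t *)ch + DEMO_ROUND(len) > end)
			break;			/* would run past the buffer */

		seen++;
		ch = (const struct demo_hdr *)((const uint8_t *)ch + DEMO_ROUND(len));
	} while ((const uint8_t *)ch < end);

	return seen;
}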
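The net/sctp/proc.c hunks make the /proc iterators treat *pos as the hash bucket index directly: ->start() no longer increments it, ->next() does the single ++ and bounds it with >=, and ->show() reads it back as loff_t instead of int, which removes the hash-1 off-by-one indexing. A minimal seq_file iterator over a fixed-size table written the same way; DEMO_SIZE, demo_table and the demo_seq_* names are placeholders.

#include <linux/seq_file.h>

#define DEMO_SIZE 64			/* placeholder table size */
static int demo_table[DEMO_SIZE];	/* placeholder data */

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* *pos is the bucket index itself; start() does not advance it. */
	if (*pos >= DEMO_SIZE)
		return NULL;
	return pos;
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	/* Only next() advances the cursor, and the bound check is >=. */
	if (++*pos >= DEMO_SIZE)
		return NULL;
	return pos;
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	loff_t idx = *(loff_t *)v;	/* read it back as loff_t, not int */

	seq_printf(seq, "%2lld %d\n", (long long)idx, demo_table[idx]);
	return 0;
}

static struct seq_operations demo_seq_ops = {
	.start	= demo_seq_start,
	.next	= demo_seq_next,
	.stop	= demo_seq_stop,
	.show	= demo_seq_show,
};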