author    | Steve French <sfrench@us.ibm.com> | 2008-02-15 22:06:08 +0100
committer | Steve French <sfrench@us.ibm.com> | 2008-02-15 22:06:08 +0100
commit    | 0a3abcf75bf391fec4e32356ab5ddb8f5d2e6b41 (patch)
tree      | b80b1d344ec24cad28b057ef803cebac9434be01 /arch/sh/kernel
parent    | [CIFS] factoring out common code in get_inode_info functions (diff)
parent    | Linux 2.6.25-rc2 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/sh/kernel')
27 files changed, 681 insertions, 303 deletions
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index c89289831053..62bf373266f7 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -22,5 +22,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM) += pm.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 1ef21cc087f3..e01283d49cbf 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -18,5 +18,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_PM) += pm.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index cc1836e47a5d..462a8f6dfee2 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -6,4 +6,3 @@ obj-y += intc.o
 obj-$(CONFIG_SUPERH32) += imask.o
 obj-$(CONFIG_CPU_SH5) += intc-sh5.o
 obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
-obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 43ee7a9a4f0b..d6e0e2bdaad5 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -75,21 +75,6 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
 	-1, -1			/* 0xE00 - 0xE20 */
 };
 
-/*
- * Opposite mapper.
- */
-static int IRQ_to_vectorN[NR_INTC_IRQS] = {
-	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
-	-1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53,		/* 8-15 */
-	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1,	/* 16-23 */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* 24-31 */
-	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
-	0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1,		/* 40-47 */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* 48-55 */
-	-1, -1, -1, -1, -1, -1, -1, 0x2B,		/* 56-63 */
-
-};
-
 static unsigned long intc_virt;
 
 static unsigned int startup_intc_irq(unsigned int irq);
@@ -176,6 +161,18 @@ void make_intc_irq(unsigned int irq)
 }
 
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+	0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
+	-1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53,		/* 8-15 */
+	0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1,	/* 16-23 */
+	-1, -1, -1, -1, -1, -1, -1, -1,			/* 24-31 */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
+	0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1,		/* 40-47 */
+	-1, -1, -1, -1, -1, -1, -1, -1,			/* 48-55 */
+	-1, -1, -1, -1, -1, -1, -1, 0x2B,		/* 56-63 */
+
+};
+
 int intc_irq_describe(char* p, int irq)
 {
 	if (irq < NR_INTC_IRQS)
diff --git a/arch/sh/kernel/cpu/irq/maskreg.c b/arch/sh/kernel/cpu/irq/maskreg.c
deleted file mode 100644
index 978992e367a5..000000000000
--- a/arch/sh/kernel/cpu/irq/maskreg.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Interrupt handling for Simple external interrupt mask register
- *
- * Copyright (C) 2001 A&D Co., Ltd. <http://www.aandd.co.jp>
- *
- * This is for the machine which have single 16 bit register
- * for masking external IRQ individually.
- * Each bit of the register is for masking each interrupt.
- *
- * This file may be copied or modified under the terms of the GNU
- * General Public License. See linux/COPYING for more information.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/system.h>
-#include <asm/io.h>
-
-/* address of external interrupt mask register */
-unsigned long irq_mask_register;
-
-/* forward declaration */
-static unsigned int startup_maskreg_irq(unsigned int irq);
-static void shutdown_maskreg_irq(unsigned int irq);
-static void enable_maskreg_irq(unsigned int irq);
-static void disable_maskreg_irq(unsigned int irq);
-static void mask_and_ack_maskreg(unsigned int);
-static void end_maskreg_irq(unsigned int irq);
-
-/* hw_interrupt_type */
-static struct hw_interrupt_type maskreg_irq_type = {
-	.typename = "Mask Register",
-	.startup = startup_maskreg_irq,
-	.shutdown = shutdown_maskreg_irq,
-	.enable = enable_maskreg_irq,
-	.disable = disable_maskreg_irq,
-	.ack = mask_and_ack_maskreg,
-	.end = end_maskreg_irq
-};
-
-/* actual implementation */
-static unsigned int startup_maskreg_irq(unsigned int irq)
-{
-	enable_maskreg_irq(irq);
-	return 0;	/* never anything pending */
-}
-
-static void shutdown_maskreg_irq(unsigned int irq)
-{
-	disable_maskreg_irq(irq);
-}
-
-static void disable_maskreg_irq(unsigned int irq)
-{
-	unsigned short val, mask = 0x01 << irq;
-
-	BUG_ON(!irq_mask_register);
-
-	/* Set "irq"th bit */
-	val = ctrl_inw(irq_mask_register);
-	val |= mask;
-	ctrl_outw(val, irq_mask_register);
-}
-
-static void enable_maskreg_irq(unsigned int irq)
-{
-	unsigned short val, mask = ~(0x01 << irq);
-
-	BUG_ON(!irq_mask_register);
-
-	/* Clear "irq"th bit */
-	val = ctrl_inw(irq_mask_register);
-	val &= mask;
-	ctrl_outw(val, irq_mask_register);
-}
-
-static void mask_and_ack_maskreg(unsigned int irq)
-{
-	disable_maskreg_irq(irq);
-}
-
-static void end_maskreg_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_maskreg_irq(irq);
-}
-
-void make_maskreg_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	irq_desc[irq].handler = &maskreg_irq_type;
-	disable_maskreg_irq(irq);
-}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index f2b9238cda04..9e89984c4f1d 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -126,12 +126,18 @@ int __init detect_cpu_and_cache_system(void)
 			      CPU_HAS_LLSC;
 		break;
 	case 0x3008:
-		if (prr == 0xa0) {
+		if (prr == 0xa0 || prr == 0xa1) {
 			boot_cpu_data.type = CPU_SH7722;
 			boot_cpu_data.icache.ways = 4;
 			boot_cpu_data.dcache.ways = 4;
 			boot_cpu_data.flags |= CPU_HAS_LLSC;
 		}
+		else if (prr == 0x70) {
+			boot_cpu_data.type = CPU_SH7366;
+			boot_cpu_data.icache.ways = 4;
+			boot_cpu_data.dcache.ways = 4;
+			boot_cpu_data.flags |= CPU_HAS_LLSC;
+		}
 		break;
 	case 0x4000:	/* 1st cut */
 	case 0x4001:	/* 2nd cut */
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 3008c00eea6b..8250e017bd4e 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -263,7 +263,7 @@ struct sq_sysfs_attr {
 	ssize_t (*store)(const char *buf, size_t count);
 };
 
-#define to_sq_sysfs_attr(attr)	container_of(attr, struct sq_sysfs_attr, attr)
+#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)
 
 static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
 			     char *buf)
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 08ac6387bf17..5d890ac8e793 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
 obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
 
 # SMP setup
@@ -21,6 +22,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7722.o
 clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
 
 obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index a0fd8bb21f7c..299138ebe160 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/kernel/cpu/sh4a/clock-sh7722.c
  *
- * SH7722 support for the clock framework
+ * SH7722 & SH7366 support for the clock framework
  *
  * Copyright (c) 2006-2007 Nomad Global Solutions Inc
  * Based on code for sh7343 by Paul Mundt
@@ -417,15 +417,19 @@ static int sh7722_siu_which(struct clk *clk)
 		return 0;
 	if (!strcmp(clk->name, "siu_b_clk"))
 		return 1;
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
 	if (!strcmp(clk->name, "irda_clk"))
 		return 2;
+#endif
 	return -EINVAL;
 }
 
 static unsigned long sh7722_siu_regs[] = {
 	[0] = SCLKACR,
 	[1] = SCLKBCR,
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
 	[2] = IrDACLKCR,
+#endif
 };
 
 static int sh7722_siu_start_stop(struct clk *clk, int enable)
@@ -571,10 +575,12 @@ static struct clk sh7722_siu_b_clock = {
 	.ops = &sh7722_siu_clk_ops,
 };
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
 static struct clk sh7722_irda_clock = {
 	.name = "irda_clk",
 	.ops = &sh7722_siu_clk_ops,
 };
+#endif
 
 static struct clk sh7722_video_clock = {
 	.name = "video_clk",
@@ -588,7 +594,9 @@ static struct clk *sh7722_clocks[] = {
 	&sh7722_sdram_clock,
 	&sh7722_siu_a_clock,
 	&sh7722_siu_b_clock,
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
 	&sh7722_irda_clock,
+#endif
 	&sh7722_video_clock,
 };
 
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
new file mode 100644
index 000000000000..967e8b69a2f8
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -0,0 +1,177 @@
+/*
+ * SH7366 Setup
+ *
+ * Copyright (C) 2008 Renesas Solutions
+ *
+ * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+	{
+		.mapbase	= 0xffe00000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		= { 80, 80, 80, 80 },
+	}, {
+		.flags = 0,
+	}
+};
+
+static struct platform_device sci_device = {
+	.name		= "sh-sci",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= sci_platform_data,
+	},
+};
+
+static struct platform_device *sh7366_devices[] __initdata = {
+	&sci_device,
+};
+
+static int __init sh7366_devices_setup(void)
+{
+	return platform_add_devices(sh7366_devices,
+				    ARRAY_SIZE(sh7366_devices));
+}
+__initcall(sh7366_devices_setup);
+
+enum {
+	UNUSED=0,
+
+	/* interrupt sources */
+	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+	ICB,
+	DMAC0, DMAC1, DMAC2, DMAC3,
+	VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+	MFI, VPU, USB,
+	MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
+	DMAC4, DMAC5, DMAC_DADERR,
+	SCIF, SCIFA1, SCIFA2,
+	DENC, MSIOF,
+	FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+	I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
+	SDHI0, SDHI1, SDHI2, SDHI3,
+	CMT, TSIF, SIU,
+	TMU0, TMU1, TMU2,
+	VEU2, LCDC,
+
+	/* interrupt groups */
+
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+	INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+	INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+	INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+	INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+	INTC_VECT(ICB, 0x700),
+	INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+	INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+	INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+	INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+	INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
+	INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
+	INTC_VECT(MMC_MMC3I, 0xb40),
+	INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+	INTC_VECT(DMAC_DADERR, 0xbc0),
+	INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
+	INTC_VECT(SCIFA2, 0xc40),
+	INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
+	INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+	INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
+	INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
+	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
+	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+	INTC_VECT(SIU, 0xf80),
+	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+	INTC_VECT(TMU2, 0x440),
+	INTC_VECT(VEU2, 0x580), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+	INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+	INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+	INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
+	INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+	INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
+	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+	  { } },
+	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+	  { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+	  { 0, 0, 0, VPU, 0, 0, 0, MFI } },
+	{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+	  { 0, 0, 0, ICB } },
+	{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+	  { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
+	{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+	  { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
+	{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+	  { 0, 0, 0, 0, 0, 0, 0, MSIOF } },
+	{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+	  { 0, 0, 0, CMT, 0, USB, } },
+	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+	  { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
+	{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+	  { 0, 0, 0, 0, 0, 0, 0, TSIF } },
+	{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+	{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+	{ 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
+	{ 0xa4080008, 0, 16, 4, /* IPRC */ { } },
+	{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+	{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
+	{ 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
+	{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
+	{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
+	{ 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
+	{ 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
+	{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
+	{ 0xa408002c, 0, 16, 4, /* IPRL */ { } },
+	{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
+	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+	{ 0xa414001c, 16, 2, /* ICR1 */
+	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7366", vectors, groups,
+			 mask_registers, prio_registers, sense_registers);
+
+void __init plat_irq_setup(void)
+{
+	register_intc_controller(&intc_desc);
+}
+
+void __init plat_mem_setup(void)
+{
+	/* TODO: Register Node 1 */
+}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 15d167fd0ae7..31f8cb0f6374 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -20,19 +20,18 @@ int __init detect_cpu_and_cache_system(void)
 {
 	unsigned long long cir;
-	/* Do peeks in real mode to avoid having to set up a mapping for the
-	   WPC registers. On SH5-101 cut2, such a mapping would be exposed to
-	   an address translation erratum which would make it hard to set up
-	   correctly. */
+	/*
+	 * Do peeks in real mode to avoid having to set up a mapping for
+	 * the WPC registers. On SH5-101 cut2, such a mapping would be
+	 * exposed to an address translation erratum which would make it
+	 * hard to set up correctly.
+	 */
 	cir = peek_real_address_q(0x0d000008);
 
-	if ((cir & 0xffff) == 0x5103) {
+	if ((cir & 0xffff) == 0x5103)
 		boot_cpu_data.type = CPU_SH5_103;
-	} else if (((cir >> 32) & 0xffff) == 0x51e2) {
+	else if (((cir >> 32) & 0xffff) == 0x51e2)
 		/* CPU.VCR aliased at CIR address on SH5-101 */
 		boot_cpu_data.type = CPU_SH5_101;
-	} else {
-		boot_cpu_data.type = CPU_SH_NONE;
-	}
 
 	/*
 	 * First, setup some sane values for the I-cache.
@@ -40,37 +39,33 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.icache.ways = 4; boot_cpu_data.icache.sets = 256; boot_cpu_data.icache.linesz = L1_CACHE_BYTES; + boot_cpu_data.icache.way_incr = (1 << 13); + boot_cpu_data.icache.entry_shift = 5; + boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets * + boot_cpu_data.icache.linesz; + boot_cpu_data.icache.entry_mask = 0x1fe0; + boot_cpu_data.icache.flags = 0; -#if 0 /* - * FIXME: This can probably be cleaned up a bit as well.. for example, - * do we really need the way shift _and_ the way_step_shift ?? Judging - * by the existing code, I would guess no.. is there any valid reason - * why we need to be tracking this around? + * Next, setup some sane values for the D-cache. + * + * On the SH5, these are pretty consistent with the I-cache settings, + * so we just copy over the existing definitions.. these can be fixed + * up later, especially if we add runtime CPU probing. + * + * Though in the meantime it saves us from having to duplicate all of + * the above definitions.. */ - boot_cpu_data.icache.way_shift = 13; - boot_cpu_data.icache.entry_shift = 5; - boot_cpu_data.icache.set_shift = 4; - boot_cpu_data.icache.way_step_shift = 16; - boot_cpu_data.icache.asid_shift = 2; + boot_cpu_data.dcache = boot_cpu_data.icache; /* - * way offset = cache size / associativity, so just don't factor in - * associativity in the first place.. + * Setup any cache-related flags here */ - boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets * - boot_cpu_data.icache.linesz; - - boot_cpu_data.icache.asid_mask = 0x3fc; - boot_cpu_data.icache.idx_mask = 0x1fe0; - boot_cpu_data.icache.epn_mask = 0xffffe000; +#if defined(CONFIG_CACHE_WRITETHROUGH) + set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags)); +#elif defined(CONFIG_CACHE_WRITEBACK) + set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags)); #endif - boot_cpu_data.icache.flags = 0; - - /* A trivial starting point.. 
*/ - memcpy(&boot_cpu_data.dcache, - &boot_cpu_data.icache, sizeof(struct cache_info)); - return 0; } diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index 71c9fde2fd90..2b8991229900 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c @@ -63,7 +63,13 @@ EXPORT_SYMBOL(memset_io); void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return sh_mv.mv_ioport_map(port, nr); + void __iomem *ret; + + ret = __ioport_map_trapped(port, nr); + if (ret) + return ret; + + return __ioport_map(port, nr); } EXPORT_SYMBOL(ioport_map); diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index 771ea4230441..db769449f5a7 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -33,17 +33,17 @@ static inline void delay(void) u8 generic_inb(unsigned long port) { - return ctrl_inb((unsigned long __force)ioport_map(port, 1)); + return ctrl_inb((unsigned long __force)__ioport_map(port, 1)); } u16 generic_inw(unsigned long port) { - return ctrl_inw((unsigned long __force)ioport_map(port, 2)); + return ctrl_inw((unsigned long __force)__ioport_map(port, 2)); } u32 generic_inl(unsigned long port) { - return ctrl_inl((unsigned long __force)ioport_map(port, 4)); + return ctrl_inl((unsigned long __force)__ioport_map(port, 4)); } u8 generic_inb_p(unsigned long port) @@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count) volatile u8 *port_addr; u8 *buf = dst; - port_addr = (volatile u8 *)ioport_map(port, 1); + port_addr = (volatile u8 *)__ioport_map(port, 1); while (count--) *buf++ = *port_addr; } @@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count) volatile u16 *port_addr; u16 *buf = dst; - port_addr = (volatile u16 *)ioport_map(port, 2); + port_addr = (volatile u16 *)__ioport_map(port, 2); while (count--) *buf++ = *port_addr; @@ -103,7 +103,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count) volatile u32 *port_addr; u32 *buf = dst; - port_addr = (volatile u32 *)ioport_map(port, 4); + port_addr = (volatile u32 *)__ioport_map(port, 4); while (count--) *buf++ = *port_addr; @@ -112,17 +112,17 @@ void generic_insl(unsigned long port, void *dst, unsigned long count) void generic_outb(u8 b, unsigned long port) { - ctrl_outb(b, (unsigned long __force)ioport_map(port, 1)); + ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1)); } void generic_outw(u16 b, unsigned long port) { - ctrl_outw(b, (unsigned long __force)ioport_map(port, 2)); + ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2)); } void generic_outl(u32 b, unsigned long port) { - ctrl_outl(b, (unsigned long __force)ioport_map(port, 4)); + ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4)); } void generic_outb_p(u8 b, unsigned long port) @@ -153,7 +153,7 @@ void generic_outsb(unsigned long port, const void *src, unsigned long count) volatile u8 *port_addr; const u8 *buf = src; - port_addr = (volatile u8 __force *)ioport_map(port, 1); + port_addr = (volatile u8 __force *)__ioport_map(port, 1); while (count--) *port_addr = *buf++; @@ -164,7 +164,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count) volatile u16 *port_addr; const u16 *buf = src; - port_addr = (volatile u16 __force *)ioport_map(port, 2); + port_addr = (volatile u16 __force *)__ioport_map(port, 2); while (count--) *port_addr = *buf++; @@ -177,7 +177,7 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count) volatile u32 *port_addr; const u32 *buf = src; - port_addr = (volatile u32 
__force *)ioport_map(port, 4); + port_addr = (volatile u32 __force *)__ioport_map(port, 4); while (count--) *port_addr = *buf++; diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c new file mode 100644 index 000000000000..86a665d92201 --- /dev/null +++ b/arch/sh/kernel/io_trapped.c @@ -0,0 +1,276 @@ +/* + * Trapped io support + * + * Copyright (C) 2008 Magnus Damm + * + * Intercept io operations by trapping. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> +#include <linux/module.h> +#include <asm/system.h> +#include <asm/mmu_context.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/io_trapped.h> + +#define TRAPPED_PAGES_MAX 16 + +#ifdef CONFIG_HAS_IOPORT +LIST_HEAD(trapped_io); +EXPORT_SYMBOL_GPL(trapped_io); +#endif +#ifdef CONFIG_HAS_IOMEM +LIST_HEAD(trapped_mem); +EXPORT_SYMBOL_GPL(trapped_mem); +#endif +static DEFINE_SPINLOCK(trapped_lock); + +int __init register_trapped_io(struct trapped_io *tiop) +{ + struct resource *res; + unsigned long len = 0, flags = 0; + struct page *pages[TRAPPED_PAGES_MAX]; + int k, n; + + /* structure must be page aligned */ + if ((unsigned long)tiop & (PAGE_SIZE - 1)) + goto bad; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len += roundup((res->end - res->start) + 1, PAGE_SIZE); + flags |= res->flags; + } + + /* support IORESOURCE_IO _or_ MEM, not both */ + if (hweight_long(flags) != 1) + goto bad; + + n = len >> PAGE_SHIFT; + + if (n >= TRAPPED_PAGES_MAX) + goto bad; + + for (k = 0; k < n; k++) + pages[k] = virt_to_page(tiop); + + tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE); + if (!tiop->virt_base) + goto bad; + + len = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n", + (unsigned long)(tiop->virt_base + len), + res->flags & IORESOURCE_IO ? 
"io" : "mmio", + (unsigned long)res->start); + len += roundup((res->end - res->start) + 1, PAGE_SIZE); + } + + tiop->magic = IO_TRAPPED_MAGIC; + INIT_LIST_HEAD(&tiop->list); + spin_lock_irq(&trapped_lock); + if (flags & IORESOURCE_IO) + list_add(&tiop->list, &trapped_io); + if (flags & IORESOURCE_MEM) + list_add(&tiop->list, &trapped_mem); + spin_unlock_irq(&trapped_lock); + + return 0; + bad: + pr_warning("unable to install trapped io filter\n"); + return -1; +} +EXPORT_SYMBOL_GPL(register_trapped_io); + +void __iomem *match_trapped_io_handler(struct list_head *list, + unsigned long offset, + unsigned long size) +{ + unsigned long voffs; + struct trapped_io *tiop; + struct resource *res; + int k, len; + + spin_lock_irq(&trapped_lock); + list_for_each_entry(tiop, list, list) { + voffs = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + if (res->start == offset) { + spin_unlock_irq(&trapped_lock); + return tiop->virt_base + voffs; + } + + len = (res->end - res->start) + 1; + voffs += roundup(len, PAGE_SIZE); + } + } + spin_unlock_irq(&trapped_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(match_trapped_io_handler); + +static struct trapped_io *lookup_tiop(unsigned long address) +{ + pgd_t *pgd_k; + pud_t *pud_k; + pmd_t *pmd_k; + pte_t *pte_k; + pte_t entry; + + pgd_k = swapper_pg_dir + pgd_index(address); + if (!pgd_present(*pgd_k)) + return NULL; + + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + + pte_k = pte_offset_kernel(pmd_k, address); + entry = *pte_k; + + return pfn_to_kaddr(pte_pfn(entry)); +} + +static unsigned long lookup_address(struct trapped_io *tiop, + unsigned long address) +{ + struct resource *res; + unsigned long vaddr = (unsigned long)tiop->virt_base; + unsigned long len; + int k; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len = roundup((res->end - res->start) + 1, PAGE_SIZE); + if (address < (vaddr + len)) + return res->start + (address - vaddr); + vaddr += len; + } + return 0; +} + +static unsigned long long copy_word(unsigned long src_addr, int src_len, + unsigned long dst_addr, int dst_len) +{ + unsigned long long tmp = 0; + + switch (src_len) { + case 1: + tmp = ctrl_inb(src_addr); + break; + case 2: + tmp = ctrl_inw(src_addr); + break; + case 4: + tmp = ctrl_inl(src_addr); + break; + case 8: + tmp = ctrl_inq(src_addr); + break; + } + + switch (dst_len) { + case 1: + ctrl_outb(tmp, dst_addr); + break; + case 2: + ctrl_outw(tmp, dst_addr); + break; + case 4: + ctrl_outl(tmp, dst_addr); + break; + case 8: + ctrl_outq(tmp, dst_addr); + break; + } + + return tmp; +} + +static unsigned long from_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long src_addr = (unsigned long)src; + unsigned long long tmp; + + pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); + tiop = lookup_tiop(src_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + src_addr = lookup_address(tiop, src_addr); + if (!src_addr) + return cnt; + + tmp = copy_word(src_addr, + max_t(unsigned long, cnt, + (tiop->minimum_bus_width / 8)), + (unsigned long)dst, cnt); + + pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); + return 0; +} + +static unsigned long to_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long dst_addr = (unsigned long)dst; + unsigned long long tmp; + + pr_debug("trapped io write 0x%08lx 
(%ld)\n", dst_addr, cnt); + tiop = lookup_tiop(dst_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + dst_addr = lookup_address(tiop, dst_addr); + if (!dst_addr) + return cnt; + + tmp = copy_word((unsigned long)src, cnt, + dst_addr, max_t(unsigned long, cnt, + (tiop->minimum_bus_width / 8))); + + pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); + return 0; +} + +static struct mem_access trapped_io_access = { + from_device, + to_device, +}; + +int handle_trapped_io(struct pt_regs *regs, unsigned long address) +{ + mm_segment_t oldfs; + opcode_t instruction; + int tmp; + + if (!lookup_tiop(address)) + return 0; + + WARN_ON(user_mode(regs)); + + oldfs = get_fs(); + set_fs(KERNEL_DS); + if (copy_from_user(&instruction, (void *)(regs->pc), + sizeof(instruction))) { + set_fs(oldfs); + return 0; + } + + tmp = handle_unaligned_access(instruction, regs, &trapped_io_access); + set_fs(oldfs); + return tmp == 0; +} diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 0586bc62ad96..9bf19b00696a 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -248,9 +248,6 @@ asmlinkage void do_softirq(void) void __init init_IRQ(void) { -#ifdef CONFIG_CPU_HAS_PINT_IRQ - init_IRQ_pint(); -#endif plat_irq_setup(); /* Perform the machine specific initialisation */ diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index cff3b7dc9c56..046999b1d1af 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -623,6 +623,7 @@ extern void interruptible_sleep_on(wait_queue_head_t *q); #define mid_sched ((unsigned long) interruptible_sleep_on) +#ifdef CONFIG_FRAME_POINTER static int in_sh64_switch_to(unsigned long pc) { extern char __sh64_switch_to_end; @@ -631,12 +632,10 @@ static int in_sh64_switch_to(unsigned long pc) return (pc >= (unsigned long) sh64_switch_to) && (pc < (unsigned long) &__sh64_switch_to_end); } +#endif unsigned long get_wchan(struct task_struct *p) { - unsigned long schedule_fp; - unsigned long sh64_switch_to_fp; - unsigned long schedule_caller_pc; unsigned long pc; if (!p || p == current || p->state == TASK_RUNNING) @@ -649,6 +648,10 @@ unsigned long get_wchan(struct task_struct *p) #ifdef CONFIG_FRAME_POINTER if (in_sh64_switch_to(pc)) { + unsigned long schedule_fp; + unsigned long sh64_switch_to_fp; + unsigned long schedule_caller_pc; + sh64_switch_to_fp = (long) p->thread.sp; /* r14 is saved at offset 4 in the sh64_switch_to frame */ schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4); diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index ce0664a58b49..fddb547f3c2b 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -220,7 +220,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) dp = ((unsigned long) child) + THREAD_SIZE - sizeof(struct pt_dspregs); if (*((int *) (dp - 4)) == SR_FD) { - copy_to_user(addr, (void *) dp, + copy_to_user((void *)addr, (void *) dp, sizeof(struct pt_dspregs)); ret = 0; } @@ -234,7 +234,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) dp = ((unsigned long) child) + THREAD_SIZE - sizeof(struct pt_dspregs); if (*((int *) (dp - 4)) == SR_FD) { - copy_from_user((void *) dp, addr, + copy_from_user((void *) dp, (void *)addr, sizeof(struct pt_dspregs)); ret = 0; } diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 855cdf9d85b1..ff4f54a47c07 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -140,18 +140,26 @@ static void __init 
reserve_crashkernel(void) ret = parse_crashkernel(boot_command_line, free_mem, &crash_size, &crash_base); if (ret == 0 && crash_size) { - if (crash_base > 0) { - printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " - "for crashkernel (System RAM: %ldMB)\n", - (unsigned long)(crash_size >> 20), - (unsigned long)(crash_base >> 20), - (unsigned long)(free_mem >> 20)); - crashk_res.start = crash_base; - crashk_res.end = crash_base + crash_size - 1; - reserve_bootmem(crash_base, crash_size); - } else + if (crash_base <= 0) { printk(KERN_INFO "crashkernel reservation failed - " "you have to specify a base address\n"); + return; + } + + if (reserve_bootmem(crash_base, crash_size, + BOOTMEM_EXCLUSIVE) < 0) { + printk(KERN_INFO "crashkernel reservation failed - " + "memory is in use\n"); + return; + } + + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(free_mem >> 20)); + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; } } #else @@ -184,13 +192,14 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) * an invalid RAM area. */ reserve_bootmem(__MEMORY_START+PAGE_SIZE, - (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START); + (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START, + BOOTMEM_DEFAULT); /* * reserve physical page 0 - it's a special BIOS page on many boxes, * enabling clean reboots, SMP operation, laptop functions. */ - reserve_bootmem(__MEMORY_START, PAGE_SIZE); + reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT); sparse_memory_present_with_active_regions(0); @@ -200,7 +209,7 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { reserve_bootmem(INITRD_START + __MEMORY_START, - INITRD_SIZE); + INITRD_SIZE, BOOTMEM_DEFAULT); initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START; initrd_end = initrd_start + INITRD_SIZE; @@ -324,7 +333,7 @@ static const char *cpu_name[] = { [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3", [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103", - [CPU_SH_NONE] = "Unknown" + [CPU_SH7366] = "SH7366", [CPU_SH_NONE] = "Unknown" }; const char *get_cpu_subtype(struct sh_cpuinfo *c) diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 719e127a7c05..a46cc3a41148 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -338,6 +338,8 @@ ENTRY(sys_call_table) .long sys_epoll_pwait .long sys_utimensat /* 320 */ .long sys_signalfd - .long sys_ni_syscall + .long sys_timerfd_create .long sys_eventfd .long sys_fallocate + .long sys_timerfd_settime /* 325 */ + .long sys_timerfd_gettime diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index 12c7340356ae..d5d7843aad94 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -376,6 +376,8 @@ sys_call_table: .long sys_epoll_pwait .long sys_utimensat .long sys_signalfd - .long sys_ni_syscall /* 350 */ + .long sys_timerfd_create /* 350 */ .long sys_eventfd .long sys_fallocate + .long sys_timerfd_settime + .long sys_timerfd_gettime diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index 2bc04bfee738..7281342c044d 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -120,10 +120,6 @@ static long last_rtc_update; */ void handle_timer_tick(void) 
{ - do_timer(1); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif if (current->pid) profile_tick(CPU_PROFILING); @@ -133,6 +129,16 @@ void handle_timer_tick(void) #endif /* + * Here we are in the timer irq handler. We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other + * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * the irq version of write_lock because as just said we have irq + * locally disabled. -arca + */ + write_seqlock(&xtime_lock); + do_timer(1); + + /* * If we have an externally synchronized Linux clock, then update * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be * called as close as possible to 500 ms before the new second starts. @@ -147,6 +153,11 @@ void handle_timer_tick(void) /* do it again in 60s */ last_rtc_update = xtime.tv_sec - 600; } + write_sequnlock(&xtime_lock); + +#ifndef CONFIG_SMP + update_process_times(user_mode(get_irq_regs())); +#endif } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c index f819ba38a6ce..898977ee2030 100644 --- a/arch/sh/kernel/time_64.c +++ b/arch/sh/kernel/time_64.c @@ -229,15 +229,22 @@ static long last_rtc_update; static inline void do_timer_interrupt(void) { unsigned long long current_ctc; + + if (current->pid) + profile_tick(CPU_PROFILING); + + /* + * Here we are in the timer irq handler. We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other + * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * the irq version of write_lock because as just said we have irq + * locally disabled. -arca + */ + write_lock(&xtime_lock); asm ("getcon cr62, %0" : "=r" (current_ctc)); ctc_last_interrupt = (unsigned long) current_ctc; do_timer(1); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - if (current->pid) - profile_tick(CPU_PROFILING); #ifdef CONFIG_HEARTBEAT if (sh_mv.mv_heartbeat != NULL) @@ -259,6 +266,11 @@ static inline void do_timer_interrupt(void) /* do it again in 60 s */ last_rtc_update = xtime.tv_sec - 600; } + write_unlock(&xtime_lock); + +#ifndef CONFIG_SMP + update_process_times(user_mode(get_irq_regs())); +#endif } /* @@ -275,16 +287,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) timer_status &= ~0x100; ctrl_outw(timer_status, TMU0_TCR); - /* - * Here we are in the timer irq handler. We just have irqs locally - * disabled but we don't know if the timer_bh is running on the other - * CPU. We need to avoid to SMP race with it. NOTE: we don' t need - * the irq version of write_lock because as just said we have irq - * locally disabled. -arca - */ - write_lock(&xtime_lock); do_timer_interrupt(); - write_unlock(&xtime_lock); return IRQ_HANDLED; } diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c index 499e07beebe2..71312324b5de 100644 --- a/arch/sh/kernel/timers/timer-cmt.c +++ b/arch/sh/kernel/timers/timer-cmt.c @@ -100,16 +100,7 @@ static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id) timer_status &= ~0x80; ctrl_outw(timer_status, CMT_CMCSR_0); - /* - * Here we are in the timer irq handler. We just have irqs locally - * disabled but we don't know if the timer_bh is running on the other - * CPU. We need to avoid to SMP race with it. NOTE: we don' t need - * the irq version of write_lock because as just said we have irq - * locally disabled. 
-arca - */ - write_seqlock(&xtime_lock); handle_timer_tick(); - write_sequnlock(&xtime_lock); return IRQ_HANDLED; } diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c index b7499a2a9188..ade9d6eb29f9 100644 --- a/arch/sh/kernel/timers/timer-mtu2.c +++ b/arch/sh/kernel/timers/timer-mtu2.c @@ -100,9 +100,7 @@ static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id) ctrl_outb(timer_status, MTU2_TSR_1); /* Do timer tick */ - write_seqlock(&xtime_lock); handle_timer_tick(); - write_sequnlock(&xtime_lock); return IRQ_HANDLED; } @@ -156,7 +154,6 @@ static int mtu2_timer_stop(void) static int mtu2_timer_init(void) { - u8 tmp; unsigned long interval; setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 2e58f7a6b746..baa4fa368dce 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -147,6 +147,36 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) return -EFAULT; } +static inline void sign_extend(unsigned int count, unsigned char *dst) +{ +#ifdef __LITTLE_ENDIAN__ + if ((count == 1) && dst[0] & 0x80) { + dst[1] = 0xff; + dst[2] = 0xff; + dst[3] = 0xff; + } + if ((count == 2) && dst[1] & 0x80) { + dst[2] = 0xff; + dst[3] = 0xff; + } +#else + if ((count == 1) && dst[3] & 0x80) { + dst[2] = 0xff; + dst[1] = 0xff; + dst[0] = 0xff; + } + if ((count == 2) && dst[2] & 0x80) { + dst[1] = 0xff; + dst[0] = 0xff; + } +#endif +} + +static struct mem_access user_mem_access = { + copy_from_user, + copy_to_user, +}; + /* * handle an instruction that does an unaligned memory access by emulating the * desired behaviour @@ -154,7 +184,8 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) * (if that instruction is in a branch delay slot) * - return 0 if emulation okay, -EFAULT on existential error */ -static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) +static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, + struct mem_access *ma) { int ret, index, count; unsigned long *rm, *rn; @@ -178,25 +209,13 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) dst = (unsigned char*) rn; *(unsigned long*)dst = 0; -#ifdef __LITTLE_ENDIAN__ - if (copy_from_user(dst, src, count)) - goto fetch_fault; - - if ((count == 2) && dst[1] & 0x80) { - dst[2] = 0xff; - dst[3] = 0xff; - } -#else +#if !defined(__LITTLE_ENDIAN__) dst += 4-count; - - if (__copy_user(dst, src, count)) +#endif + if (ma->from(dst, src, count)) goto fetch_fault; - if ((count == 2) && dst[2] & 0x80) { - dst[0] = 0xff; - dst[1] = 0xff; - } -#endif + sign_extend(count, dst); } else { /* to memory */ src = (unsigned char*) rm; @@ -206,7 +225,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) dst = (unsigned char*) *rn; dst += regs->regs[0]; - if (copy_to_user(dst, src, count)) + if (ma->to(dst, src, count)) goto fetch_fault; } ret = 0; @@ -217,7 +236,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) dst = (unsigned char*) *rn; dst += (instruction&0x000F)<<2; - if (copy_to_user(dst,src,4)) + if (ma->to(dst, src, 4)) goto fetch_fault; ret = 0; break; @@ -230,7 +249,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif - if (copy_to_user(dst, src, count)) + if (ma->to(dst, src, count)) goto fetch_fault; ret = 0; break; @@ -241,7 +260,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) 
dst = (unsigned char*) rn; *(unsigned long*)dst = 0; - if (copy_from_user(dst,src,4)) + if (ma->from(dst, src, 4)) goto fetch_fault; ret = 0; break; @@ -253,25 +272,12 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) dst = (unsigned char*) rn; *(unsigned long*)dst = 0; -#ifdef __LITTLE_ENDIAN__ - if (copy_from_user(dst, src, count)) - goto fetch_fault; - - if ((count == 2) && dst[1] & 0x80) { - dst[2] = 0xff; - dst[3] = 0xff; - } -#else +#if !defined(__LITTLE_ENDIAN__) dst += 4-count; - - if (copy_from_user(dst, src, count)) - goto fetch_fault; - - if ((count == 2) && dst[2] & 0x80) { - dst[0] = 0xff; - dst[1] = 0xff; - } #endif + if (ma->from(dst, src, count)) + goto fetch_fault; + sign_extend(count, dst); ret = 0; break; @@ -285,7 +291,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) dst = (unsigned char*) *rm; /* called Rn in the spec */ dst += (instruction&0x000F)<<1; - if (copy_to_user(dst, src, 2)) + if (ma->to(dst, src, 2)) goto fetch_fault; ret = 0; break; @@ -299,21 +305,9 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) dst += 2; #endif - - if (copy_from_user(dst, src, 2)) + if (ma->from(dst, src, 2)) goto fetch_fault; - -#ifdef __LITTLE_ENDIAN__ - if (dst[1] & 0x80) { - dst[2] = 0xff; - dst[3] = 0xff; - } -#else - if (dst[2] & 0x80) { - dst[0] = 0xff; - dst[1] = 0xff; - } -#endif + sign_extend(2, dst); ret = 0; break; } @@ -332,11 +326,14 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) * emulate the instruction in the delay slot * - fetches the instruction from PC+2 */ -static inline int handle_unaligned_delayslot(struct pt_regs *regs) +static inline int handle_delayslot(struct pt_regs *regs, + opcode_t old_instruction, + struct mem_access *ma) { - u16 instruction; + opcode_t instruction; + void *addr = (void *)(regs->pc + instruction_size(old_instruction)); - if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) { + if (copy_from_user(&instruction, addr, sizeof(instruction))) { /* the instruction-fetch faulted */ if (user_mode(regs)) return -EFAULT; @@ -346,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) regs, 0); } - return handle_unaligned_ins(instruction,regs); + return handle_unaligned_ins(instruction, regs, ma); } /* @@ -369,10 +366,11 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit * opcodes.. 
*/ -#ifndef CONFIG_CPU_SH2A + static int handle_unaligned_notify_count = 10; -static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) +int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, + struct mem_access *ma) { u_int rm; int ret, index; @@ -387,7 +385,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) printk(KERN_NOTICE "Fixing up unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", current->comm, task_pid_nr(current), - (u16 *)regs->pc, instruction); + (void *)regs->pc, instruction); } ret = -EFAULT; @@ -395,19 +393,19 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) case 0x0000: if (instruction==0x000B) { /* rts */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = regs->pr; } else if ((instruction&0x00FF)==0x0023) { /* braf @Rm */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += rm + 4; } else if ((instruction&0x00FF)==0x0003) { /* bsrf @Rm */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += rm + 4; @@ -428,13 +426,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) case 0x4000: if ((instruction&0x00FF)==0x002B) { /* jmp @Rm */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = rm; } else if ((instruction&0x00FF)==0x000B) { /* jsr @Rm */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc = rm; @@ -461,7 +459,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) case 0x0B00: /* bf lab - no delayslot*/ break; case 0x0F00: /* bf/s lab */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) != 0) @@ -474,7 +472,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) case 0x0900: /* bt lab - no delayslot */ break; case 0x0D00: /* bt/s lab */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) == 0) @@ -488,13 +486,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) break; case 0xA000: /* bra label */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += SH_PC_12BIT_OFFSET(instruction); break; case 0xB000: /* bsr label */ - ret = handle_unaligned_delayslot(regs); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += SH_PC_12BIT_OFFSET(instruction); @@ -505,12 +503,11 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) /* handle non-delay-slot instruction */ simple: - ret = handle_unaligned_ins(instruction,regs); + ret = handle_unaligned_ins(instruction, regs, ma); if (ret==0) regs->pc += instruction_size(instruction); return ret; } -#endif /* CONFIG_CPU_SH2A */ #ifdef CONFIG_CPU_HAS_SR_RB #define lookup_exception_vector(x) \ @@ -538,10 +535,8 @@ asmlinkage void do_address_error(struct pt_regs *regs, unsigned long error_code = 0; mm_segment_t oldfs; siginfo_t info; -#ifndef CONFIG_CPU_SH2A - u16 
instruction; + opcode_t instruction; int tmp; -#endif /* Intentional ifdef */ #ifdef CONFIG_CPU_HAS_SR_RB @@ -561,9 +556,9 @@ asmlinkage void do_address_error(struct pt_regs *regs, goto uspace_segv; } -#ifndef CONFIG_CPU_SH2A set_fs(USER_DS); - if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { + if (copy_from_user(&instruction, (void *)(regs->pc), + sizeof(instruction))) { /* Argh. Fault on the instruction itself. This should never happen non-SMP */ @@ -571,13 +566,12 @@ asmlinkage void do_address_error(struct pt_regs *regs, goto uspace_segv; } - tmp = handle_unaligned_access(instruction, regs); + tmp = handle_unaligned_access(instruction, regs, + &user_mem_access); set_fs(oldfs); if (tmp==0) return; /* sorted */ -#endif - uspace_segv: printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " "access (PC %lx PR %lx)\n", current->comm, regs->pc, @@ -592,9 +586,9 @@ uspace_segv: if (regs->pc & 1) die("unaligned program counter", regs, error_code); -#ifndef CONFIG_CPU_SH2A set_fs(KERNEL_DS); - if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { + if (copy_from_user(&instruction, (void *)(regs->pc), + sizeof(instruction))) { /* Argh. Fault on the instruction itself. This should never happen non-SMP */ @@ -602,14 +596,8 @@ uspace_segv: die("insn faulting in do_address_error", regs, 0); } - handle_unaligned_access(instruction, regs); + handle_unaligned_access(instruction, regs, &user_mem_access); set_fs(oldfs); -#else - printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " - "access\n", current->comm); - - force_sig(SIGSEGV, current); -#endif } } diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index c0b3c6f6edb5..a55ac81d795b 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -630,7 +630,7 @@ static int misaligned_fpu_load(struct pt_regs *regs, current->thread.fpu.hard.fp_regs[destreg] = buflo; current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; } else { -#if defined(CONFIG_LITTLE_ENDIAN) +#if defined(CONFIG_CPU_LITTLE_ENDIAN) current->thread.fpu.hard.fp_regs[destreg] = bufhi; current->thread.fpu.hard.fp_regs[destreg+1] = buflo; #else @@ -700,7 +700,7 @@ static int misaligned_fpu_store(struct pt_regs *regs, buflo = current->thread.fpu.hard.fp_regs[srcreg]; bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; } else { -#if defined(CONFIG_LITTLE_ENDIAN) +#if defined(CONFIG_CPU_LITTLE_ENDIAN) bufhi = current->thread.fpu.hard.fp_regs[srcreg]; buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; #else diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S index 3f1bd6392bb3..d1e177009a41 100644 --- a/arch/sh/kernel/vmlinux_64.lds.S +++ b/arch/sh/kernel/vmlinux_64.lds.S @@ -51,7 +51,7 @@ SECTIONS KPROBES_TEXT *(.fixup) *(.gnu.warning) -#ifdef CONFIG_LITTLE_ENDIAN +#ifdef CONFIG_CPU_LITTLE_ENDIAN } = 0x6ff0fff0 #else } = 0xf0fff06f |
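As background for the trapped I/O support introduced above (io_trapped.c plus the CONFIG_IO_TRAPPED Makefile hooks), the sketch below shows roughly how a board file might register a trapped window with register_trapped_io(). It is an illustration only, not code from this merge: the struct trapped_io field names are inferred from the accesses visible in io_trapped.c (the real definition lives in asm/io_trapped.h, which is outside this diffstat), and the device name and addresses are made up.

```c
/* Hypothetical example only -- not part of this commit. Field names are
 * inferred from io_trapped.c above; see asm/io_trapped.h for the real API. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/io_trapped.h>

static struct resource example_trapped_resources[] = {
	[0] = {
		.start	= 0xb8000000,			/* made-up MMIO window */
		.end	= 0xb8000000 + PAGE_SIZE - 1,
		.flags	= IORESOURCE_MEM,		/* IORESOURCE_IO or MEM, never both */
	},
};

/* register_trapped_io() rejects structures that are not page aligned */
static struct trapped_io example_trapped __attribute__ ((__aligned__(PAGE_SIZE))) = {
	.resource		= example_trapped_resources,
	.num_resources		= ARRAY_SIZE(example_trapped_resources),
	.minimum_bus_width	= 16,	/* in bits; copy_word() divides this by 8 */
};

static int __init example_trapped_setup(void)
{
	/* faults inside the vmapped window are then emulated via handle_trapped_io() */
	return register_trapped_io(&example_trapped);
}
__initcall(example_trapped_setup);
```

Once registered, ioport_map() tries __ioport_map_trapped() first (see the io.c hunk above), and accesses that fault inside the returned window are replayed by handle_trapped_io() through the unaligned-access emulator, honouring minimum_bus_width.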